Thu, 04 Apr 2019 17:56:29 +0800
Merge
aoqi@0 | 1 | /* |
mdoerr@9034 | 2 | * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. |
mdoerr@9034 | 3 | * Copyright 2012, 2017 SAP AG. All rights reserved. |
aoqi@0 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@0 | 5 | * |
aoqi@0 | 6 | * This code is free software; you can redistribute it and/or modify it |
aoqi@0 | 7 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@0 | 8 | * published by the Free Software Foundation. |
aoqi@0 | 9 | * |
aoqi@0 | 10 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@0 | 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@0 | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@0 | 13 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@0 | 14 | * accompanied this code). |
aoqi@0 | 15 | * |
aoqi@0 | 16 | * You should have received a copy of the GNU General Public License version |
aoqi@0 | 17 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@0 | 18 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@0 | 19 | * |
aoqi@0 | 20 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@0 | 21 | * or visit www.oracle.com if you need additional information or have any |
aoqi@0 | 22 | * questions. |
aoqi@0 | 23 | * |
aoqi@0 | 24 | */ |
aoqi@0 | 25 | |
aoqi@0 | 26 | #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP |
aoqi@0 | 27 | #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP |
aoqi@0 | 28 | |
aoqi@0 | 29 | #include "asm/assembler.hpp" |
aoqi@0 | 30 | |
aoqi@0 | 31 | // MacroAssembler extends Assembler by a few frequently used macros. |
aoqi@0 | 32 | |
aoqi@0 | 33 | class ciTypeArray; |
aoqi@0 | 34 | |
aoqi@0 | 35 | class MacroAssembler: public Assembler { |
aoqi@0 | 36 | public: |
aoqi@0 | 37 | MacroAssembler(CodeBuffer* code) : Assembler(code) {} |
aoqi@0 | 38 | |
aoqi@0 | 39 | // |
aoqi@0 | 40 | // Optimized instruction emitters |
aoqi@0 | 41 | // |
aoqi@0 | 42 | |
aoqi@0 | 43 | inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; } |
aoqi@0 | 44 | inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); } |
aoqi@0 | 45 | |
aoqi@0 | 46 | // load d = *[a+si31] |
aoqi@0 | 47 | // Emits several instructions if the offset is not encodable in one instruction. |
aoqi@0 | 48 | void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop); |
aoqi@0 | 49 | void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop); |
aoqi@0 | 50 | inline static bool is_ld_largeoffset(address a); |
aoqi@0 | 51 | inline static int get_ld_largeoffset_offset(address a); |
aoqi@0 | 52 | |
aoqi@0 | 53 | inline void round_to(Register r, int modulus); |
aoqi@0 | 54 | |
aoqi@0 | 55 | // Load/store with type given by parameter. |
aoqi@0 | 56 | void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed); |
aoqi@0 | 57 | void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes); |
aoqi@0 | 58 | |
aoqi@0 | 59 | // Move register if destination register and target register are different |
aoqi@0 | 60 | inline void mr_if_needed(Register rd, Register rs); |
aoqi@0 | 61 | inline void fmr_if_needed(FloatRegister rd, FloatRegister rs); |
aoqi@0 | 62 | // This is dedicated for emitting scheduled mach nodes. For better |
aoqi@0 | 63 | // readability of the ad file I put it here. |
aoqi@0 | 64 | // Endgroups are not needed if |
aoqi@0 | 65 | // - the scheduler is off |
aoqi@0 | 66 | // - the scheduler found that there is a natural group end, in that |
aoqi@0 | 67 | // case it reduced the size of the instruction used in the test |
aoqi@0 | 68 | // yielding 'needed'. |
aoqi@0 | 69 | inline void endgroup_if_needed(bool needed); |
aoqi@0 | 70 | |
aoqi@0 | 71 | // Memory barriers. |
aoqi@0 | 72 | inline void membar(int bits); |
aoqi@0 | 73 | inline void release(); |
aoqi@0 | 74 | inline void acquire(); |
aoqi@0 | 75 | inline void fence(); |
aoqi@0 | 76 | |
aoqi@0 | 77 | // nop padding |
aoqi@0 | 78 | void align(int modulus, int max = 252, int rem = 0); |
aoqi@0 | 79 | |
aoqi@0 | 80 | // |
aoqi@0 | 81 | // Constants, loading constants, TOC support |
aoqi@0 | 82 | // |
aoqi@0 | 83 | |
aoqi@0 | 84 | // Address of the global TOC. |
aoqi@0 | 85 | inline static address global_toc(); |
aoqi@0 | 86 | // Offset of given address to the global TOC. |
aoqi@0 | 87 | inline static int offset_to_global_toc(const address addr); |
aoqi@0 | 88 | |
aoqi@0 | 89 | // Address of TOC of the current method. |
aoqi@0 | 90 | inline address method_toc(); |
aoqi@0 | 91 | // Offset of given address to TOC of the current method. |
aoqi@0 | 92 | inline int offset_to_method_toc(const address addr); |
aoqi@0 | 93 | |
aoqi@0 | 94 | // Global TOC. |
aoqi@0 | 95 | void calculate_address_from_global_toc(Register dst, address addr, |
aoqi@0 | 96 | bool hi16 = true, bool lo16 = true, |
aoqi@0 | 97 | bool add_relocation = true, bool emit_dummy_addr = false); |
aoqi@0 | 98 | inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) { |
aoqi@0 | 99 | calculate_address_from_global_toc(dst, addr, true, false); |
aoqi@0 | 100 | }; |
aoqi@0 | 101 | inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) { |
aoqi@0 | 102 | calculate_address_from_global_toc(dst, addr, false, true); |
aoqi@0 | 103 | }; |
aoqi@0 | 104 | |
aoqi@0 | 105 | inline static bool is_calculate_address_from_global_toc_at(address a, address bound); |
aoqi@0 | 106 | static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound); |
aoqi@0 | 107 | static address get_address_of_calculate_address_from_global_toc_at(address a, address addr); |
aoqi@0 | 108 | |
aoqi@0 | 109 | #ifdef _LP64 |
aoqi@0 | 110 | // Patch narrow oop constant. |
aoqi@0 | 111 | inline static bool is_set_narrow_oop(address a, address bound); |
aoqi@0 | 112 | static int patch_set_narrow_oop(address a, address bound, narrowOop data); |
aoqi@0 | 113 | static narrowOop get_narrow_oop(address a, address bound); |
aoqi@0 | 114 | #endif |
aoqi@0 | 115 | |
aoqi@0 | 116 | inline static bool is_load_const_at(address a); |
aoqi@0 | 117 | |
aoqi@0 | 118 | // Emits an oop const to the constant pool, loads the constant, and |
aoqi@0 | 119 | // sets a relocation info with address current_pc. |
aoqi@0 | 120 | void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc); |
  // Load a TOC entry into the TOC register itself. dst must be R2_TOC
  // (asserted); delegates to load_const_from_method_toc, which also emits
  // the relocation info for the load.
  void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
    assert(dst == R2_TOC, "base register must be TOC");
    load_const_from_method_toc(dst, a, toc);
  }
aoqi@0 | 125 | |
aoqi@0 | 126 | static bool is_load_const_from_method_toc_at(address a); |
aoqi@0 | 127 | static int get_offset_of_load_const_from_method_toc_at(address a); |
aoqi@0 | 128 | |
aoqi@0 | 129 | // Get the 64 bit constant from a `load_const' sequence. |
aoqi@0 | 130 | static long get_const(address load_const); |
aoqi@0 | 131 | |
aoqi@0 | 132 | // Patch the 64 bit constant of a `load_const' sequence. This is a |
aoqi@0 | 133 | // low level procedure. It neither flushes the instruction cache nor |
aoqi@0 | 134 | // is it atomic. |
aoqi@0 | 135 | static void patch_const(address load_const, long x); |
aoqi@0 | 136 | |
aoqi@0 | 137 | // Metadata in code that we have to keep track of. |
aoqi@0 | 138 | AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index |
aoqi@0 | 139 | AddressLiteral constant_metadata_address(Metadata* obj); // find_index |
aoqi@0 | 140 | // Oops used directly in compiled code are stored in the constant pool, |
aoqi@0 | 141 | // and loaded from there. |
aoqi@0 | 142 | // Allocate new entry for oop in constant pool. Generate relocation. |
aoqi@0 | 143 | AddressLiteral allocate_oop_address(jobject obj); |
aoqi@0 | 144 | // Find oop obj in constant pool. Return relocation with it's index. |
aoqi@0 | 145 | AddressLiteral constant_oop_address(jobject obj); |
aoqi@0 | 146 | |
aoqi@0 | 147 | // Find oop in constant pool and emit instructions to load it. |
aoqi@0 | 148 | // Uses constant_oop_address. |
aoqi@0 | 149 | inline void set_oop_constant(jobject obj, Register d); |
aoqi@0 | 150 | // Same as load_address. |
aoqi@0 | 151 | inline void set_oop (AddressLiteral obj_addr, Register d); |
aoqi@0 | 152 | |
aoqi@0 | 153 | // Read runtime constant: Issue load if constant not yet established, |
aoqi@0 | 154 | // else use real constant. |
aoqi@0 | 155 | virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, |
aoqi@0 | 156 | Register tmp, |
aoqi@0 | 157 | int offset); |
aoqi@0 | 158 | |
aoqi@0 | 159 | // |
aoqi@0 | 160 | // branch, jump |
aoqi@0 | 161 | // |
aoqi@0 | 162 | |
aoqi@0 | 163 | inline void pd_patch_instruction(address branch, address target); |
aoqi@0 | 164 | NOT_PRODUCT(static void pd_print_patched_instruction(address branch);) |
aoqi@0 | 165 | |
aoqi@0 | 166 | // Conditional far branch for destinations encodable in 24+2 bits. |
aoqi@0 | 167 | // Same interface as bc, e.g. no inverse boint-field. |
aoqi@0 | 168 | enum { |
aoqi@0 | 169 | bc_far_optimize_not = 0, |
aoqi@0 | 170 | bc_far_optimize_on_relocate = 1 |
aoqi@0 | 171 | }; |
aoqi@0 | 172 | // optimize: flag for telling the conditional far branch to optimize |
aoqi@0 | 173 | // itself when relocated. |
aoqi@0 | 174 | void bc_far(int boint, int biint, Label& dest, int optimize); |
aoqi@0 | 175 | // Relocation of conditional far branches. |
aoqi@0 | 176 | static bool is_bc_far_at(address instruction_addr); |
aoqi@0 | 177 | static address get_dest_of_bc_far_at(address instruction_addr); |
aoqi@0 | 178 | static void set_dest_of_bc_far_at(address instruction_addr, address dest); |
aoqi@0 | 179 | private: |
aoqi@0 | 180 | static bool inline is_bc_far_variant1_at(address instruction_addr); |
aoqi@0 | 181 | static bool inline is_bc_far_variant2_at(address instruction_addr); |
aoqi@0 | 182 | static bool inline is_bc_far_variant3_at(address instruction_addr); |
aoqi@0 | 183 | public: |
aoqi@0 | 184 | |
aoqi@0 | 185 | // Convenience bc_far versions. |
aoqi@0 | 186 | inline void blt_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 187 | inline void bgt_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 188 | inline void beq_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 189 | inline void bso_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 190 | inline void bge_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 191 | inline void ble_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 192 | inline void bne_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 193 | inline void bns_far(ConditionRegister crx, Label& L, int optimize); |
aoqi@0 | 194 | |
aoqi@0 | 195 | // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump. |
aoqi@0 | 196 | private: |
aoqi@0 | 197 | enum { |
aoqi@0 | 198 | bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/), |
aoqi@0 | 199 | bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord, |
aoqi@0 | 200 | bxx64_patchable_ret_addr_offset = bxx64_patchable_size |
aoqi@0 | 201 | }; |
aoqi@0 | 202 | void bxx64_patchable(address target, relocInfo::relocType rt, bool link); |
aoqi@0 | 203 | static bool is_bxx64_patchable_at( address instruction_addr, bool link); |
aoqi@0 | 204 | // Does the instruction use a pc-relative encoding of the destination? |
aoqi@0 | 205 | static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link); |
aoqi@0 | 206 | static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link); |
aoqi@0 | 207 | // Load destination relative to global toc. |
aoqi@0 | 208 | static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link); |
aoqi@0 | 209 | static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link); |
aoqi@0 | 210 | static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link); |
aoqi@0 | 211 | static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link); |
aoqi@0 | 212 | |
aoqi@0 | 213 | public: |
aoqi@0 | 214 | // call |
  // call
  // Layout constants of the patchable 64-bit absolute call sequence
  // (identical to the generic bxx64_patchable layout above).
  enum {
    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
    bl64_patchable_size              = bxx64_patchable_size,
    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
  };
  // Emit a patchable (NOT mt-safe, see comment above) 64-bit absolute call.
  inline void bl64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/true);
  }
  // Is there a patchable call sequence at instruction_addr?
  inline static bool is_bl64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  // Does the call sequence at instruction_addr use a pc-relative destination encoding?
  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
  }
  // Patch the destination of the call sequence at instruction_addr to target.
  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
  }
  // Read back the destination of the call sequence at instruction_addr.
  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
aoqi@0 | 235 | // jump |
aoqi@0 | 236 | enum { |
aoqi@0 | 237 | b64_patchable_instruction_count = bxx64_patchable_instruction_count, |
aoqi@0 | 238 | b64_patchable_size = bxx64_patchable_size, |
aoqi@0 | 239 | }; |
  // Emit a patchable (NOT mt-safe, see comment above) 64-bit absolute jump.
  inline void b64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/false);
  }
  // Is there a patchable jump sequence at instruction_addr?
  inline static bool is_b64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
  // Does the jump sequence at instruction_addr use a pc-relative destination encoding?
  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
  }
  // Patch the destination of the jump sequence at instruction_addr to target.
  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
  }
  // Read back the destination of the jump sequence at instruction_addr.
  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
aoqi@0 | 255 | |
aoqi@0 | 256 | // |
aoqi@0 | 257 | // Support for frame handling |
aoqi@0 | 258 | // |
aoqi@0 | 259 | |
aoqi@0 | 260 | // some ABI-related functions |
aoqi@0 | 261 | void save_nonvolatile_gprs( Register dst_base, int offset); |
aoqi@0 | 262 | void restore_nonvolatile_gprs(Register src_base, int offset); |
aoqi@0 | 263 | void save_volatile_gprs( Register dst_base, int offset); |
aoqi@0 | 264 | void restore_volatile_gprs(Register src_base, int offset); |
aoqi@0 | 265 | void save_LR_CR( Register tmp); // tmp contains LR on return. |
aoqi@0 | 266 | void restore_LR_CR(Register tmp); |
aoqi@0 | 267 | |
aoqi@0 | 268 | // Get current PC using bl-next-instruction trick. |
aoqi@0 | 269 | address get_PC_trash_LR(Register result); |
aoqi@0 | 270 | |
aoqi@0 | 271 | // Resize current frame either relatively wrt to current SP or absolute. |
aoqi@0 | 272 | void resize_frame(Register offset, Register tmp); |
aoqi@0 | 273 | void resize_frame(int offset, Register tmp); |
aoqi@0 | 274 | void resize_frame_absolute(Register addr, Register tmp1, Register tmp2); |
aoqi@0 | 275 | |
aoqi@0 | 276 | // Push a frame of size bytes. |
aoqi@0 | 277 | void push_frame(Register bytes, Register tmp); |
aoqi@0 | 278 | |
aoqi@0 | 279 | // Push a frame of size `bytes'. No abi space provided. |
aoqi@0 | 280 | void push_frame(unsigned int bytes, Register tmp); |
aoqi@0 | 281 | |
aoqi@0 | 282 | // Push a frame of size `bytes' plus abi_reg_args on top. |
aoqi@0 | 283 | void push_frame_reg_args(unsigned int bytes, Register tmp); |
aoqi@0 | 284 | |
aoqi@0 | 285 | // Setup up a new C frame with a spill area for non-volatile GPRs and additional |
aoqi@0 | 286 | // space for local variables |
aoqi@0 | 287 | void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp); |
aoqi@0 | 288 | |
aoqi@0 | 289 | // pop current C frame |
aoqi@0 | 290 | void pop_frame(); |
aoqi@0 | 291 | |
aoqi@0 | 292 | // |
aoqi@0 | 293 | // Calls |
aoqi@0 | 294 | // |
aoqi@0 | 295 | |
aoqi@0 | 296 | private: |
aoqi@0 | 297 | address _last_calls_return_pc; |
aoqi@0 | 298 | |
aoqi@0 | 299 | #if defined(ABI_ELFv2) |
aoqi@0 | 300 | // Generic version of a call to C function. |
aoqi@0 | 301 | // Updates and returns _last_calls_return_pc. |
aoqi@0 | 302 | address branch_to(Register function_entry, bool and_link); |
aoqi@0 | 303 | #else |
aoqi@0 | 304 | // Generic version of a call to C function via a function descriptor |
aoqi@0 | 305 | // with variable support for C calling conventions (TOC, ENV, etc.). |
aoqi@0 | 306 | // updates and returns _last_calls_return_pc. |
aoqi@0 | 307 | address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, |
aoqi@0 | 308 | bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee); |
aoqi@0 | 309 | #endif |
aoqi@0 | 310 | |
aoqi@0 | 311 | public: |
aoqi@0 | 312 | |
aoqi@0 | 313 | // Get the pc where the last call will return to. returns _last_calls_return_pc. |
aoqi@0 | 314 | inline address last_calls_return_pc(); |
aoqi@0 | 315 | |
aoqi@0 | 316 | #if defined(ABI_ELFv2) |
aoqi@0 | 317 | // Call a C function via a function descriptor and use full C |
aoqi@0 | 318 | // calling conventions. Updates and returns _last_calls_return_pc. |
aoqi@0 | 319 | address call_c(Register function_entry); |
aoqi@0 | 320 | // For tail calls: only branch, don't link, so callee returns to caller of this function. |
aoqi@0 | 321 | address call_c_and_return_to_caller(Register function_entry); |
aoqi@0 | 322 | address call_c(address function_entry, relocInfo::relocType rt); |
aoqi@0 | 323 | #else |
aoqi@0 | 324 | // Call a C function via a function descriptor and use full C |
aoqi@0 | 325 | // calling conventions. Updates and returns _last_calls_return_pc. |
aoqi@0 | 326 | address call_c(Register function_descriptor); |
aoqi@0 | 327 | // For tail calls: only branch, don't link, so callee returns to caller of this function. |
aoqi@0 | 328 | address call_c_and_return_to_caller(Register function_descriptor); |
aoqi@0 | 329 | address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt); |
aoqi@0 | 330 | address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt, |
aoqi@0 | 331 | Register toc); |
aoqi@0 | 332 | #endif |
aoqi@0 | 333 | |
aoqi@0 | 334 | protected: |
aoqi@0 | 335 | |
aoqi@0 | 336 | // It is imperative that all calls into the VM are handled via the |
aoqi@0 | 337 | // call_VM macros. They make sure that the stack linkage is setup |
aoqi@0 | 338 | // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points |
aoqi@0 | 339 | // while call_VM_leaf's correspond to LEAF entry points. |
aoqi@0 | 340 | // |
aoqi@0 | 341 | // This is the base routine called by the different versions of |
aoqi@0 | 342 | // call_VM. The interpreter may customize this version by overriding |
aoqi@0 | 343 | // it for its purposes (e.g., to save/restore additional registers |
aoqi@0 | 344 | // when doing a VM call). |
aoqi@0 | 345 | // |
aoqi@0 | 346 | // If no last_java_sp is specified (noreg) then SP will be used instead. |
aoqi@0 | 347 | virtual void call_VM_base( |
aoqi@0 | 348 | // where an oop-result ends up if any; use noreg otherwise |
aoqi@0 | 349 | Register oop_result, |
aoqi@0 | 350 | // to set up last_Java_frame in stubs; use noreg otherwise |
aoqi@0 | 351 | Register last_java_sp, |
aoqi@0 | 352 | // the entry point |
aoqi@0 | 353 | address entry_point, |
aoqi@0 | 354 | // flag which indicates if exception should be checked |
aoqi@0 | 355 | bool check_exception = true |
aoqi@0 | 356 | ); |
aoqi@0 | 357 | |
aoqi@0 | 358 | // Support for VM calls. This is the base routine called by the |
aoqi@0 | 359 | // different versions of call_VM_leaf. The interpreter may customize |
aoqi@0 | 360 | // this version by overriding it for its purposes (e.g., to |
aoqi@0 | 361 | // save/restore additional registers when doing a VM call). |
aoqi@0 | 362 | void call_VM_leaf_base(address entry_point); |
aoqi@0 | 363 | |
aoqi@0 | 364 | public: |
aoqi@0 | 365 | // Call into the VM. |
aoqi@0 | 366 | // Passes the thread pointer (in R3_ARG1) as a prepended argument. |
aoqi@0 | 367 | // Makes sure oop return values are visible to the GC. |
aoqi@0 | 368 | void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); |
aoqi@0 | 369 | void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); |
aoqi@0 | 370 | void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); |
goetz@7424 | 371 | void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true); |
aoqi@0 | 372 | void call_VM_leaf(address entry_point); |
aoqi@0 | 373 | void call_VM_leaf(address entry_point, Register arg_1); |
aoqi@0 | 374 | void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); |
aoqi@0 | 375 | void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); |
aoqi@0 | 376 | |
aoqi@0 | 377 | // Call a stub function via a function descriptor, but don't save |
aoqi@0 | 378 | // TOC before call, don't setup TOC and ENV for call, and don't |
aoqi@0 | 379 | // restore TOC after call. Updates and returns _last_calls_return_pc. |
aoqi@0 | 380 | inline address call_stub(Register function_entry); |
aoqi@0 | 381 | inline void call_stub_and_return_to(Register function_entry, Register return_pc); |
aoqi@0 | 382 | |
aoqi@0 | 383 | // |
aoqi@0 | 384 | // Java utilities |
aoqi@0 | 385 | // |
aoqi@0 | 386 | |
aoqi@0 | 387 | // Read from the polling page, its address is already in a register. |
aoqi@0 | 388 | inline void load_from_polling_page(Register polling_page_address, int offset = 0); |
aoqi@0 | 389 | // Check whether instruction is a read access to the polling page |
aoqi@0 | 390 | // which was emitted by load_from_polling_page(..). |
aoqi@0 | 391 | static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/, |
aoqi@0 | 392 | address* polling_address_ptr = NULL); |
aoqi@0 | 393 | |
aoqi@0 | 394 | // Check whether instruction is a write access to the memory |
aoqi@0 | 395 | // serialization page realized by one of the instructions stw, stwu, |
aoqi@0 | 396 | // stwx, or stwux. |
aoqi@0 | 397 | static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext); |
aoqi@0 | 398 | |
aoqi@0 | 399 | // Support for NULL-checks |
aoqi@0 | 400 | // |
aoqi@0 | 401 | // Generates code that causes a NULL OS exception if the content of reg is NULL. |
aoqi@0 | 402 | // If the accessed location is M[reg + offset] and the offset is known, provide the |
aoqi@0 | 403 | // offset. No explicit code generation is needed if the offset is within a certain |
aoqi@0 | 404 | // range (0 <= offset <= page_size). |
aoqi@0 | 405 | |
aoqi@0 | 406 | // Stack overflow checking |
aoqi@0 | 407 | void bang_stack_with_offset(int offset); |
aoqi@0 | 408 | |
aoqi@0 | 409 | // If instruction is a stack bang of the form ld, stdu, or |
aoqi@0 | 410 | // stdux, return the banged address. Otherwise, return 0. |
aoqi@0 | 411 | static address get_stack_bang_address(int instruction, void* ucontext); |
aoqi@0 | 412 | |
aoqi@0 | 413 | // Atomics |
aoqi@0 | 414 | // CmpxchgX sets condition register to cmpX(current, compare). |
aoqi@0 | 415 | // (flag == ne) => (dest_current_value != compare_value), (!swapped) |
aoqi@0 | 416 | // (flag == eq) => (dest_current_value == compare_value), ( swapped) |
aoqi@0 | 417 | static inline bool cmpxchgx_hint_acquire_lock() { return true; } |
aoqi@0 | 418 | // The stxcx will probably not be succeeded by a releasing store. |
aoqi@0 | 419 | static inline bool cmpxchgx_hint_release_lock() { return false; } |
aoqi@0 | 420 | static inline bool cmpxchgx_hint_atomic_update() { return false; } |
aoqi@0 | 421 | |
aoqi@0 | 422 | // Cmpxchg semantics |
aoqi@0 | 423 | enum { |
aoqi@0 | 424 | MemBarNone = 0, |
aoqi@0 | 425 | MemBarRel = 1, |
aoqi@0 | 426 | MemBarAcq = 2, |
aoqi@0 | 427 | MemBarFenceAfter = 4 // use powers of 2 |
aoqi@0 | 428 | }; |
aoqi@0 | 429 | void cmpxchgw(ConditionRegister flag, |
aoqi@0 | 430 | Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base, |
aoqi@0 | 431 | int semantics, bool cmpxchgx_hint = false, |
aoqi@0 | 432 | Register int_flag_success = noreg, bool contention_hint = false); |
aoqi@0 | 433 | void cmpxchgd(ConditionRegister flag, |
aoqi@0 | 434 | Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base, |
aoqi@0 | 435 | int semantics, bool cmpxchgx_hint = false, |
aoqi@0 | 436 | Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false); |
aoqi@0 | 437 | |
aoqi@0 | 438 | // interface method calling |
aoqi@0 | 439 | void lookup_interface_method(Register recv_klass, |
aoqi@0 | 440 | Register intf_klass, |
aoqi@0 | 441 | RegisterOrConstant itable_index, |
aoqi@0 | 442 | Register method_result, |
aoqi@0 | 443 | Register temp_reg, Register temp2_reg, |
mdoerr@9034 | 444 | Label& no_such_interface, |
mdoerr@9034 | 445 | bool return_method = true); |
aoqi@0 | 446 | |
aoqi@0 | 447 | // virtual method calling |
aoqi@0 | 448 | void lookup_virtual_method(Register recv_klass, |
aoqi@0 | 449 | RegisterOrConstant vtable_index, |
aoqi@0 | 450 | Register method_result); |
aoqi@0 | 451 | |
aoqi@0 | 452 | // Test sub_klass against super_klass, with fast and slow paths. |
aoqi@0 | 453 | |
aoqi@0 | 454 | // The fast path produces a tri-state answer: yes / no / maybe-slow. |
aoqi@0 | 455 | // One of the three labels can be NULL, meaning take the fall-through. |
aoqi@0 | 456 | // If super_check_offset is -1, the value is loaded up from super_klass. |
aoqi@0 | 457 | // No registers are killed, except temp_reg and temp2_reg. |
aoqi@0 | 458 | // If super_check_offset is not -1, temp2_reg is not used and can be noreg. |
aoqi@0 | 459 | void check_klass_subtype_fast_path(Register sub_klass, |
aoqi@0 | 460 | Register super_klass, |
aoqi@0 | 461 | Register temp1_reg, |
aoqi@0 | 462 | Register temp2_reg, |
aoqi@0 | 463 | Label& L_success, |
aoqi@0 | 464 | Label& L_failure); |
aoqi@0 | 465 | |
aoqi@0 | 466 | // The rest of the type check; must be wired to a corresponding fast path. |
aoqi@0 | 467 | // It does not repeat the fast path logic, so don't use it standalone. |
aoqi@0 | 468 | // The temp_reg can be noreg, if no temps are available. |
aoqi@0 | 469 | // It can also be sub_klass or super_klass, meaning it's OK to kill that one. |
aoqi@0 | 470 | // Updates the sub's secondary super cache as necessary. |
aoqi@0 | 471 | void check_klass_subtype_slow_path(Register sub_klass, |
aoqi@0 | 472 | Register super_klass, |
aoqi@0 | 473 | Register temp1_reg, |
aoqi@0 | 474 | Register temp2_reg, |
aoqi@0 | 475 | Label* L_success = NULL, |
aoqi@0 | 476 | Register result_reg = noreg); |
aoqi@0 | 477 | |
aoqi@0 | 478 | // Simplified, combined version, good for typical uses. |
aoqi@0 | 479 | // Falls through on failure. |
aoqi@0 | 480 | void check_klass_subtype(Register sub_klass, |
aoqi@0 | 481 | Register super_klass, |
aoqi@0 | 482 | Register temp1_reg, |
aoqi@0 | 483 | Register temp2_reg, |
aoqi@0 | 484 | Label& L_success); |
aoqi@0 | 485 | |
aoqi@0 | 486 | // Method handle support (JSR 292). |
aoqi@0 | 487 | void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type); |
aoqi@0 | 488 | |
aoqi@0 | 489 | RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0); |
aoqi@0 | 490 | |
aoqi@0 | 491 | // Biased locking support |
aoqi@0 | 492 | // Upon entry,obj_reg must contain the target object, and mark_reg |
aoqi@0 | 493 | // must contain the target object's header. |
aoqi@0 | 494 | // Destroys mark_reg if an attempt is made to bias an anonymously |
aoqi@0 | 495 | // biased lock. In this case a failure will go either to the slow |
aoqi@0 | 496 | // case or fall through with the notEqual condition code set with |
aoqi@0 | 497 | // the expectation that the slow case in the runtime will be called. |
aoqi@0 | 498 | // In the fall-through case where the CAS-based lock is done, |
aoqi@0 | 499 | // mark_reg is not destroyed. |
aoqi@0 | 500 | void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg, |
aoqi@0 | 501 | Register temp2_reg, Label& done, Label* slow_case = NULL); |
aoqi@0 | 502 | // Upon entry, the base register of mark_addr must contain the oop. |
aoqi@0 | 503 | // Destroys temp_reg. |
aoqi@0 | 504 | // If allow_delay_slot_filling is set to true, the next instruction |
aoqi@0 | 505 | // emitted after this one will go in an annulled delay slot if the |
aoqi@0 | 506 | // biased locking exit case failed. |
aoqi@0 | 507 | void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done); |
aoqi@0 | 508 | |
aoqi@0 | 509 | void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3); |
aoqi@0 | 510 | void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3); |
aoqi@0 | 511 | |
aoqi@0 | 512 | // Support for serializing memory accesses between threads |
aoqi@0 | 513 | void serialize_memory(Register thread, Register tmp1, Register tmp2); |
aoqi@0 | 514 | |
aoqi@0 | 515 | // GC barrier support. |
aoqi@0 | 516 | void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp); |
aoqi@0 | 517 | void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj); |
aoqi@0 | 518 | |
aoqi@0 | 519 | #if INCLUDE_ALL_GCS |
aoqi@0 | 520 | // General G1 pre-barrier generator. |
aoqi@0 | 521 | void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val, |
aoqi@0 | 522 | Register Rtmp1, Register Rtmp2, bool needs_frame = false); |
aoqi@0 | 523 | // General G1 post-barrier generator |
aoqi@0 | 524 | void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, |
aoqi@0 | 525 | Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL); |
aoqi@0 | 526 | #endif |
aoqi@0 | 527 | |
  // Support for managing the JavaThread pointer (i.e., the reference to
  // thread-local information).

  // Support for last Java frame (but use call_VM instead where possible):
  // access R16_thread->last_Java_sp.
  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  void reset_last_Java_frame(void);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);

  // Read vm result from thread: oop_result = R16_thread->result;
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  // Whether a dereference at 'offset' off a possibly-null base still needs an
  // explicit null check (vs. relying on an implicit/hardware one).
  static bool needs_explicit_null_check(intptr_t offset);
aoqi@0 | 543 | // Trap-instruction-based checks. |
aoqi@0 | 544 | // Range checks can be distinguished from zero checks as they check 32 bit, |
aoqi@0 | 545 | // zero checks all 64 bits (tw, td). |
aoqi@0 | 546 | inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual); |
aoqi@0 | 547 | static bool is_trap_null_check(int x) { |
aoqi@0 | 548 | return is_tdi(x, traptoEqual, -1/*any reg*/, 0) || |
aoqi@0 | 549 | is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0); |
aoqi@0 | 550 | } |
aoqi@0 | 551 | |
aoqi@0 | 552 | inline void trap_zombie_not_entrant(); |
aoqi@0 | 553 | static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); } |
aoqi@0 | 554 | |
aoqi@0 | 555 | inline void trap_should_not_reach_here(); |
aoqi@0 | 556 | static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); } |
aoqi@0 | 557 | |
aoqi@0 | 558 | inline void trap_ic_miss_check(Register a, Register b); |
aoqi@0 | 559 | static bool is_trap_ic_miss_check(int x) { |
aoqi@0 | 560 | return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/); |
aoqi@0 | 561 | } |
aoqi@0 | 562 | |
  // Implicit or explicit null check, jumps to static address exception_entry.
  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);

  // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
  inline void load_with_trap_null_check(Register d, int si16, Register s1);

  // Load heap oop and decompress. Loaded oop may not be null.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
  inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
                                      /*specify if d must stay uncompressed*/ Register tmp = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
  inline void decode_heap_oop_not_null(Register d);

  // Null allowed.
  inline void decode_heap_oop(Register d);

  // Load/Store klass oop from klass field. Compress.
  void load_klass(Register dst, Register src);
  void load_klass_with_trap_null_check(Register dst, Register src);
  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
  void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
  // Size in bytes of the code emitted by decode_klass_not_null (used by
  // callers that must know the instruction footprint up front).
  static int instr_size_for_decode_klass_not_null();
  void decode_klass_not_null(Register dst, Register src = noreg);
  void encode_klass_not_null(Register dst, Register src = noreg);

  // Load common heap base into register.
  void reinit_heapbase(Register d, Register tmp = noreg);
aoqi@0 | 595 | |
aoqi@0 | 596 | // SIGTRAP-based range checks for arrays. |
aoqi@0 | 597 | inline void trap_range_check_l(Register a, Register b); |
aoqi@0 | 598 | inline void trap_range_check_l(Register a, int si16); |
aoqi@0 | 599 | static bool is_trap_range_check_l(int x) { |
aoqi@0 | 600 | return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) || |
aoqi@0 | 601 | is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) ); |
aoqi@0 | 602 | } |
aoqi@0 | 603 | inline void trap_range_check_le(Register a, int si16); |
aoqi@0 | 604 | static bool is_trap_range_check_le(int x) { |
aoqi@0 | 605 | return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/); |
aoqi@0 | 606 | } |
aoqi@0 | 607 | inline void trap_range_check_g(Register a, int si16); |
aoqi@0 | 608 | static bool is_trap_range_check_g(int x) { |
aoqi@0 | 609 | return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/); |
aoqi@0 | 610 | } |
aoqi@0 | 611 | inline void trap_range_check_ge(Register a, Register b); |
aoqi@0 | 612 | inline void trap_range_check_ge(Register a, int si16); |
aoqi@0 | 613 | static bool is_trap_range_check_ge(int x) { |
aoqi@0 | 614 | return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) || |
aoqi@0 | 615 | is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) ); |
aoqi@0 | 616 | } |
aoqi@0 | 617 | static bool is_trap_range_check(int x) { |
aoqi@0 | 618 | return is_trap_range_check_l(x) || is_trap_range_check_le(x) || |
aoqi@0 | 619 | is_trap_range_check_g(x) || is_trap_range_check_ge(x); |
aoqi@0 | 620 | } |
aoqi@0 | 621 | |
  // Clear 'cnt_dwords' doublewords of memory starting at base_ptr.
  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);

  // String intrinsics (operate on jchar, i.e. UTF-16, data).

  // Needle of length 1.
  void string_indexof_1(Register result, Register haystack, Register haycnt,
                        Register needle, jchar needleChar,
                        Register tmp1, Register tmp2);
  // General indexof, eventually with constant needle length.
  // needlecntval, when non-zero, presumably supplies a compile-time-constant
  // needle length with characters taken from needle_values -- confirm in .cpp.
  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                      Register result_reg, Register tmp_reg);
  // Char-array equality; the Imm variant takes the count as an immediate.
  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
                          Register tmp5_reg);
  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                             Register tmp1_reg, Register tmp2_reg);
aoqi@0 | 639 | |
  // CRC32 Intrinsics.
  // Table-driven byte/word kernels plus a vpmsumd-accelerated variant; the
  // kernel_crc32_* entry points differ in how many bytes they fold per
  // iteration (details in the .cpp implementations).
  void load_reverse_32(Register dst, Register src);
  int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
  void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byte_crc32(Register crc, Register val, Register table);
  // Byte-loop update; invertCRC controls whether the crc register is
  // complemented around the loop (common CRC32 convention -- see .cpp).
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data, bool loopAlignment, bool invertCRC);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3);
  // Vector (vpmsumd) variant with Barrett-reduction constants.
  void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
                                  Register constants, Register barretConstants,
                                  Register t0, Register t1, Register t2, Register t3, Register t4);
  void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
                                  Register constants, Register barretConstants,
                                  Register t0, Register t1, Register t2);

  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
gromero@9496 | 667 | |
aoqi@0 | 668 | // |
aoqi@0 | 669 | // Debugging |
aoqi@0 | 670 | // |
aoqi@0 | 671 | |
aoqi@0 | 672 | // assert on cr0 |
aoqi@0 | 673 | void asm_assert(bool check_equal, const char* msg, int id); |
aoqi@0 | 674 | void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); } |
aoqi@0 | 675 | void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); } |
aoqi@0 | 676 | |
 private:
  // Shared implementation behind the mem8-is-zero asserts below: asserts on
  // 'size' bytes at mem_base + mem_offset being (or not being) zero.
  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
                            const char* msg, int id);
aoqi@0 | 680 | |
aoqi@0 | 681 | public: |
aoqi@0 | 682 | |
aoqi@0 | 683 | void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) { |
aoqi@0 | 684 | asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id); |
aoqi@0 | 685 | } |
aoqi@0 | 686 | void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) { |
aoqi@0 | 687 | asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id); |
aoqi@0 | 688 | } |
aoqi@0 | 689 | |
  // Verify R16_thread contents.
  void verify_thread();

  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify method and klass metadata (compare against vptr?)
  // Intentionally empty bodies for now; the macros below keep call sites in
  // place so verification can be added without touching callers.
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

  // Convenience method returning function entry. For the ELFv1 case
  // creates function descriptor at the current address and returns
  // the pointer to it. For the ELFv2 case returns the current address.
  inline address function_entry();

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
aoqi@0 | 707 | |
 private:

  // Stop kinds consumed by stop(int, ...); stop_end is the number of kinds.
  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  void stop(int type, const char* msg, int id);

 public:
  // Prints msg, dumps registers and stops execution.
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere, "", -1); }

  // Debug-only memory zapping helper; expands to an empty body in product
  // builds via PRODUCT_RETURN.
  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
aoqi@0 | 727 | }; |
aoqi@0 | 728 | |
// class SkipIfEqualZero:
//
// Instantiating this class will result in assembly code being output that will
// jump around any code emitted between the creation of the instance and its
// automatic destruction at the end of a scope block, depending on the value of
// the flag passed to the constructor, which will be checked at run-time.
class SkipIfEqualZero : public StackObj {
 private:
  MacroAssembler* _masm;  // Assembler the skip branch is emitted into.
  Label           _label; // Target of the skip branch (bound when the scope ends -- see .cpp).

 public:
  // 'Temp' is a temp register that this object can use (and trash).
  explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
  ~SkipIfEqualZero();
};
aoqi@0 | 745 | |
aoqi@0 | 746 | #endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP |