/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by a few frequently used macros.

class ciTypeArray;

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
  inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
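  // Worked example (illustration added here, derived from the arithmetic above):
  // for si31 = 0x1234ABCD the rounding term carries into the high half, so
  //   hi = (0x1234ABCD + 0x8000) >> 16 = 0x1235
  //   lo = 0x1234ABCD - (0x1235 << 16) = -0x5433
  // and (hi << 16) + lo reassembles the original offset, which is exactly the split
  // a two-instruction sequence with signed 16 bit immediates (addis + load) needs.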

  // load d = *[a+si31]
  // Emits several instructions if the offset is not encodable in one instruction.
  void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
  void ld_largeoffset          (Register d, int si31, Register a, int emit_filler_nop);
  inline static bool is_ld_largeoffset(address a);
  inline static int get_ld_largeoffset_offset(address a);

  inline void round_to(Register r, int modulus);

  // Load/store with type given by parameter.
  void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);

  // Move register if destination register and target register are different
  inline void mr_if_needed(Register rd, Register rs);
  inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
  // This is dedicated for emitting scheduled mach nodes. For better
  // readability of the ad file I put it here.
  // Endgroups are not needed if
  //  - the scheduler is off
  //  - the scheduler found that there is a natural group end, in that
  //    case it reduced the size of the instruction used in the test
  //    yielding 'needed'.
  inline void endgroup_if_needed(bool needed);

  // Memory barriers.
  inline void membar(int bits);
  inline void release();
  inline void acquire();
  inline void fence();

  // nop padding
  void align(int modulus, int max = 252, int rem = 0);

  //
  // Constants, loading constants, TOC support
  //

  // Address of the global TOC.
  inline static address global_toc();
  // Offset of given address to the global TOC.
  inline static int offset_to_global_toc(const address addr);

  // Address of TOC of the current method.
  inline address method_toc();
  // Offset of given address to TOC of the current method.
  inline int offset_to_method_toc(const address addr);

  // Global TOC.
  void calculate_address_from_global_toc(Register dst, address addr,
                                         bool hi16 = true, bool lo16 = true,
                                         bool add_relocation = true, bool emit_dummy_addr = false);
  inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, true, false);
  };
  inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, false, true);
  };

  inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
  static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);

#ifdef _LP64
  // Patch narrow oop constant.
  inline static bool is_set_narrow_oop(address a, address bound);
  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
  static narrowOop get_narrow_oop(address a, address bound);
#endif

  inline static bool is_load_const_at(address a);

  // Emits an oop const to the constant pool, loads the constant, and
  // sets a relocation info with address current_pc.
  void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
  void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
    assert(dst == R2_TOC, "base register must be TOC");
    load_const_from_method_toc(dst, a, toc);
  }

  static bool is_load_const_from_method_toc_at(address a);
  static int get_offset_of_load_const_from_method_toc_at(address a);

  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);

  // Patch the 64 bit constant of a `load_const' sequence. This is a
  // low level procedure. It neither flushes the instruction cache nor
  // is it atomic.
  static void patch_const(address load_const, long x);

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  // Oops used directly in compiled code are stored in the constant pool,
  // and loaded from there.
  // Allocate new entry for oop in constant pool. Generate relocation.
  AddressLiteral allocate_oop_address(jobject obj);
  // Find oop obj in constant pool. Return relocation with its index.
  AddressLiteral constant_oop_address(jobject obj);

  // Find oop in constant pool and emit instructions to load it.
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Same as load_address.
  inline void set_oop         (AddressLiteral obj_addr, Register d);

  // Read runtime constant: Issue load if constant not yet established,
  // else use real constant.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  //
  // branch, jump
  //

  inline void pd_patch_instruction(address branch, address target);
  NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)

  // Conditional far branch for destinations encodable in 24+2 bits.
  // Same interface as bc, e.g. no inverse boint-field.
  enum {
    bc_far_optimize_not = 0,
    bc_far_optimize_on_relocate = 1
  };
  // optimize: flag for telling the conditional far branch to optimize
  // itself when relocated.
  void bc_far(int boint, int biint, Label& dest, int optimize);
  // Relocation of conditional far branches.
  static bool is_bc_far_at(address instruction_addr);
  static address get_dest_of_bc_far_at(address instruction_addr);
  static void set_dest_of_bc_far_at(address instruction_addr, address dest);
 private:
  static bool inline is_bc_far_variant1_at(address instruction_addr);
  static bool inline is_bc_far_variant2_at(address instruction_addr);
  static bool inline is_bc_far_variant3_at(address instruction_addr);
 public:

  // Convenience bc_far versions.
  inline void blt_far(ConditionRegister crx, Label& L, int optimize);
  inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
  inline void beq_far(ConditionRegister crx, Label& L, int optimize);
  inline void bso_far(ConditionRegister crx, Label& L, int optimize);
  inline void bge_far(ConditionRegister crx, Label& L, int optimize);
  inline void ble_far(ConditionRegister crx, Label& L, int optimize);
  inline void bne_far(ConditionRegister crx, Label& L, int optimize);
  inline void bns_far(ConditionRegister crx, Label& L, int optimize);
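  // Illustrative use of the far branches (a sketch, not code from this file; CCR0,
  // R3_ARG1 and cmpdi are names used elsewhere in the PPC port):
  //
  //   Label L_done;
  //   __ cmpdi(CCR0, R3_ARG1, 0);
  //   __ beq_far(CCR0, L_done, MacroAssembler::bc_far_optimize_on_relocate);
  //   ...                       // code that may end up farther away than 16 bits
  //   __ bind(L_done);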

  // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
 private:
  enum {
    bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const: 2+3 = 5 instructions*/ + 1/*mtctr*/ + 1/*bctrl*/),
    bxx64_patchable_size              = bxx64_patchable_instruction_count * BytesPerInstWord,
    bxx64_patchable_ret_addr_offset   = bxx64_patchable_size
  };
  void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
  static bool is_bxx64_patchable_at(            address instruction_addr, bool link);
  // Does the instruction use a pc-relative encoding of the destination?
  static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant1_at(   address instruction_addr, bool link);
  // Load destination relative to global toc.
  static bool is_bxx64_patchable_variant1b_at(  address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant2_at(   address instruction_addr, bool link);
  static void set_dest_of_bxx64_patchable_at(   address instruction_addr, address target, bool link);
  static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);

 public:
  // call
  enum {
    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
    bl64_patchable_size              = bxx64_patchable_size,
    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
  };
  inline void bl64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/true);
  }
  inline static bool is_bl64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
  }
  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
  }
  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  // jump
  enum {
    b64_patchable_instruction_count = bxx64_patchable_instruction_count,
    b64_patchable_size              = bxx64_patchable_size,
  };
  inline void b64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/false);
  }
  inline static bool is_b64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
  }
  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
  }
  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
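  // Patching sketch (assumed typical use at a relocated call site; call_site and
  // new_entry are placeholders): verify the site first, then redirect it. The site
  // keeps its bxx64_patchable_size bytes; only the loaded destination changes.
  //
  //   if (MacroAssembler::is_bl64_patchable_at(call_site)) {
  //     MacroAssembler::set_dest_of_bl64_patchable_at(call_site, new_entry);
  //   }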

  //
  // Support for frame handling
  //

  // some ABI-related functions
  void save_nonvolatile_gprs(   Register dst_base, int offset);
  void restore_nonvolatile_gprs(Register src_base, int offset);
  void save_volatile_gprs(   Register dst_base, int offset);
  void restore_volatile_gprs(Register src_base, int offset);
  void save_LR_CR(   Register tmp);     // tmp contains LR on return.
  void restore_LR_CR(Register tmp);

  // Get current PC using bl-next-instruction trick.
  address get_PC_trash_LR(Register result);

  // Resize current frame either relatively wrt current SP or absolute.
  void resize_frame(Register offset, Register tmp);
  void resize_frame(int      offset, Register tmp);
  void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);

  // Push a frame of size bytes.
  void push_frame(Register bytes, Register tmp);

  // Push a frame of size `bytes'. No abi space provided.
  void push_frame(unsigned int bytes, Register tmp);

  // Push a frame of size `bytes' plus abi_reg_args on top.
  void push_frame_reg_args(unsigned int bytes, Register tmp);

  // Set up a new C frame with a spill area for non-volatile GPRs and additional
  // space for local variables.
  void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);

  // pop current C frame
  void pop_frame();
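  // Typical pairing (a sketch only; the exact prologue/epilogue depends on the
  // stub, and R11_scratch1 is just a scratch register used elsewhere in the port):
  //
  //   __ save_LR_CR(R0);                            // R0 holds LR afterwards
  //   __ push_frame_reg_args(/*bytes=*/0, R11_scratch1);
  //   ...                                           // body, may call into C
  //   __ pop_frame();
  //   __ restore_LR_CR(R0);
  //   __ blr();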

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

#if defined(ABI_ELFv2)
  // Generic version of a call to C function.
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_entry, bool and_link);
#else
  // Generic version of a call to C function via a function descriptor
  // with variable support for C calling conventions (TOC, ENV, etc.).
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                    bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
#endif

 public:

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

#if defined(ABI_ELFv2)
  // Call a C function via a function descriptor and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_entry);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_entry);
  address call_c(address function_entry, relocInfo::relocType rt);
#else
  // Call a C function via a function descriptor and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_descriptor);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_descriptor);
  address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
  address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
                           Register toc);
#endif

 protected:

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).
  //
  // If no last_java_sp is specified (noreg) then SP will be used instead.
  virtual void call_VM_base(
     // where an oop-result ends up if any; use noreg otherwise
     Register oop_result,
     // to set up last_Java_frame in stubs; use noreg otherwise
     Register last_java_sp,
     // the entry point
     address  entry_point,
     // flag which indicates if exceptions should be checked
     bool     check_exception = true
  );

  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);

 public:
  // Call into the VM.
  // Passes the thread pointer (in R3_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a stub function via a function descriptor, but don't save
  // TOC before call, don't set up TOC and ENV for call, and don't
  // restore TOC after call. Updates and returns _last_calls_return_pc.
  inline address call_stub(Register function_entry);
  inline void call_stub_and_return_to(Register function_entry, Register return_pc);

  //
  // Java utilities
  //

  // Read from the polling page, its address is already in a register.
  inline void load_from_polling_page(Register polling_page_address, int offset = 0);
  // Check whether instruction is a read access to the polling page
  // which was emitted by load_from_polling_page(..).
  static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
                                        address* polling_address_ptr = NULL);

  // Check whether instruction is a write access to the memory
  // serialization page realized by one of the instructions stw, stwu,
  // stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // If instruction is a stack bang of the form ld, stdu, or
  // stdux, return the banged address. Otherwise, return 0.
  static address get_stack_bang_address(int instruction, void* ucontext);

  // Atomics
  // CmpxchgX sets condition register to cmpX(current, compare).
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
  // The stxcx will probably not be succeeded by a releasing store.
  static inline bool cmpxchgx_hint_release_lock()  { return false; }
  static inline bool cmpxchgx_hint_atomic_update() { return false; }

  // Cmpxchg semantics
  enum {
    MemBarNone = 0,
    MemBarRel  = 1,
    MemBarAcq  = 2,
    MemBarFenceAfter = 4 // use powers of 2
  };
  void cmpxchgw(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, bool contention_hint = false);
  void cmpxchgd(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
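  // Usage sketch for the semantics bits (caller-side illustration; the R* names are
  // placeholders, not registers defined here): a lock-acquire style CAS that
  // publishes with acquire semantics and reports success in a GPR as well as in CCR0.
  //
  //   __ cmpxchgd(CCR0,
  //               Rcurrent, Rcompare, Rnew, Raddr,
  //               MacroAssembler::MemBarAcq,
  //               MacroAssembler::cmpxchgx_hint_acquire_lock(),
  //               Rresult);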

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label& L_success,
                                     Label& L_failure);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label* L_success = NULL,
                                     Register result_reg = noreg);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);
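  // Usage sketch (placeholder register names): a type check that only needs the
  // combined version and falls through on failure.
  //
  //   Label L_ok;
  //   __ load_klass(Rsub, Robj);
  //   __ check_klass_subtype(Rsub, Rsuper, Rtmp1, Rtmp2, L_ok);
  //   ...                       // failure path (falls through to here)
  //   __ bind(L_ok);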

  // Method handle support (JSR 292).
  void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);

  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);

  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(  ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // GC barrier support.
  void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                            Register Rtmp1, Register Rtmp2, bool needs_frame = false);
  // General G1 post-barrier generator
  void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                             Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif

  // Support for managing the JavaThread pointer (i.e., the reference to
  // thread-local information).

  // Support for last Java frame (but use call_VM instead where possible):
  //   access R16_thread->last_Java_sp.
  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  void reset_last_Java_frame(void);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);

  // Read vm result from thread: oop_result = R16_thread->result;
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  static bool needs_explicit_null_check(intptr_t offset);

  // Trap-instruction-based checks.
  // Range checks can be distinguished from zero checks as they check 32 bit,
  // zero checks all 64 bits (tw, td).
  inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
  static bool is_trap_null_check(int x) {
    return is_tdi(x, traptoEqual,               -1/*any reg*/, 0) ||
           is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
  }

  inline void trap_zombie_not_entrant();
  static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }

  inline void trap_should_not_reach_here();
  static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }

  inline void trap_ic_miss_check(Register a, Register b);
  static bool is_trap_ic_miss_check(int x) {
    return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
  }

  // Implicit or explicit null check, jumps to static address exception_entry.
  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);

  // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
  inline void load_with_trap_null_check(Register d, int si16, Register s1);

  // Load heap oop and decompress. Loaded oop may not be null.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
  inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
                                      /*specify if d must stay uncompressed*/ Register tmp = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
  inline void decode_heap_oop_not_null(Register d);

  // Null allowed.
  inline void decode_heap_oop(Register d);

  // Load/Store klass oop from klass field. Compress.
  void load_klass(Register dst, Register src);
  void load_klass_with_trap_null_check(Register dst, Register src);
  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
  void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
  static int instr_size_for_decode_klass_not_null();
  void decode_klass_not_null(Register dst, Register src = noreg);
  void encode_klass_not_null(Register dst, Register src = noreg);

  // Load common heap base into register.
  void reinit_heapbase(Register d, Register tmp = noreg);

  // SIGTRAP-based range checks for arrays.
  inline void trap_range_check_l(Register a, Register b);
  inline void trap_range_check_l(Register a, int si16);
  static bool is_trap_range_check_l(int x) {
    return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
  }
  inline void trap_range_check_le(Register a, int si16);
  static bool is_trap_range_check_le(int x) {
    return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_g(Register a, int si16);
  static bool is_trap_range_check_g(int x) {
    return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_ge(Register a, Register b);
  inline void trap_range_check_ge(Register a, int si16);
  static bool is_trap_range_check_ge(int x) {
    return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
  }
  static bool is_trap_range_check(int x) {
    return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
           is_trap_range_check_g(x) || is_trap_range_check_ge(x);
  }

  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);

  // Needle of length 1.
  void string_indexof_1(Register result, Register haystack, Register haycnt,
                        Register needle, jchar needleChar,
                        Register tmp1, Register tmp2);
  // General indexof, optionally with a constant needle length.
  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                      Register result_reg, Register tmp_reg);
  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
                          Register tmp5_reg);
  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                             Register tmp1_reg, Register tmp2_reg);

  //
  // Debugging
  //

  // assert on cr0
  void asm_assert(bool check_equal, const char* msg, int id);
  void asm_assert_eq(const char* msg, int id) { asm_assert(true,  msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

 private:
  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
                            const char* msg, int id);

 public:

  void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
  }

  // Verify R16_thread contents.
  void verify_thread();

  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

  // Convenience method returning function entry. For the ELFv1 case
  // creates a function descriptor at the current address and returns
  // the pointer to it. For the ELFv2 case returns the current address.
  inline address function_entry();

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:

  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  void stop(int type, const char* msg, int id);

 public:
  // Prints msg, dumps registers and stops execution.
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere, "", -1  ); }

  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

// class SkipIfEqualZero:
//
// Instantiating this class will result in assembly code being output that will
// jump around any code emitted between the creation of the instance and its
// automatic destruction at the end of a scope block, depending on the value of
// the flag passed to the constructor, which will be checked at run-time.
class SkipIfEqualZero : public StackObj {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  // 'Temp' is a temp register that this object can use (and trash).
  explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
  ~SkipIfEqualZero();
};
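
// Usage sketch ('masm' and 'SomeFlag' are placeholders, not names from this file):
// emit a block of code only when the native flag it guards is non-zero at run time.
//
//   {
//     SkipIfEqualZero skip(masm, R0, &SomeFlag);
//     // ... code skipped when SomeFlag == 0 ...
//   }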

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP