Thu, 06 Mar 2014 10:55:28 -0800
8035647: PPC64: Support for elf v2 abi.
Summary: ELFv2 ABI used by the little endian PowerPC64 on Linux.
Reviewed-by: kvn
Contributed-by: asmundak@google.com
1.1 --- a/src/cpu/ppc/vm/assembler_ppc.hpp Thu Feb 20 11:05:12 2014 +0100 1.2 +++ b/src/cpu/ppc/vm/assembler_ppc.hpp Thu Mar 06 10:55:28 2014 -0800 1.3 @@ -124,6 +124,7 @@ 1.4 } 1.5 }; 1.6 1.7 +#if !defined(ABI_ELFv2) 1.8 // A ppc64 function descriptor. 1.9 struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC { 1.10 private: 1.11 @@ -161,6 +162,7 @@ 1.12 _env = (address) 0xbad; 1.13 } 1.14 }; 1.15 +#endif 1.16 1.17 class Assembler : public AbstractAssembler { 1.18 protected: 1.19 @@ -1067,6 +1069,7 @@ 1.20 // Emit an address. 1.21 inline address emit_addr(const address addr = NULL); 1.22 1.23 +#if !defined(ABI_ELFv2) 1.24 // Emit a function descriptor with the specified entry point, TOC, 1.25 // and ENV. If the entry point is NULL, the descriptor will point 1.26 // just past the descriptor. 1.27 @@ -1074,6 +1077,7 @@ 1.28 inline address emit_fd(address entry = NULL, 1.29 address toc = (address) FunctionDescriptor::friend_toc, 1.30 address env = (address) FunctionDescriptor::friend_env); 1.31 +#endif 1.32 1.33 ///////////////////////////////////////////////////////////////////////////////////// 1.34 // PPC instructions
2.1 --- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp Thu Feb 20 11:05:12 2014 +0100 2.2 +++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp Thu Mar 06 10:55:28 2014 -0800 2.3 @@ -55,6 +55,7 @@ 2.4 return start; 2.5 } 2.6 2.7 +#if !defined(ABI_ELFv2) 2.8 // Emit a function descriptor with the specified entry point, TOC, and 2.9 // ENV. If the entry point is NULL, the descriptor will point just 2.10 // past the descriptor. 2.11 @@ -73,6 +74,7 @@ 2.12 2.13 return (address)fd; 2.14 } 2.15 +#endif 2.16 2.17 // Issue an illegal instruction. 0 is guaranteed to be an illegal instruction. 2.18 inline void Assembler::illtrap() { Assembler::emit_int32(0); }
3.1 --- a/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 3.2 +++ b/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 3.3 @@ -1136,7 +1136,9 @@ 3.4 // (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to 3.5 // F13_ARG13. 3.6 __ mr(R3_ARG1, R18_locals); 3.7 +#if !defined(ABI_ELFv2) 3.8 __ ld(signature_handler_fd, 0, signature_handler_fd); 3.9 +#endif 3.10 __ call_stub(signature_handler_fd); 3.11 // reload method 3.12 __ ld(R19_method, state_(_method)); 3.13 @@ -1295,8 +1297,13 @@ 3.14 // native result acrosss the call. No oop is present 3.15 3.16 __ mr(R3_ARG1, R16_thread); 3.17 +#if defined(ABI_ELFv2) 3.18 + __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), 3.19 + relocInfo::none); 3.20 +#else 3.21 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans), 3.22 relocInfo::none); 3.23 +#endif 3.24 __ bind(sync_check_done); 3.25 3.26 //============================================================================= 3.27 @@ -1413,7 +1420,7 @@ 3.28 // First, pop to caller's frame. 3.29 __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2); 3.30 3.31 - __ push_frame_abi112(0, R11_scratch1); 3.32 + __ push_frame_reg_args(0, R11_scratch1); 3.33 // Get the address of the exception handler. 3.34 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 3.35 R16_thread, 3.36 @@ -2545,7 +2552,7 @@ 3.37 __ mr(R4_ARG2, R3_ARG1); // ARG2 := ARG1 3.38 3.39 // Find the address of the "catch_exception" stub. 3.40 - __ push_frame_abi112(0, R11_scratch1); 3.41 + __ push_frame_reg_args(0, R11_scratch1); 3.42 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 3.43 R16_thread, 3.44 R4_ARG2);
4.1 --- a/src/cpu/ppc/vm/frame_ppc.hpp Thu Feb 20 11:05:12 2014 +0100 4.2 +++ b/src/cpu/ppc/vm/frame_ppc.hpp Thu Mar 06 10:55:28 2014 -0800 4.3 @@ -50,7 +50,7 @@ 4.4 // [C_FRAME] 4.5 // 4.6 // C_FRAME: 4.7 - // 0 [ABI_112] 4.8 + // 0 [ABI_REG_ARGS] 4.9 // 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_{10}) 4.10 // ... 4.11 // 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure) 4.12 @@ -77,7 +77,7 @@ 4.13 // 32 reserved 4.14 // 40 space for TOC (=R2) register for next call 4.15 // 4.16 - // ABI_112: 4.17 + // ABI_REG_ARGS: 4.18 // 0 [ABI_48] 4.19 // 48 CARG_1: spill slot for outgoing arg 1. used by next callee. 4.20 // ... ... 4.21 @@ -95,23 +95,25 @@ 4.22 log_2_of_alignment_in_bits = 7 4.23 }; 4.24 4.25 - // ABI_48: 4.26 - struct abi_48 { 4.27 + // ABI_MINFRAME: 4.28 + struct abi_minframe { 4.29 uint64_t callers_sp; 4.30 uint64_t cr; //_16 4.31 uint64_t lr; 4.32 +#if !defined(ABI_ELFv2) 4.33 uint64_t reserved1; //_16 4.34 uint64_t reserved2; 4.35 +#endif 4.36 uint64_t toc; //_16 4.37 // nothing to add here! 
4.38 // aligned to frame::alignment_in_bytes (16) 4.39 }; 4.40 4.41 enum { 4.42 - abi_48_size = sizeof(abi_48) 4.43 + abi_minframe_size = sizeof(abi_minframe) 4.44 }; 4.45 4.46 - struct abi_112 : abi_48 { 4.47 + struct abi_reg_args : abi_minframe { 4.48 uint64_t carg_1; 4.49 uint64_t carg_2; //_16 4.50 uint64_t carg_3; 4.51 @@ -124,13 +126,13 @@ 4.52 }; 4.53 4.54 enum { 4.55 - abi_112_size = sizeof(abi_112) 4.56 + abi_reg_args_size = sizeof(abi_reg_args) 4.57 }; 4.58 4.59 #define _abi(_component) \ 4.60 - (offset_of(frame::abi_112, _component)) 4.61 + (offset_of(frame::abi_reg_args, _component)) 4.62 4.63 - struct abi_112_spill : abi_112 { 4.64 + struct abi_reg_args_spill : abi_reg_args { 4.65 // additional spill slots 4.66 uint64_t spill_ret; 4.67 uint64_t spill_fret; //_16 4.68 @@ -138,11 +140,11 @@ 4.69 }; 4.70 4.71 enum { 4.72 - abi_112_spill_size = sizeof(abi_112_spill) 4.73 + abi_reg_args_spill_size = sizeof(abi_reg_args_spill) 4.74 }; 4.75 4.76 - #define _abi_112_spill(_component) \ 4.77 - (offset_of(frame::abi_112_spill, _component)) 4.78 + #define _abi_reg_args_spill(_component) \ 4.79 + (offset_of(frame::abi_reg_args_spill, _component)) 4.80 4.81 // non-volatile GPRs: 4.82 4.83 @@ -242,7 +244,7 @@ 4.84 // [ENTRY_FRAME_LOCALS] 4.85 // 4.86 // PARENT_IJAVA_FRAME_ABI: 4.87 - // 0 [ABI_48] 4.88 + // 0 [ABI_MINFRAME] 4.89 // top_frame_sp 4.90 // initial_caller_sp 4.91 // 4.92 @@ -258,7 +260,7 @@ 4.93 4.94 // PARENT_IJAVA_FRAME_ABI 4.95 4.96 - struct parent_ijava_frame_abi : abi_48 { 4.97 + struct parent_ijava_frame_abi : abi_minframe { 4.98 // SOE registers. 4.99 // C2i adapters spill their top-frame stack-pointer here. 4.100 uint64_t top_frame_sp; // carg_1 4.101 @@ -285,7 +287,7 @@ 4.102 uint64_t carg_6_unused; //_16 carg_6 4.103 uint64_t carg_7_unused; // carg_7 4.104 // Use arg8 for storing frame_manager_lr. The size of 4.105 - // top_ijava_frame_abi must match abi_112. 4.106 + // top_ijava_frame_abi must match abi_reg_args. 
4.107 uint64_t frame_manager_lr; //_16 carg_8 4.108 // nothing to add here! 4.109 // aligned to frame::alignment_in_bytes (16) 4.110 @@ -395,8 +397,8 @@ 4.111 intptr_t* fp() const { return _fp; } 4.112 4.113 // Accessors for ABIs 4.114 - inline abi_48* own_abi() const { return (abi_48*) _sp; } 4.115 - inline abi_48* callers_abi() const { return (abi_48*) _fp; } 4.116 + inline abi_minframe* own_abi() const { return (abi_minframe*) _sp; } 4.117 + inline abi_minframe* callers_abi() const { return (abi_minframe*) _fp; } 4.118 4.119 private: 4.120
5.1 --- a/src/cpu/ppc/vm/interpreterRT_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 5.2 +++ b/src/cpu/ppc/vm/interpreterRT_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 5.3 @@ -109,8 +109,10 @@ 5.4 } 5.5 5.6 void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) { 5.7 +#if !defined(ABI_ELFv2) 5.8 // Emit fd for current codebuffer. Needs patching! 5.9 __ emit_fd(); 5.10 +#endif 5.11 5.12 // Generate code to handle arguments. 5.13 iterate(fingerprint); 5.14 @@ -127,11 +129,13 @@ 5.15 // Implementation of SignatureHandlerLibrary 5.16 5.17 void SignatureHandlerLibrary::pd_set_handler(address handler) { 5.18 +#if !defined(ABI_ELFv2) 5.19 // patch fd here. 5.20 FunctionDescriptor* fd = (FunctionDescriptor*) handler; 5.21 5.22 fd->set_entry(handler + (int)sizeof(FunctionDescriptor)); 5.23 assert(fd->toc() == (address)0xcafe, "need to adjust TOC here"); 5.24 +#endif 5.25 } 5.26 5.27
6.1 --- a/src/cpu/ppc/vm/interpreter_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 6.2 +++ b/src/cpu/ppc/vm/interpreter_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 6.3 @@ -128,13 +128,13 @@ 6.4 const Register target_sp = R28_tmp8; 6.5 const FloatRegister floatSlot = F0; 6.6 6.7 - address entry = __ emit_fd(); 6.8 + address entry = __ function_entry(); 6.9 6.10 __ save_LR_CR(R0); 6.11 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); 6.12 // We use target_sp for storing arguments in the C frame. 6.13 __ mr(target_sp, R1_SP); 6.14 - __ push_frame_abi112_nonvolatiles(0, R11_scratch1); 6.15 + __ push_frame_reg_args_nonvolatiles(0, R11_scratch1); 6.16 6.17 __ mr(arg_java, R3_ARG1); 6.18 6.19 @@ -474,7 +474,7 @@ 6.20 6.21 // Push a new C frame and save LR. 6.22 __ save_LR_CR(R0); 6.23 - __ push_frame_abi112(0, R11_scratch1); 6.24 + __ push_frame_reg_args(0, R11_scratch1); 6.25 6.26 // This is not a leaf but we have a JavaFrameAnchor now and we will 6.27 // check (create) exceptions afterward so this is ok.
7.1 --- a/src/cpu/ppc/vm/macroAssembler_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 7.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 7.3 @@ -594,7 +594,13 @@ 7.4 "can't identify emitted call"); 7.5 } else { 7.6 // variant 1: 7.7 - 7.8 +#if defined(ABI_ELFv2) 7.9 + nop(); 7.10 + calculate_address_from_global_toc(R12, dest, true, true, false); 7.11 + mtctr(R12); 7.12 + nop(); 7.13 + nop(); 7.14 +#else 7.15 mr(R0, R11); // spill R11 -> R0. 7.16 7.17 // Load the destination address into CTR, 7.18 @@ -604,6 +610,7 @@ 7.19 mtctr(R11); 7.20 mr(R11, R0); // spill R11 <- R0. 7.21 nop(); 7.22 +#endif 7.23 7.24 // do the call/jump 7.25 if (link) { 7.26 @@ -912,16 +919,16 @@ 7.27 } 7.28 } 7.29 7.30 -// Push a frame of size `bytes' plus abi112 on top. 7.31 -void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) { 7.32 - push_frame(bytes + frame::abi_112_size, tmp); 7.33 +// Push a frame of size `bytes' plus abi_reg_args on top. 7.34 +void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) { 7.35 + push_frame(bytes + frame::abi_reg_args_size, tmp); 7.36 } 7.37 7.38 // Setup up a new C frame with a spill area for non-volatile GPRs and 7.39 // additional space for local variables. 7.40 -void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes, 7.41 - Register tmp) { 7.42 - push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp); 7.43 +void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes, 7.44 + Register tmp) { 7.45 + push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp); 7.46 } 7.47 7.48 // Pop current C frame. 7.49 @@ -929,6 +936,42 @@ 7.50 ld(R1_SP, _abi(callers_sp), R1_SP); 7.51 } 7.52 7.53 +#if defined(ABI_ELFv2) 7.54 +address MacroAssembler::branch_to(Register r_function_entry, bool and_link) { 7.55 + // TODO(asmundak): make sure the caller uses R12 as function descriptor 7.56 + // most of the times. 
7.57 + if (R12 != r_function_entry) { 7.58 + mr(R12, r_function_entry); 7.59 + } 7.60 + mtctr(R12); 7.61 + // Do a call or a branch. 7.62 + if (and_link) { 7.63 + bctrl(); 7.64 + } else { 7.65 + bctr(); 7.66 + } 7.67 + _last_calls_return_pc = pc(); 7.68 + 7.69 + return _last_calls_return_pc; 7.70 +} 7.71 + 7.72 +// Call a C function via a function descriptor and use full C 7.73 +// calling conventions. Updates and returns _last_calls_return_pc. 7.74 +address MacroAssembler::call_c(Register r_function_entry) { 7.75 + return branch_to(r_function_entry, /*and_link=*/true); 7.76 +} 7.77 + 7.78 +// For tail calls: only branch, don't link, so callee returns to caller of this function. 7.79 +address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) { 7.80 + return branch_to(r_function_entry, /*and_link=*/false); 7.81 +} 7.82 + 7.83 +address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) { 7.84 + load_const(R12, function_entry, R0); 7.85 + return branch_to(R12, /*and_link=*/true); 7.86 +} 7.87 + 7.88 +#else 7.89 // Generic version of a call to C function via a function descriptor 7.90 // with variable support for C calling conventions (TOC, ENV, etc.). 7.91 // Updates and returns _last_calls_return_pc. 7.92 @@ -1077,6 +1120,7 @@ 7.93 } 7.94 return _last_calls_return_pc; 7.95 } 7.96 +#endif 7.97 7.98 void MacroAssembler::call_VM_base(Register oop_result, 7.99 Register last_java_sp, 7.100 @@ -1091,8 +1135,11 @@ 7.101 7.102 // ARG1 must hold thread address. 
7.103 mr(R3_ARG1, R16_thread); 7.104 - 7.105 +#if defined(ABI_ELFv2) 7.106 + address return_pc = call_c(entry_point, relocInfo::none); 7.107 +#else 7.108 address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none); 7.109 +#endif 7.110 7.111 reset_last_Java_frame(); 7.112 7.113 @@ -1113,7 +1160,11 @@ 7.114 7.115 void MacroAssembler::call_VM_leaf_base(address entry_point) { 7.116 BLOCK_COMMENT("call_VM_leaf {"); 7.117 +#if defined(ABI_ELFv2) 7.118 + call_c(entry_point, relocInfo::none); 7.119 +#else 7.120 call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none); 7.121 +#endif 7.122 BLOCK_COMMENT("} call_VM_leaf"); 7.123 } 7.124 7.125 @@ -2227,7 +2278,7 @@ 7.126 // VM call need frame to access(write) O register. 7.127 if (needs_frame) { 7.128 save_LR_CR(Rtmp1); 7.129 - push_frame_abi112(0, Rtmp2); 7.130 + push_frame_reg_args(0, Rtmp2); 7.131 } 7.132 7.133 if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded. 7.134 @@ -3006,13 +3057,13 @@ 7.135 mr(R0, tmp); 7.136 // kill tmp 7.137 save_LR_CR(tmp); 7.138 - push_frame_abi112(nbytes_save, tmp); 7.139 + push_frame_reg_args(nbytes_save, tmp); 7.140 // restore tmp 7.141 mr(tmp, R0); 7.142 save_volatile_gprs(R1_SP, 112); // except R0 7.143 - // load FunctionDescriptor** 7.144 + // load FunctionDescriptor** / entry_address * 7.145 load_const(tmp, fd); 7.146 - // load FunctionDescriptor* 7.147 + // load FunctionDescriptor* / entry_address 7.148 ld(tmp, 0, tmp); 7.149 mr(R4_ARG2, oop); 7.150 load_const(R3_ARG1, (address)msg);
8.1 --- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp Thu Feb 20 11:05:12 2014 +0100 8.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp Thu Mar 06 10:55:28 2014 -0800 8.3 @@ -279,12 +279,12 @@ 8.4 // Push a frame of size `bytes'. No abi space provided. 8.5 void push_frame(unsigned int bytes, Register tmp); 8.6 8.7 - // Push a frame of size `bytes' plus abi112 on top. 8.8 - void push_frame_abi112(unsigned int bytes, Register tmp); 8.9 + // Push a frame of size `bytes' plus abi_reg_args on top. 8.10 + void push_frame_reg_args(unsigned int bytes, Register tmp); 8.11 8.12 // Setup up a new C frame with a spill area for non-volatile GPRs and additional 8.13 // space for local variables 8.14 - void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp); 8.15 + void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp); 8.16 8.17 // pop current C frame 8.18 void pop_frame(); 8.19 @@ -296,17 +296,31 @@ 8.20 private: 8.21 address _last_calls_return_pc; 8.22 8.23 +#if defined(ABI_ELFv2) 8.24 + // Generic version of a call to C function. 8.25 + // Updates and returns _last_calls_return_pc. 8.26 + address branch_to(Register function_entry, bool and_link); 8.27 +#else 8.28 // Generic version of a call to C function via a function descriptor 8.29 // with variable support for C calling conventions (TOC, ENV, etc.). 8.30 // updates and returns _last_calls_return_pc. 8.31 address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, 8.32 bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee); 8.33 +#endif 8.34 8.35 public: 8.36 8.37 // Get the pc where the last call will return to. returns _last_calls_return_pc. 8.38 inline address last_calls_return_pc(); 8.39 8.40 +#if defined(ABI_ELFv2) 8.41 + // Call a C function via a function descriptor and use full C 8.42 + // calling conventions. Updates and returns _last_calls_return_pc. 
8.43 + address call_c(Register function_entry); 8.44 + // For tail calls: only branch, don't link, so callee returns to caller of this function. 8.45 + address call_c_and_return_to_caller(Register function_entry); 8.46 + address call_c(address function_entry, relocInfo::relocType rt); 8.47 +#else 8.48 // Call a C function via a function descriptor and use full C 8.49 // calling conventions. Updates and returns _last_calls_return_pc. 8.50 address call_c(Register function_descriptor); 8.51 @@ -315,6 +329,7 @@ 8.52 address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt); 8.53 address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt, 8.54 Register toc); 8.55 +#endif 8.56 8.57 protected: 8.58 8.59 @@ -649,6 +664,11 @@ 8.60 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} 8.61 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {} 8.62 8.63 + // Convenience method returning function entry. For the ELFv1 case 8.64 + // creates function descriptor at the current address and returns 8.65 + // the pointer to it. For the ELFv2 case returns the current address. 8.66 + inline address function_entry(); 8.67 + 8.68 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) 8.69 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) 8.70
9.1 --- a/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Thu Feb 20 11:05:12 2014 +0100 9.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Thu Mar 06 10:55:28 2014 -0800 9.3 @@ -385,4 +385,10 @@ 9.4 twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16); 9.5 } 9.6 9.7 +#if defined(ABI_ELFv2) 9.8 +inline address MacroAssembler::function_entry() { return pc(); } 9.9 +#else 9.10 +inline address MacroAssembler::function_entry() { return emit_fd(); } 9.11 +#endif 9.12 + 9.13 #endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
10.1 --- a/src/cpu/ppc/vm/methodHandles_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 10.2 +++ b/src/cpu/ppc/vm/methodHandles_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 10.3 @@ -453,11 +453,11 @@ 10.4 10.5 if (Verbose) { 10.6 tty->print_cr("Registers:"); 10.7 - const int abi_offset = frame::abi_112_size / 8; 10.8 + const int abi_offset = frame::abi_reg_args_size / 8; 10.9 for (int i = R3->encoding(); i <= R12->encoding(); i++) { 10.10 Register r = as_Register(i); 10.11 int count = i - R3->encoding(); 10.12 - // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)). 10.13 + // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)). 10.14 tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]); 10.15 if ((count + 1) % 4 == 0) { 10.16 tty->cr(); 10.17 @@ -524,9 +524,9 @@ 10.18 __ save_LR_CR(R0); 10.19 __ mr(R0, R1_SP); // saved_sp 10.20 assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0"); 10.21 - // push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bit 10.22 - __ push_frame_abi112(nbytes_save, R0); 10.23 - __ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0. 10.24 + // Push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bit. 10.25 + __ push_frame_reg_args(nbytes_save, R0); 10.26 + __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0. 10.27 10.28 __ load_const(R3_ARG1, (address)adaptername); 10.29 __ mr(R4_ARG2, R23_method_handle);
11.1 --- a/src/cpu/ppc/vm/ppc.ad Thu Feb 20 11:05:12 2014 +0100 11.2 +++ b/src/cpu/ppc/vm/ppc.ad Thu Mar 06 10:55:28 2014 -0800 11.3 @@ -1008,7 +1008,11 @@ 11.4 } 11.5 11.6 int MachCallRuntimeNode::ret_addr_offset() { 11.7 +#if defined(ABI_ELFv2) 11.8 + return 28; 11.9 +#else 11.10 return 40; 11.11 +#endif 11.12 } 11.13 11.14 //============================================================================= 11.15 @@ -3686,6 +3690,10 @@ 11.16 MacroAssembler _masm(&cbuf); 11.17 const address start_pc = __ pc(); 11.18 11.19 +#if defined(ABI_ELFv2) 11.20 + address entry= !($meth$$method) ? NULL : (address)$meth$$method; 11.21 + __ call_c(entry, relocInfo::runtime_call_type); 11.22 +#else 11.23 // The function we're going to call. 11.24 FunctionDescriptor fdtemp; 11.25 const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method; 11.26 @@ -3696,6 +3704,7 @@ 11.27 // Put entry, env, toc into the constant pool, this needs up to 3 constant 11.28 // pool entries; call_c_using_toc will optimize the call. 11.29 __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc); 11.30 +#endif 11.31 11.32 // Check the ret_addr_offset. 11.33 assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc, 11.34 @@ -3711,20 +3720,25 @@ 11.35 __ mtctr($src$$Register); 11.36 %} 11.37 11.38 - // postalloc expand emitter for runtime leaf calls. 11.39 + // Postalloc expand emitter for runtime leaf calls. 11.40 enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{ 11.41 + loadConLNodesTuple loadConLNodes_Entry; 11.42 +#if defined(ABI_ELFv2) 11.43 + jlong entry_address = (jlong) this->entry_point(); 11.44 + assert(entry_address, "need address here"); 11.45 + loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address), 11.46 + OptoReg::Name(R12_H_num), OptoReg::Name(R12_num)); 11.47 +#else 11.48 // Get the struct that describes the function we are about to call. 
11.49 FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point(); 11.50 assert(fd, "need fd here"); 11.51 + jlong entry_address = (jlong) fd->entry(); 11.52 // new nodes 11.53 - loadConLNodesTuple loadConLNodes_Entry; 11.54 loadConLNodesTuple loadConLNodes_Env; 11.55 loadConLNodesTuple loadConLNodes_Toc; 11.56 - MachNode *mtctr = NULL; 11.57 - MachCallLeafNode *call = NULL; 11.58 11.59 // Create nodes and operands for loading the entry point. 11.60 - loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->entry()), 11.61 + loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address), 11.62 OptoReg::Name(R12_H_num), OptoReg::Name(R12_num)); 11.63 11.64 11.65 @@ -3745,8 +3759,9 @@ 11.66 // Create nodes and operands for loading the Toc point. 11.67 loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()), 11.68 OptoReg::Name(R2_H_num), OptoReg::Name(R2_num)); 11.69 +#endif // ABI_ELFv2 11.70 // mtctr node 11.71 - mtctr = new (C) CallLeafDirect_mtctrNode(); 11.72 + MachNode *mtctr = new (C) CallLeafDirect_mtctrNode(); 11.73 11.74 assert(loadConLNodes_Entry._last != NULL, "entry must exist"); 11.75 mtctr->add_req(0, loadConLNodes_Entry._last); 11.76 @@ -3755,10 +3770,10 @@ 11.77 mtctr->_opnds[1] = new (C) iRegLdstOper(); 11.78 11.79 // call node 11.80 - call = new (C) CallLeafDirectNode(); 11.81 + MachCallLeafNode *call = new (C) CallLeafDirectNode(); 11.82 11.83 call->_opnds[0] = _opnds[0]; 11.84 - call->_opnds[1] = new (C) methodOper((intptr_t) fd->entry()); // may get set later 11.85 + call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later. 11.86 11.87 // Make the new call node look like the old one. 11.88 call->_name = _name; 11.89 @@ -3785,8 +3800,10 @@ 11.90 // These must be reqired edges, as the registers are live up to 11.91 // the call. Else the constants are handled as kills. 
11.92 call->add_req(mtctr); 11.93 +#if !defined(ABI_ELFv2) 11.94 call->add_req(loadConLNodes_Env._last); 11.95 call->add_req(loadConLNodes_Toc._last); 11.96 +#endif 11.97 11.98 // ...as well as prec 11.99 for (uint i = req(); i < len(); ++i) { 11.100 @@ -3799,10 +3816,12 @@ 11.101 // Insert the new nodes. 11.102 if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi); 11.103 if (loadConLNodes_Entry._last) nodes->push(loadConLNodes_Entry._last); 11.104 +#if !defined(ABI_ELFv2) 11.105 if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi); 11.106 if (loadConLNodes_Env._last) nodes->push(loadConLNodes_Env._last); 11.107 if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi); 11.108 if (loadConLNodes_Toc._last) nodes->push(loadConLNodes_Toc._last); 11.109 +#endif 11.110 nodes->push(mtctr); 11.111 nodes->push(call); 11.112 %} 11.113 @@ -3849,7 +3868,7 @@ 11.114 // out_preserve_stack_slots for calls to C. Supports the var-args 11.115 // backing area for register parms. 11.116 // 11.117 - varargs_C_out_slots_killed(((frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size)); 11.118 + varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size)); 11.119 11.120 // The after-PROLOG location of the return address. Location of 11.121 // return address specifies a type (REG or STACK) and a number
12.1 --- a/src/cpu/ppc/vm/runtime_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 12.2 +++ b/src/cpu/ppc/vm/runtime_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 12.3 @@ -87,7 +87,7 @@ 12.4 12.5 address start = __ pc(); 12.6 12.7 - int frame_size_in_bytes = frame::abi_112_size; 12.8 + int frame_size_in_bytes = frame::abi_reg_args_size; 12.9 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 12.10 12.11 // Exception pc is 'return address' for stack walker. 12.12 @@ -99,7 +99,7 @@ 12.13 12.14 // Save callee-saved registers. 12.15 // Push a C frame for the exception blob. It is needed for the C call later on. 12.16 - __ push_frame_abi112(0, R11_scratch1); 12.17 + __ push_frame_reg_args(0, R11_scratch1); 12.18 12.19 // This call does all the hard work. It checks if an exception handler 12.20 // exists in the method. 12.21 @@ -109,8 +109,12 @@ 12.22 __ set_last_Java_frame(/*sp=*/R1_SP, noreg); 12.23 12.24 __ mr(R3_ARG1, R16_thread); 12.25 +#if defined(ABI_ELFv2) 12.26 + __ call_c((address) OptoRuntime::handle_exception_C, relocInfo::none); 12.27 +#else 12.28 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C), 12.29 relocInfo::none); 12.30 +#endif 12.31 address calls_return_pc = __ last_calls_return_pc(); 12.32 # ifdef ASSERT 12.33 __ cmpdi(CCR0, R3_RET, 0); 12.34 @@ -162,7 +166,11 @@ 12.35 __ bind(mh_callsite); 12.36 __ mr(R31, R3_RET); // Save branch address. 12.37 __ mr(R3_ARG1, R16_thread); 12.38 +#if defined(ABI_ELFv2) 12.39 + __ call_c((address) adjust_SP_for_methodhandle_callsite, relocInfo::none); 12.40 +#else 12.41 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none); 12.42 +#endif 12.43 // Returns unextended_sp in R3_RET. 12.44 12.45 __ mtctr(R31); // Move address of exception handler to SR_CTR.
13.1 --- a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 13.2 +++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 13.3 @@ -67,7 +67,7 @@ 13.4 return_pc_is_thread_saved_exception_pc 13.5 }; 13.6 13.7 - static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm, 13.8 + static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 13.9 int* out_frame_size_in_bytes, 13.10 bool generate_oop_map, 13.11 int return_pc_adjustment, 13.12 @@ -200,12 +200,12 @@ 13.13 RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register 13.14 }; 13.15 13.16 -OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm, 13.17 +OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 13.18 int* out_frame_size_in_bytes, 13.19 bool generate_oop_map, 13.20 int return_pc_adjustment, 13.21 ReturnPCLocation return_pc_location) { 13.22 - // Push an abi112-frame and store all registers which may be live. 13.23 + // Push an abi_reg_args-frame and store all registers which may be live. 13.24 // If requested, create an OopMap: Record volatile registers as 13.25 // callee-save values in an OopMap so their save locations will be 13.26 // propagated to the RegisterMap of the caller frame during 13.27 @@ -221,7 +221,7 @@ 13.28 sizeof(RegisterSaver::LiveRegType); 13.29 const int register_save_size = regstosave_num * reg_size; 13.30 const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes) 13.31 - + frame::abi_112_size; 13.32 + + frame::abi_reg_args_size; 13.33 *out_frame_size_in_bytes = frame_size_in_bytes; 13.34 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 13.35 const int register_save_offset = frame_size_in_bytes - register_save_size; 13.36 @@ -229,7 +229,7 @@ 13.37 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words. 13.38 OopMap* map = generate_oop_map ? 
new OopMap(frame_size_in_slots, 0) : NULL; 13.39 13.40 - BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {"); 13.41 + BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {"); 13.42 13.43 // Save r30 in the last slot of the not yet pushed frame so that we 13.44 // can use it as scratch reg. 13.45 @@ -294,7 +294,7 @@ 13.46 offset += reg_size; 13.47 } 13.48 13.49 - BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers"); 13.50 + BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers"); 13.51 13.52 // And we're done. 13.53 return map; 13.54 @@ -699,15 +699,19 @@ 13.55 13.56 int i; 13.57 VMReg reg; 13.58 - // Leave room for C-compatible ABI_112. 13.59 - int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size; 13.60 + // Leave room for C-compatible ABI_REG_ARGS. 13.61 + int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size; 13.62 int arg = 0; 13.63 int freg = 0; 13.64 13.65 // Avoid passing C arguments in the wrong stack slots. 13.66 +#if defined(ABI_ELFv2) 13.67 + assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96, 13.68 + "passing C arguments in wrong stack slots"); 13.69 +#else 13.70 assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112, 13.71 "passing C arguments in wrong stack slots"); 13.72 - 13.73 +#endif 13.74 // We fill-out regs AND regs2 if an argument must be passed in a 13.75 // register AND in a stack slot. If regs2 is NULL in such a 13.76 // situation, we bail-out with a fatal error. 
13.77 @@ -1504,7 +1508,11 @@ 13.78 13.79 __ block_comment("block_for_jni_critical"); 13.80 address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical); 13.81 +#if defined(ABI_ELFv2) 13.82 + __ call_c(entry_point, relocInfo::runtime_call_type); 13.83 +#else 13.84 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type); 13.85 +#endif 13.86 address start = __ pc() - __ offset(), 13.87 calls_return_pc = __ last_calls_return_pc(); 13.88 oop_maps->add_gc_map(calls_return_pc - start, map); 13.89 @@ -1877,7 +1885,7 @@ 13.90 // Layout of the native wrapper frame: 13.91 // (stack grows upwards, memory grows downwards) 13.92 // 13.93 - // NW [ABI_112] <-- 1) R1_SP 13.94 + // NW [ABI_REG_ARGS] <-- 1) R1_SP 13.95 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 13.96 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives) 13.97 // klass <-- 4) R1_SP + klass_offset 13.98 @@ -2211,8 +2219,8 @@ 13.99 // slow case of monitor enter. Inline a special case of call_VM that 13.100 // disallows any pending_exception. 13.101 13.102 - // Save argument registers and leave room for C-compatible ABI_112. 13.103 - int frame_size = frame::abi_112_size + 13.104 + // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 
13.105 + int frame_size = frame::abi_reg_args_size + 13.106 round_to(total_c_args * wordSize, frame::alignment_in_bytes); 13.107 __ mr(R11_scratch1, R1_SP); 13.108 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 13.109 @@ -2250,9 +2258,12 @@ 13.110 13.111 // The JNI call 13.112 // -------------------------------------------------------------------------- 13.113 - 13.114 +#if defined(ABI_ELFv2) 13.115 + __ call_c(native_func, relocInfo::runtime_call_type); 13.116 +#else 13.117 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func; 13.118 __ call_c(fd_native_method, relocInfo::runtime_call_type); 13.119 +#endif 13.120 13.121 13.122 // Now, we are back from the native code. 13.123 @@ -2724,7 +2735,7 @@ 13.124 OopMapSet *oop_maps = new OopMapSet(); 13.125 13.126 // size of ABI112 plus spill slots for R3_RET and F1_RET. 13.127 - const int frame_size_in_bytes = frame::abi_112_spill_size; 13.128 + const int frame_size_in_bytes = frame::abi_reg_args_spill_size; 13.129 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 13.130 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 13.131 13.132 @@ -2757,11 +2768,11 @@ 13.133 13.134 // Push the "unpack frame" 13.135 // Save everything in sight. 
13.136 - map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm, 13.137 - &first_frame_size_in_bytes, 13.138 - /*generate_oop_map=*/ true, 13.139 - return_pc_adjustment_no_exception, 13.140 - RegisterSaver::return_pc_is_lr); 13.141 + map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 13.142 + &first_frame_size_in_bytes, 13.143 + /*generate_oop_map=*/ true, 13.144 + return_pc_adjustment_no_exception, 13.145 + RegisterSaver::return_pc_is_lr); 13.146 assert(map != NULL, "OopMap must have been created"); 13.147 13.148 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 13.149 @@ -2787,11 +2798,11 @@ 13.150 // Push the "unpack frame". 13.151 // Save everything in sight. 13.152 assert(R4 == R4_ARG2, "exception pc must be in r4"); 13.153 - RegisterSaver::push_frame_abi112_and_save_live_registers(masm, 13.154 - &first_frame_size_in_bytes, 13.155 - /*generate_oop_map=*/ false, 13.156 - return_pc_adjustment_exception, 13.157 - RegisterSaver::return_pc_is_r4); 13.158 + RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 13.159 + &first_frame_size_in_bytes, 13.160 + /*generate_oop_map=*/ false, 13.161 + return_pc_adjustment_exception, 13.162 + RegisterSaver::return_pc_is_r4); 13.163 13.164 // Deopt during an exception. Save exec mode for unpack_frames. 13.165 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 13.166 @@ -2876,8 +2887,8 @@ 13.167 // ...). 13.168 13.169 // Spill live volatile registers since we'll do a call. 13.170 - __ std( R3_RET, _abi_112_spill(spill_ret), R1_SP); 13.171 - __ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP); 13.172 + __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 13.173 + __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 13.174 13.175 // Let the unpacker layout information in the skeletal frames just 13.176 // allocated. 13.177 @@ -2889,8 +2900,8 @@ 13.178 __ reset_last_Java_frame(); 13.179 13.180 // Restore the volatiles saved above. 
13.181 - __ ld( R3_RET, _abi_112_spill(spill_ret), R1_SP); 13.182 - __ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP); 13.183 + __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 13.184 + __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 13.185 13.186 // Pop the unpack frame. 13.187 __ pop_frame(); 13.188 @@ -2930,7 +2941,7 @@ 13.189 Register unc_trap_reg = R23_tmp3; 13.190 13.191 OopMapSet* oop_maps = new OopMapSet(); 13.192 - int frame_size_in_bytes = frame::abi_112_size; 13.193 + int frame_size_in_bytes = frame::abi_reg_args_size; 13.194 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 13.195 13.196 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 13.197 @@ -2943,7 +2954,7 @@ 13.198 __ save_LR_CR(R11_scratch1); 13.199 13.200 // Push an "uncommon_trap" frame. 13.201 - __ push_frame_abi112(0, R11_scratch1); 13.202 + __ push_frame_reg_args(0, R11_scratch1); 13.203 13.204 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 13.205 13.206 @@ -2996,7 +3007,7 @@ 13.207 // interpreter frames just created. 13.208 13.209 // Push a simple "unpack frame" here. 13.210 - __ push_frame_abi112(0, R11_scratch1); 13.211 + __ push_frame_reg_args(0, R11_scratch1); 13.212 13.213 // stack: (unpack frame, skeletal interpreter frame, ..., optional 13.214 // skeletal interpreter frame, optional c2i, caller of deoptee, 13.215 @@ -3064,11 +3075,11 @@ 13.216 } 13.217 13.218 // Save registers, fpu state, and flags. 13.219 - map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm, 13.220 - &frame_size_in_bytes, 13.221 - /*generate_oop_map=*/ true, 13.222 - /*return_pc_adjustment=*/0, 13.223 - return_pc_location); 13.224 + map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 13.225 + &frame_size_in_bytes, 13.226 + /*generate_oop_map=*/ true, 13.227 + /*return_pc_adjustment=*/0, 13.228 + return_pc_location); 13.229 13.230 // The following is basically a call_VM. 
However, we need the precise 13.231 // address of the call in order to generate an oopmap. Hence, we do all the 13.232 @@ -3151,11 +3162,11 @@ 13.233 13.234 address start = __ pc(); 13.235 13.236 - map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm, 13.237 - &frame_size_in_bytes, 13.238 - /*generate_oop_map*/ true, 13.239 - /*return_pc_adjustment*/ 0, 13.240 - RegisterSaver::return_pc_is_lr); 13.241 + map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 13.242 + &frame_size_in_bytes, 13.243 + /*generate_oop_map*/ true, 13.244 + /*return_pc_adjustment*/ 0, 13.245 + RegisterSaver::return_pc_is_lr); 13.246 13.247 // Use noreg as last_Java_pc, the return pc will be reconstructed 13.248 // from the physical frame.
14.1 --- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 14.2 +++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 14.3 @@ -79,11 +79,11 @@ 14.4 14.5 StubCodeMark mark(this, "StubRoutines", "call_stub"); 14.6 14.7 - address start = __ emit_fd(); 14.8 + address start = __ function_entry(); 14.9 14.10 // some sanity checks 14.11 - assert((sizeof(frame::abi_48) % 16) == 0, "unaligned"); 14.12 - assert((sizeof(frame::abi_112) % 16) == 0, "unaligned"); 14.13 + assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned"); 14.14 + assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned"); 14.15 assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned"); 14.16 assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned"); 14.17 assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned"); 14.18 @@ -444,7 +444,7 @@ 14.19 14.20 // Save LR/CR and copy exception pc (LR) into R4_ARG2. 14.21 __ save_LR_CR(R4_ARG2); 14.22 - __ push_frame_abi112(0, R0); 14.23 + __ push_frame_reg_args(0, R0); 14.24 // Find exception handler. 14.25 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 14.26 SharedRuntime::exception_handler_for_return_address), 14.27 @@ -519,7 +519,7 @@ 14.28 MacroAssembler* masm = new MacroAssembler(&code); 14.29 14.30 OopMapSet* oop_maps = new OopMapSet(); 14.31 - int frame_size_in_bytes = frame::abi_112_size; 14.32 + int frame_size_in_bytes = frame::abi_reg_args_size; 14.33 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 14.34 14.35 StubCodeMark mark(this, "StubRoutines", "throw_exception"); 14.36 @@ -529,7 +529,7 @@ 14.37 __ save_LR_CR(R11_scratch1); 14.38 14.39 // Push a frame. 
14.40 - __ push_frame_abi112(0, R11_scratch1); 14.41 + __ push_frame_reg_args(0, R11_scratch1); 14.42 14.43 address frame_complete_pc = __ pc(); 14.44 14.45 @@ -551,8 +551,11 @@ 14.46 if (arg2 != noreg) { 14.47 __ mr(R5_ARG3, arg2); 14.48 } 14.49 - __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), 14.50 - relocInfo::none); 14.51 +#if defined(ABI_ELFv2) 14.52 + __ call_c(runtime_entry, relocInfo::none); 14.53 +#else 14.54 + __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none); 14.55 +#endif 14.56 14.57 // Set an oopmap for the call site. 14.58 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 14.59 @@ -614,7 +617,7 @@ 14.60 // With G1, don't generate the call if we statically know that the target in uninitialized 14.61 if (!dest_uninitialized) { 14.62 const int spill_slots = 4 * wordSize; 14.63 - const int frame_size = frame::abi_112_size + spill_slots; 14.64 + const int frame_size = frame::abi_reg_args_size + spill_slots; 14.65 Label filtered; 14.66 14.67 // Is marking active? 14.68 @@ -628,7 +631,7 @@ 14.69 __ beq(CCR0, filtered); 14.70 14.71 __ save_LR_CR(R0); 14.72 - __ push_frame_abi112(spill_slots, R0); 14.73 + __ push_frame_reg_args(spill_slots, R0); 14.74 __ std(from, frame_size - 1 * wordSize, R1_SP); 14.75 __ std(to, frame_size - 2 * wordSize, R1_SP); 14.76 __ std(count, frame_size - 3 * wordSize, R1_SP); 14.77 @@ -672,7 +675,7 @@ 14.78 if (branchToEnd) { 14.79 __ save_LR_CR(R0); 14.80 // We need this frame only to spill LR. 14.81 - __ push_frame_abi112(0, R0); 14.82 + __ push_frame_reg_args(0, R0); 14.83 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); 14.84 __ pop_frame(); 14.85 __ restore_LR_CR(R0); 14.86 @@ -742,7 +745,7 @@ 14.87 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8"); 14.88 14.89 // Implemented as in ClearArray. 
14.90 - address start = __ emit_fd(); 14.91 + address start = __ function_entry(); 14.92 14.93 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned) 14.94 Register cnt_dwords_reg = R4_ARG2; // count (in dwords) 14.95 @@ -820,7 +823,7 @@ 14.96 // 14.97 address generate_handler_for_unsafe_access() { 14.98 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 14.99 - address start = __ emit_fd(); 14.100 + address start = __ function_entry(); 14.101 __ unimplemented("StubRoutines::handler_for_unsafe_access", 93); 14.102 return start; 14.103 } 14.104 @@ -861,7 +864,7 @@ 14.105 // to read from the safepoint polling page. 14.106 address generate_load_from_poll() { 14.107 StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll"); 14.108 - address start = __ emit_fd(); 14.109 + address start = __ function_entry(); 14.110 __ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port 14.111 return start; 14.112 } 14.113 @@ -885,7 +888,7 @@ 14.114 // 14.115 address generate_fill(BasicType t, bool aligned, const char* name) { 14.116 StubCodeMark mark(this, "StubRoutines", name); 14.117 - address start = __ emit_fd(); 14.118 + address start = __ function_entry(); 14.119 14.120 const Register to = R3_ARG1; // source array address 14.121 const Register value = R4_ARG2; // fill value 14.122 @@ -1123,7 +1126,7 @@ 14.123 // 14.124 address generate_disjoint_byte_copy(bool aligned, const char * name) { 14.125 StubCodeMark mark(this, "StubRoutines", name); 14.126 - address start = __ emit_fd(); 14.127 + address start = __ function_entry(); 14.128 14.129 Register tmp1 = R6_ARG4; 14.130 Register tmp2 = R7_ARG5; 14.131 @@ -1254,15 +1257,21 @@ 14.132 // 14.133 address generate_conjoint_byte_copy(bool aligned, const char * name) { 14.134 StubCodeMark mark(this, "StubRoutines", name); 14.135 - address start = __ emit_fd(); 14.136 + address start = __ function_entry(); 14.137 14.138 Register tmp1 = R6_ARG4; 14.139 Register tmp2 = R7_ARG5; 14.140 
Register tmp3 = R8_ARG6; 14.141 14.142 +#if defined(ABI_ELFv2) 14.143 + address nooverlap_target = aligned ? 14.144 + StubRoutines::arrayof_jbyte_disjoint_arraycopy() : 14.145 + StubRoutines::jbyte_disjoint_arraycopy(); 14.146 +#else 14.147 address nooverlap_target = aligned ? 14.148 ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() : 14.149 ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry(); 14.150 +#endif 14.151 14.152 array_overlap_test(nooverlap_target, 0); 14.153 // Do reverse copy. We assume the case of actual overlap is rare enough 14.154 @@ -1345,7 +1354,7 @@ 14.155 Register tmp3 = R8_ARG6; 14.156 Register tmp4 = R9_ARG7; 14.157 14.158 - address start = __ emit_fd(); 14.159 + address start = __ function_entry(); 14.160 14.161 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8; 14.162 // don't try anything fancy if arrays don't have many elements 14.163 @@ -1474,15 +1483,21 @@ 14.164 // 14.165 address generate_conjoint_short_copy(bool aligned, const char * name) { 14.166 StubCodeMark mark(this, "StubRoutines", name); 14.167 - address start = __ emit_fd(); 14.168 + address start = __ function_entry(); 14.169 14.170 Register tmp1 = R6_ARG4; 14.171 Register tmp2 = R7_ARG5; 14.172 Register tmp3 = R8_ARG6; 14.173 14.174 +#if defined(ABI_ELFv2) 14.175 + address nooverlap_target = aligned ? 14.176 + StubRoutines::arrayof_jshort_disjoint_arraycopy() : 14.177 + StubRoutines::jshort_disjoint_arraycopy(); 14.178 +#else 14.179 address nooverlap_target = aligned ? 
14.180 ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() : 14.181 ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry(); 14.182 +#endif 14.183 14.184 array_overlap_test(nooverlap_target, 1); 14.185 14.186 @@ -1597,7 +1612,7 @@ 14.187 // 14.188 address generate_disjoint_int_copy(bool aligned, const char * name) { 14.189 StubCodeMark mark(this, "StubRoutines", name); 14.190 - address start = __ emit_fd(); 14.191 + address start = __ function_entry(); 14.192 generate_disjoint_int_copy_core(aligned); 14.193 __ blr(); 14.194 return start; 14.195 @@ -1681,11 +1696,17 @@ 14.196 // 14.197 address generate_conjoint_int_copy(bool aligned, const char * name) { 14.198 StubCodeMark mark(this, "StubRoutines", name); 14.199 - address start = __ emit_fd(); 14.200 + address start = __ function_entry(); 14.201 14.202 +#if defined(ABI_ELFv2) 14.203 + address nooverlap_target = aligned ? 14.204 + StubRoutines::arrayof_jint_disjoint_arraycopy() : 14.205 + StubRoutines::jint_disjoint_arraycopy(); 14.206 +#else 14.207 address nooverlap_target = aligned ? 
14.208 ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() : 14.209 ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry(); 14.210 +#endif 14.211 14.212 array_overlap_test(nooverlap_target, 2); 14.213 14.214 @@ -1767,7 +1788,7 @@ 14.215 // 14.216 address generate_disjoint_long_copy(bool aligned, const char * name) { 14.217 StubCodeMark mark(this, "StubRoutines", name); 14.218 - address start = __ emit_fd(); 14.219 + address start = __ function_entry(); 14.220 generate_disjoint_long_copy_core(aligned); 14.221 __ blr(); 14.222 14.223 @@ -1849,11 +1870,17 @@ 14.224 // 14.225 address generate_conjoint_long_copy(bool aligned, const char * name) { 14.226 StubCodeMark mark(this, "StubRoutines", name); 14.227 - address start = __ emit_fd(); 14.228 + address start = __ function_entry(); 14.229 14.230 +#if defined(ABI_ELFv2) 14.231 + address nooverlap_target = aligned ? 14.232 + StubRoutines::arrayof_jlong_disjoint_arraycopy() : 14.233 + StubRoutines::jlong_disjoint_arraycopy(); 14.234 +#else 14.235 address nooverlap_target = aligned ? 14.236 ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() : 14.237 ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry(); 14.238 +#endif 14.239 14.240 array_overlap_test(nooverlap_target, 3); 14.241 generate_conjoint_long_copy_core(aligned); 14.242 @@ -1875,11 +1902,17 @@ 14.243 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 14.244 StubCodeMark mark(this, "StubRoutines", name); 14.245 14.246 - address start = __ emit_fd(); 14.247 + address start = __ function_entry(); 14.248 14.249 +#if defined(ABI_ELFv2) 14.250 + address nooverlap_target = aligned ? 14.251 + StubRoutines::arrayof_oop_disjoint_arraycopy() : 14.252 + StubRoutines::oop_disjoint_arraycopy(); 14.253 +#else 14.254 address nooverlap_target = aligned ? 
14.255 ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() : 14.256 ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry(); 14.257 +#endif 14.258 14.259 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7); 14.260 14.261 @@ -1910,7 +1943,7 @@ 14.262 // 14.263 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 14.264 StubCodeMark mark(this, "StubRoutines", name); 14.265 - address start = __ emit_fd(); 14.266 + address start = __ function_entry(); 14.267 14.268 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7); 14.269 14.270 @@ -1991,7 +2024,7 @@ 14.271 StubCodeMark mark(this, "StubRoutines", name); 14.272 14.273 // Entry point, pc or function descriptor. 14.274 - *entry = __ emit_fd(); 14.275 + *entry = __ function_entry(); 14.276 14.277 // Load *adr into R4_ARG2, may fault. 14.278 *fault_pc = __ pc();
15.1 --- a/src/cpu/ppc/vm/vm_version_ppc.cpp Thu Feb 20 11:05:12 2014 +0100 15.2 +++ b/src/cpu/ppc/vm/vm_version_ppc.cpp Thu Mar 06 10:55:28 2014 -0800 15.3 @@ -24,7 +24,8 @@ 15.4 */ 15.5 15.6 #include "precompiled.hpp" 15.7 -#include "assembler_ppc.inline.hpp" 15.8 +#include "asm/assembler.inline.hpp" 15.9 +#include "asm/macroAssembler.inline.hpp" 15.10 #include "compiler/disassembler.hpp" 15.11 #include "memory/resourceArea.hpp" 15.12 #include "runtime/java.hpp" 15.13 @@ -168,7 +169,7 @@ 15.14 15.15 uint32_t *code = (uint32_t *)a->pc(); 15.16 // Emit code. 15.17 - void (*test1)() = (void(*)())(void *)a->emit_fd(); 15.18 + void (*test1)() = (void(*)())(void *)a->function_entry(); 15.19 15.20 Label l1; 15.21 15.22 @@ -242,7 +243,7 @@ 15.23 a->blr(); 15.24 15.25 // Emit code. 15.26 - void (*test2)() = (void(*)())(void *)a->emit_fd(); 15.27 + void (*test2)() = (void(*)())(void *)a->function_entry(); 15.28 // uint32_t *code = (uint32_t *)a->pc(); 15.29 15.30 Label l2; 15.31 @@ -383,8 +384,12 @@ 15.32 #endif // COMPILER2 15.33 15.34 void VM_Version::determine_features() { 15.35 +#if defined(ABI_ELFv2) 15.36 + const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect. 15.37 +#else 15.38 // 7 InstWords for each call (function descriptor + blr instruction). 15.39 const int code_size = (num_features+1+2*7)*BytesPerInstWord; 15.40 +#endif 15.41 int features = 0; 15.42 15.43 // create test area 15.44 @@ -398,7 +403,7 @@ 15.45 MacroAssembler* a = new MacroAssembler(&cb); 15.46 15.47 // Emit code. 15.48 - void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->emit_fd(); 15.49 + void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry(); 15.50 uint32_t *code = (uint32_t *)a->pc(); 15.51 // Don't use R0 in ldarx. 15.52 // Keep R3_ARG1 unmodified, it contains &field (see below). 
15.53 @@ -415,7 +420,7 @@ 15.54 a->blr(); 15.55 15.56 // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it. 15.57 - void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd(); 15.58 + void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry(); 15.59 a->dcbz(R3_ARG1); // R3_ARG1 = addr 15.60 a->blr(); 15.61
16.1 --- a/src/share/vm/utilities/elfFile.cpp Thu Feb 20 11:05:12 2014 +0100 16.2 +++ b/src/share/vm/utilities/elfFile.cpp Thu Mar 06 10:55:28 2014 -0800 16.3 @@ -140,7 +140,7 @@ 16.4 } 16.5 } 16.6 16.7 -#if defined(PPC64) 16.8 +#if defined(PPC64) && !defined(ABI_ELFv2) 16.9 // Now read the .opd section wich contains the PPC64 function descriptor table. 16.10 // The .opd section is only available on PPC64 (see for example: 16.11 // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html)