src/cpu/ppc/vm/sharedRuntime_ppc.cpp

/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_ppc.inline.hpp"
#include "adfiles/ad_ppc_64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_r4,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location);
  static void    restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                      int frame_size_in_bytes,
                                                      bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg( R0 ),
  //RegisterSaver_LiveIntReg( R1 ), // stack pointer
  RegisterSaver_LiveIntReg( R2 ),
  RegisterSaver_LiveIntReg( R3 ),
  RegisterSaver_LiveIntReg( R4 ),
  RegisterSaver_LiveIntReg( R5 ),
  RegisterSaver_LiveIntReg( R6 ),
  RegisterSaver_LiveIntReg( R7 ),
  RegisterSaver_LiveIntReg( R8 ),
  RegisterSaver_LiveIntReg( R9 ),
  RegisterSaver_LiveIntReg( R10 ),
  RegisterSaver_LiveIntReg( R11 ),
  RegisterSaver_LiveIntReg( R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg( R14 ),
  RegisterSaver_LiveIntReg( R15 ),
  RegisterSaver_LiveIntReg( R16 ),
  RegisterSaver_LiveIntReg( R17 ),
  RegisterSaver_LiveIntReg( R18 ),
  RegisterSaver_LiveIntReg( R19 ),
  RegisterSaver_LiveIntReg( R20 ),
  RegisterSaver_LiveIntReg( R21 ),
  RegisterSaver_LiveIntReg( R22 ),
  RegisterSaver_LiveIntReg( R23 ),
  RegisterSaver_LiveIntReg( R24 ),
  RegisterSaver_LiveIntReg( R25 ),
  RegisterSaver_LiveIntReg( R26 ),
  RegisterSaver_LiveIntReg( R27 ),
  RegisterSaver_LiveIntReg( R28 ),
  RegisterSaver_LiveIntReg( R29 ),
  RegisterSaver_LiveIntReg( R31 ),
  RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
};

OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
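  //
  // Illustrative frame sketch (not a comment from the original source; it is
  // derived from the sizes and save order computed below, with R30 occupying
  // the last save slot):
  //
  //       caller's frame
  //     ----------------------------------------  <- old R1_SP (high addresses)
  //       register save area (regstosave_num * reg_size):
  //         CTR, F0..F31, R0, R2..R12, R14..R29, R31, R30
  //       alignment padding, if any
  //       abi_reg_args area (frame::abi_reg_args_size)
  //     ----------------------------------------  <- new R1_SP (low addresses)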

  int i;
  int offset;

  // Calculate frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_reg_args_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // Save r30 in the last slot of the not yet pushed frame so that we
  // can use it as scratch reg.
  __ std(R30, -reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr: __ mflr(R30); break;
    case return_pc_is_r4: __ mr(R30, R4); break;
    case return_pc_is_thread_saved_exception_pc:
      __ ld(R30, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_adjustment != 0)
    __ addi(R30, R30, return_pc_adjustment);
  __ std(R30, _abi(lr), R1_SP);

  // push a new frame
  __ push_frame(frame_size_in_bytes, R30);

  // save all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) { // We spilled R30 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) // R30 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R30, offset, R1_SP);
            __ mtctr(R30);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // pop the frame
  __ pop_frame();

  // restore the flags
  __ restore_LR_CR(R30);

  // restore scratch register's value
  __ ld(R30, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num)==R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num)==F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  ResourceMark rm;
  // Note, MaxVectorSize == 8 on PPC64.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}
#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
// up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
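
// Illustrative example (an assumption derived from the register tables and
// the loop in java_calling_convention below, not a comment from the original
// source): for a Java signature (int, long, float, Object) the mapping is
//   arg 0, int    -> R3 (java_iarg_reg[0])
//   arg 1, long   -> R4 (java_iarg_reg[1])
//   arg 2, float  -> F1 (java_farg_reg[0])
//   arg 3, Object -> R5 (java_iarg_reg[2])
// Int and float argument registers are counted independently, so a float
// argument does not consume an int register.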

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack, float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return round_to(stk, 2);
}

#ifdef COMPILER2
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.
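  //
  // Illustrative example (an assumption based on the rules implemented
  // below, not a comment from the original source): for a C signature
  // (long, long, long, long, long, long, long, long, float), the eight
  // longs occupy R3..R10. The float is the first float argument, so it is
  // passed in F1; but since its argument index is beyond
  // Argument::n_regs_not_on_stack_c, it is additionally stored to its
  // stack slot via regs2.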

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail-out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. We do the cast in GraphKit::gen_stub() and just guard
      // here against losing that change.
      assert(CCallingConventionRequiresIntsAsLongs,
             "argument of type int should be promoted to type long");
      guarantee(i > 0 && sig_bt[i-1] == T_LONG,
                "argument of type (bt) should have been promoted to type (T_LONG,bt) for bt in "
                "{T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    case T_LONG:
      guarantee(sig_bt[i+1] == T_VOID    ||
                sig_bt[i+1] == T_BOOLEAN || sig_bt[i+1] == T_CHAR  ||
                sig_bt[i+1] == T_BYTE    || sig_bt[i+1] == T_SHORT ||
                sig_bt[i+1] == T_INT,
                "expecting type (T_LONG,half) or type (T_LONG,bt) with bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
#if defined(LINUX)
      // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
      // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
      // Although AIX runs on big endian CPU, float is in the most
      // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return round_to(stk, 2);
}
#endif // COMPILER2

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
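
  // Illustrative layout sketch (an assumption based on the stores above,
  // not a comment from the original source): for the signature (int, long)
  // the outgoing interpreter area is filled top-down as
  //   st_off      : int  (stw, single slot)
  //   st_off -  8 : junk (debug builds store 0 into the unused slot)
  //   st_off - 16 : long (std, the data slot of the 2-slot long)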

  // Jump to the interpreter just as if interpreter was doing it.

#ifdef CC_INTERP
  const Register tos = R17_tos;
#else
  const Register tos = R15_esp;
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif

  // load TOS
  __ addi(tos, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

#ifdef CC_INTERP
  const Register ld_ptr = R17_tos;
#else
  const Register ld_ptr = R15_esp;
#endif

  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }
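  // Worked example (illustrative, not from the original source): with
  // comp_args_on_stack == 5 slots, 5 * 4 = 20 bytes round up to 24 bytes
  // = 3 words, which round up to 4 words of frame resize.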

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}
goetz@6458 1125
goetz@6458 1126 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
goetz@6458 1127 int total_args_passed,
goetz@6458 1128 int comp_args_on_stack,
goetz@6458 1129 const BasicType *sig_bt,
goetz@6458 1130 const VMRegPair *regs,
goetz@6458 1131 AdapterFingerPrint* fingerprint) {
goetz@6458 1132 address i2c_entry;
goetz@6458 1133 address c2i_unverified_entry;
goetz@6458 1134 address c2i_entry;
goetz@6458 1135
goetz@6458 1136
goetz@6458 1137 // entry: i2c
goetz@6458 1138
goetz@6458 1139 __ align(CodeEntryAlignment);
goetz@6458 1140 i2c_entry = __ pc();
goetz@6458 1141 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
goetz@6458 1142
goetz@6458 1143
goetz@6458 1144 // entry: c2i unverified
goetz@6458 1145
goetz@6458 1146 __ align(CodeEntryAlignment);
goetz@6458 1147 BLOCK_COMMENT("c2i unverified entry");
goetz@6458 1148 c2i_unverified_entry = __ pc();
goetz@6458 1149
goetz@6458 1150 // inline_cache contains a compiledICHolder
goetz@6458 1151 const Register ic = R19_method;
goetz@6458 1152 const Register ic_klass = R11_scratch1;
goetz@6458 1153 const Register receiver_klass = R12_scratch2;
goetz@6458 1154 const Register code = R21_tmp1;
goetz@6458 1155 const Register ientry = R23_tmp3;
goetz@6458 1156
goetz@6458 1157 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
goetz@6458 1158 assert(R11_scratch1 == R11, "need prologue scratch register");
goetz@6458 1159
goetz@6458 1160 Label call_interpreter;
goetz@6458 1161
goetz@6458 1162 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
goetz@6458 1163 "klass offset should reach into any page");
goetz@6458 1164 // Check for NULL argument if we don't have implicit null checks.
goetz@6486 1165 if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
goetz@6458 1166 if (TrapBasedNullChecks) {
goetz@6458 1167 __ trap_null_check(R3_ARG1);
goetz@6458 1168 } else {
goetz@6458 1169 Label valid;
goetz@6458 1170 __ cmpdi(CCR0, R3_ARG1, 0);
goetz@6458 1171 __ bne_predict_taken(CCR0, valid);
goetz@6458 1172 // We have a null argument, branch to ic_miss_stub.
goetz@6458 1173 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
goetz@6495 1174 relocInfo::runtime_call_type);
goetz@6458 1175 __ BIND(valid);
goetz@6458 1176 }
goetz@6458 1177 }
goetz@6458 1178 // Assume argument is not NULL, load klass from receiver.
goetz@6458 1179 __ load_klass(receiver_klass, R3_ARG1);
goetz@6458 1180
goetz@6458 1181 __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
goetz@6458 1182
goetz@6458 1183 if (TrapBasedICMissChecks) {
goetz@6458 1184 __ trap_ic_miss_check(receiver_klass, ic_klass);
goetz@6458 1185 } else {
goetz@6458 1186 Label valid;
goetz@6458 1187 __ cmpd(CCR0, receiver_klass, ic_klass);
goetz@6458 1188 __ beq_predict_taken(CCR0, valid);
goetz@6458 1189 // We have an unexpected klass, branch to ic_miss_stub.
goetz@6458 1190 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
goetz@6495 1191 relocInfo::runtime_call_type);
goetz@6458 1192 __ BIND(valid);
goetz@6458 1193 }
goetz@6458 1194
goetz@6458 1195 // Argument is valid and klass is as expected, continue.
goetz@6458 1196
goetz@6458 1197 // Extract method from inline cache, verified entry point needs it.
dbuck@8997 1198 __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
goetz@6458 1199 assert(R19_method == ic, "the inline cache register is dead here");
goetz@6458 1200
goetz@6458 1201 __ ld(code, method_(code));
goetz@6458 1202 __ cmpdi(CCR0, code, 0);
goetz@6458 1203 __ ld(ientry, method_(interpreter_entry)); // preloaded
goetz@6458 1204 __ beq_predict_taken(CCR0, call_interpreter);
goetz@6458 1205
goetz@6458 1206 // Branch to ic_miss_stub.
goetz@6495 1207 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
goetz@6458 1208
goetz@6458 1209 // entry: c2i
goetz@6458 1210
goetz@6458 1211 c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
goetz@6458 1212
goetz@6458 1213 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
goetz@6458 1214 }
goetz@6458 1215
goetz@6458 1216 #ifdef COMPILER2
goetz@6458 1217 // An oop arg. Must pass a handle not the oop itself.
goetz@6458 1218 static void object_move(MacroAssembler* masm,
goetz@6458 1219 int frame_size_in_slots,
goetz@6458 1220 OopMap* oop_map, int oop_handle_offset,
goetz@6458 1221 bool is_receiver, int* receiver_offset,
goetz@6458 1222 VMRegPair src, VMRegPair dst,
goetz@6458 1223 Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
goetz@6458 1224 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
goetz@6458 1225 "receiver has already been moved");
goetz@6458 1226
goetz@6458 1227 // We must pass a handle. First figure out the location we use as a handle.
goetz@6458 1228
goetz@6458 1229 if (src.first()->is_stack()) {
goetz@6458 1230 // stack to stack or reg
goetz@6458 1231
goetz@6458 1232 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
goetz@6458 1233 Label skip;
goetz@6458 1234 const int oop_slot_in_callers_frame = reg2slot(src.first());
goetz@6458 1235
goetz@6458 1236 guarantee(!is_receiver, "expecting receiver in register");
goetz@6458 1237 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));
goetz@6458 1238
goetz@6458 1239 __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
goetz@6458 1240 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
goetz@6458 1241 __ cmpdi(CCR0, r_temp_2, 0);
goetz@6458 1242 __ bne(CCR0, skip);
goetz@6458 1243 // Use a NULL handle if oop is NULL.
goetz@6458 1244 __ li(r_handle, 0);
goetz@6458 1245 __ bind(skip);
goetz@6458 1246
goetz@6458 1247 if (dst.first()->is_stack()) {
goetz@6458 1248 // stack to stack
goetz@6458 1249 __ std(r_handle, reg2offset(dst.first()), R1_SP);
goetz@6458 1250 } else {
goetz@6458 1251 // stack to reg
goetz@6458 1252 // Nothing to do, r_handle is already the dst register.
goetz@6458 1253 }
goetz@6458 1254 } else {
goetz@6458 1255 // reg to stack or reg
goetz@6458 1256 const Register r_oop = src.first()->as_Register();
goetz@6458 1257 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
goetz@6458 1258 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
goetz@6458 1259 + oop_handle_offset; // in slots
goetz@6458 1260 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
goetz@6458 1261 Label skip;
goetz@6458 1262
goetz@6458 1263 if (is_receiver) {
goetz@6458 1264 *receiver_offset = oop_offset;
goetz@6458 1265 }
goetz@6458 1266 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));
goetz@6458 1267
goetz@6458 1268 __ std( r_oop, oop_offset, R1_SP);
goetz@6458 1269 __ addi(r_handle, R1_SP, oop_offset);
goetz@6458 1270
goetz@6458 1271 __ cmpdi(CCR0, r_oop, 0);
goetz@6458 1272 __ bne(CCR0, skip);
goetz@6458 1273 // Use a NULL handle if oop is NULL.
goetz@6458 1274 __ li(r_handle, 0);
goetz@6458 1275 __ bind(skip);
goetz@6458 1276
goetz@6458 1277 if (dst.first()->is_stack()) {
goetz@6458 1278 // reg to stack
goetz@6458 1279 __ std(r_handle, reg2offset(dst.first()), R1_SP);
goetz@6458 1280 } else {
goetz@6458 1281 // reg to reg
goetz@6458 1282 // Nothing to do, r_handle is already the dst register.
goetz@6458 1283 }
goetz@6458 1284 }
goetz@6458 1285 }
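// Illustrative sketch (not generated code): the "handle" the callee receives
// is the address of a stack slot holding the oop, or NULL for a NULL oop:
//
//   jobject handle = (oop == NULL) ? NULL : &slot_holding(oop);
//
// The callee only ever dereferences the handle, so the GC may freely update
// the oop in the slot described by the OopMap entry recorded above.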
goetz@6458 1286
goetz@6458 1287 static void int_move(MacroAssembler* masm,
goetz@6458 1288 VMRegPair src, VMRegPair dst,
goetz@6458 1289 Register r_caller_sp, Register r_temp) {
goetz@6458 1290 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long-int");
goetz@6458 1291 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
goetz@6458 1292
goetz@6458 1293 if (src.first()->is_stack()) {
goetz@6458 1294 if (dst.first()->is_stack()) {
goetz@6458 1295 // stack to stack
goetz@6458 1296 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
goetz@6458 1297 __ std(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1298 } else {
goetz@6458 1299 // stack to reg
goetz@6458 1300 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
goetz@6458 1301 }
goetz@6458 1302 } else if (dst.first()->is_stack()) {
goetz@6458 1303 // reg to stack
goetz@6458 1304 __ extsw(r_temp, src.first()->as_Register());
goetz@6458 1305 __ std(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1306 } else {
goetz@6458 1307 // reg to reg
goetz@6458 1308 __ extsw(dst.first()->as_Register(), src.first()->as_Register());
goetz@6458 1309 }
goetz@6458 1310 }
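// Example: extsw sign-extends the low 32 bits, so an int -1 (0xffffffff)
// becomes the 64-bit -1 (0xffffffffffffffff), matching the "pass ints as
// longs" convention used throughout this file.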
goetz@6458 1311
goetz@6458 1312 static void long_move(MacroAssembler* masm,
goetz@6458 1313 VMRegPair src, VMRegPair dst,
goetz@6458 1314 Register r_caller_sp, Register r_temp) {
goetz@6458 1315 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
goetz@6458 1316 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
goetz@6458 1317
goetz@6458 1318 if (src.first()->is_stack()) {
goetz@6458 1319 if (dst.first()->is_stack()) {
goetz@6458 1320 // stack to stack
goetz@6458 1321 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
goetz@6458 1322 __ std(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1323 } else {
goetz@6458 1324 // stack to reg
goetz@6458 1325 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
goetz@6458 1326 }
goetz@6458 1327 } else if (dst.first()->is_stack()) {
goetz@6458 1328 // reg to stack
goetz@6458 1329 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
goetz@6458 1330 } else {
goetz@6458 1331 // reg to reg
goetz@6458 1332 if (dst.first()->as_Register() != src.first()->as_Register())
goetz@6458 1333 __ mr(dst.first()->as_Register(), src.first()->as_Register());
goetz@6458 1334 }
goetz@6458 1335 }
goetz@6458 1336
goetz@6458 1337 static void float_move(MacroAssembler* masm,
goetz@6458 1338 VMRegPair src, VMRegPair dst,
goetz@6458 1339 Register r_caller_sp, Register r_temp) {
goetz@6458 1340 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
goetz@6458 1341 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");
goetz@6458 1342
goetz@6458 1343 if (src.first()->is_stack()) {
goetz@6458 1344 if (dst.first()->is_stack()) {
goetz@6458 1345 // stack to stack
goetz@6458 1346 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
goetz@6458 1347 __ stw(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1348 } else {
goetz@6458 1349 // stack to reg
goetz@6458 1350 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
goetz@6458 1351 }
goetz@6458 1352 } else if (dst.first()->is_stack()) {
goetz@6458 1353 // reg to stack
goetz@6458 1354 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
goetz@6458 1355 } else {
goetz@6458 1356 // reg to reg
goetz@6458 1357 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
goetz@6458 1358 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
goetz@6458 1359 }
goetz@6458 1360 }
goetz@6458 1361
goetz@6458 1362 static void double_move(MacroAssembler* masm,
goetz@6458 1363 VMRegPair src, VMRegPair dst,
goetz@6458 1364 Register r_caller_sp, Register r_temp) {
goetz@6458 1365 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
goetz@6458 1366 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");
goetz@6458 1367
goetz@6458 1368 if (src.first()->is_stack()) {
goetz@6458 1369 if (dst.first()->is_stack()) {
goetz@6458 1370 // stack to stack
goetz@6458 1371 __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
goetz@6458 1372 __ std(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1373 } else {
goetz@6458 1374 // stack to reg
goetz@6458 1375 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
goetz@6458 1376 }
goetz@6458 1377 } else if (dst.first()->is_stack()) {
goetz@6458 1378 // reg to stack
goetz@6458 1379 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
goetz@6458 1380 } else {
goetz@6458 1381 // reg to reg
goetz@6458 1382 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
goetz@6458 1383 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
goetz@6458 1384 }
goetz@6458 1385 }
goetz@6458 1386
goetz@6458 1387 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
goetz@6458 1388 switch (ret_type) {
goetz@6458 1389 case T_BOOLEAN:
goetz@6458 1390 case T_CHAR:
goetz@6458 1391 case T_BYTE:
goetz@6458 1392 case T_SHORT:
goetz@6458 1393 case T_INT:
goetz@6458 1394 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1395 break;
goetz@6458 1396 case T_ARRAY:
goetz@6458 1397 case T_OBJECT:
goetz@6458 1398 case T_LONG:
goetz@6458 1399 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1400 break;
goetz@6458 1401 case T_FLOAT:
goetz@6458 1402 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1403 break;
goetz@6458 1404 case T_DOUBLE:
goetz@6458 1405 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1406 break;
goetz@6458 1407 case T_VOID:
goetz@6458 1408 break;
goetz@6458 1409 default:
goetz@6458 1410 ShouldNotReachHere();
goetz@6458 1411 break;
goetz@6458 1412 }
goetz@6458 1413 }
goetz@6458 1414
goetz@6458 1415 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
goetz@6458 1416 switch (ret_type) {
goetz@6458 1417 case T_BOOLEAN:
goetz@6458 1418 case T_CHAR:
goetz@6458 1419 case T_BYTE:
goetz@6458 1420 case T_SHORT:
goetz@6458 1421 case T_INT:
goetz@6458 1422 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1423 break;
goetz@6458 1424 case T_ARRAY:
goetz@6458 1425 case T_OBJECT:
goetz@6458 1426 case T_LONG:
goetz@6458 1427 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1428 break;
goetz@6458 1429 case T_FLOAT:
goetz@6458 1430 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1431 break;
goetz@6458 1432 case T_DOUBLE:
goetz@6458 1433 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
goetz@6458 1434 break;
goetz@6458 1435 case T_VOID:
goetz@6458 1436 break;
goetz@6458 1437 default:
goetz@6458 1438 ShouldNotReachHere();
goetz@6458 1439 break;
goetz@6458 1440 }
goetz@6458 1441 }
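// Worked example of the offset math above (assuming the usual 4-byte
// VMRegImpl::stack_slot_size): a result saved at frame_slots == 100 lands
// at byte offset 100 * 4 == 400 relative to R1_SP.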
goetz@6458 1442
goetz@6458 1443 static void save_or_restore_arguments(MacroAssembler* masm,
goetz@6458 1444 const int stack_slots,
goetz@6458 1445 const int total_in_args,
goetz@6458 1446 const int arg_save_area,
goetz@6458 1447 OopMap* map,
goetz@6458 1448 VMRegPair* in_regs,
goetz@6458 1449 BasicType* in_sig_bt) {
goetz@6458 1450 // If map is non-NULL then the code should store the values,
goetz@6458 1451 // otherwise it should load them.
goetz@6458 1452 int slot = arg_save_area;
goetz@6458 1453 // Save down double words first.
goetz@6458 1454 for (int i = 0; i < total_in_args; i++) {
goetz@6458 1455 if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
goetz@6458 1456 int offset = slot * VMRegImpl::stack_slot_size;
goetz@6458 1457 slot += VMRegImpl::slots_per_word;
goetz@6458 1458 assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
goetz@6458 1459 if (map != NULL) {
goetz@6458 1460 __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
goetz@6458 1461 } else {
goetz@6458 1462 __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
goetz@6458 1463 }
goetz@6458 1464 } else if (in_regs[i].first()->is_Register() &&
goetz@6458 1465 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
goetz@6458 1466 int offset = slot * VMRegImpl::stack_slot_size;
goetz@6458 1467 if (map != NULL) {
goetz@6458 1468 __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
goetz@6458 1469 if (in_sig_bt[i] == T_ARRAY) {
goetz@6458 1470 map->set_oop(VMRegImpl::stack2reg(slot));
goetz@6458 1471 }
goetz@6458 1472 } else {
goetz@6458 1473 __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
goetz@6458 1474 }
goetz@6458 1475 slot += VMRegImpl::slots_per_word;
goetz@6458 1476 assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
goetz@6458 1477 }
goetz@6458 1478 }
goetz@6458 1479 // Save or restore single word registers.
goetz@6458 1480 for (int i = 0; i < total_in_args; i++) {
goetz@6458 1481 // PPC64: pass ints as longs: must only deal with floats here.
goetz@6458 1482 if (in_regs[i].first()->is_FloatRegister()) {
goetz@6458 1483 if (in_sig_bt[i] == T_FLOAT) {
goetz@6458 1484 int offset = slot * VMRegImpl::stack_slot_size;
goetz@6458 1485 slot++;
goetz@6458 1486 assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
goetz@6458 1487 if (map != NULL) {
goetz@6458 1488 __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
goetz@6458 1489 } else {
goetz@6458 1490 __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
goetz@6458 1491 }
goetz@6458 1492 }
goetz@6458 1493 } else if (in_regs[i].first()->is_stack()) {
goetz@6458 1494 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
goetz@6458 1495 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
goetz@6458 1496 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
goetz@6458 1497 }
goetz@6458 1498 }
goetz@6458 1499 }
goetz@6458 1500 }
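// Sketch of the resulting save area for an argument list (double d, long l,
// float f), all passed in registers (slot numbers are relative to
// arg_save_area and purely illustrative):
//
//   slots 0..1: d  (stfd/lfd)
//   slots 2..3: l  (std/ld)
//   slot  4   : f  (stfs/lfs)
//
// Double words are saved first, which keeps the 8-byte values on even slot
// boundaries (assuming arg_save_area itself is even-aligned).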
goetz@6458 1501
goetz@6458 1502 // Check GC_locker::needs_gc and enter the runtime if it's true. This
goetz@6458 1503 // keeps a new JNI critical region from starting until a GC has been
goetz@6458 1504 // forced. Save down any oops in registers and describe them in an
goetz@6458 1505 // OopMap.
goetz@6458 1506 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
goetz@6458 1507 const int stack_slots,
goetz@6458 1508 const int total_in_args,
goetz@6458 1509 const int arg_save_area,
goetz@6458 1510 OopMapSet* oop_maps,
goetz@6458 1511 VMRegPair* in_regs,
goetz@6458 1512 BasicType* in_sig_bt,
goetz@6458 1513 Register tmp_reg ) {
goetz@6458 1514 __ block_comment("check GC_locker::needs_gc");
goetz@6458 1515 Label cont;
goetz@6458 1516 __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
goetz@6458 1517 __ cmplwi(CCR0, tmp_reg, 0);
goetz@6458 1518 __ beq(CCR0, cont);
goetz@6458 1519
goetz@6458 1520 // Save down any values that are live in registers and call into the
goetz@6458 1521 // runtime to halt for a GC.
goetz@6458 1522 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
goetz@6458 1523 save_or_restore_arguments(masm, stack_slots, total_in_args,
goetz@6458 1524 arg_save_area, map, in_regs, in_sig_bt);
goetz@6458 1525
goetz@6458 1526 __ mr(R3_ARG1, R16_thread);
goetz@6458 1527 __ set_last_Java_frame(R1_SP, noreg);
goetz@6458 1528
goetz@6458 1529 __ block_comment("block_for_jni_critical");
goetz@6458 1530 address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
goetz@6511 1531 #if defined(ABI_ELFv2)
goetz@6511 1532 __ call_c(entry_point, relocInfo::runtime_call_type);
goetz@6511 1533 #else
goetz@6458 1534 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
goetz@6511 1535 #endif
goetz@6458 1536 address start = __ pc() - __ offset(),
goetz@6458 1537 calls_return_pc = __ last_calls_return_pc();
goetz@6458 1538 oop_maps->add_gc_map(calls_return_pc - start, map);
goetz@6458 1539
goetz@6458 1540 __ reset_last_Java_frame();
goetz@6458 1541
goetz@6458 1542 // Reload all the register arguments.
goetz@6458 1543 save_or_restore_arguments(masm, stack_slots, total_in_args,
goetz@6458 1544 arg_save_area, NULL, in_regs, in_sig_bt);
goetz@6458 1545
goetz@6458 1546 __ BIND(cont);
goetz@6458 1547
goetz@6458 1548 #ifdef ASSERT
goetz@6458 1549 if (StressCriticalJNINatives) {
goetz@6458 1550 // Stress register saving.
goetz@6458 1551 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
goetz@6458 1552 save_or_restore_arguments(masm, stack_slots, total_in_args,
goetz@6458 1553 arg_save_area, map, in_regs, in_sig_bt);
goetz@6458 1554 // Destroy argument registers.
goetz@6458 1555 for (int i = 0; i < total_in_args; i++) {
goetz@6458 1556 if (in_regs[i].first()->is_Register()) {
goetz@6458 1557 const Register reg = in_regs[i].first()->as_Register();
goetz@6458 1558 __ neg(reg, reg);
goetz@6458 1559 } else if (in_regs[i].first()->is_FloatRegister()) {
goetz@6458 1560 __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
goetz@6458 1561 }
goetz@6458 1562 }
goetz@6458 1563
goetz@6458 1564 save_or_restore_arguments(masm, stack_slots, total_in_args,
goetz@6458 1565 arg_save_area, NULL, in_regs, in_sig_bt);
goetz@6458 1566 }
goetz@6458 1567 #endif
goetz@6458 1568 }
goetz@6458 1569
goetz@6458 1570 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
goetz@6458 1571 if (src.first()->is_stack()) {
goetz@6458 1572 if (dst.first()->is_stack()) {
goetz@6458 1573 // stack to stack
goetz@6458 1574 __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
goetz@6458 1575 __ std(r_temp, reg2offset(dst.first()), R1_SP);
goetz@6458 1576 } else {
goetz@6458 1577 // stack to reg
goetz@6458 1578 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
goetz@6458 1579 }
goetz@6458 1580 } else if (dst.first()->is_stack()) {
goetz@6458 1581 // reg to stack
goetz@6458 1582 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
goetz@6458 1583 } else {
goetz@6458 1584 if (dst.first() != src.first()) {
goetz@6458 1585 __ mr(dst.first()->as_Register(), src.first()->as_Register());
goetz@6458 1586 }
goetz@6458 1587 }
goetz@6458 1588 }
goetz@6458 1589
goetz@6458 1590 // Unpack an array argument into a pointer to the body and the length
goetz@6458 1591 // if the array is non-null, otherwise pass 0 for both.
goetz@6458 1592 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
goetz@6458 1593 VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
goetz@6458 1594 Register tmp_reg, Register tmp2_reg) {
goetz@6458 1595 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
goetz@6458 1596 "possible collision");
goetz@6458 1597 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
goetz@6458 1598 "possible collision");
goetz@6458 1599
goetz@6458 1600 // Pass the length, ptr pair.
goetz@6458 1601 Label set_out_args;
goetz@6458 1602 VMRegPair tmp, tmp2;
goetz@6458 1603 tmp.set_ptr(tmp_reg->as_VMReg());
goetz@6458 1604 tmp2.set_ptr(tmp2_reg->as_VMReg());
goetz@6458 1605 if (reg.first()->is_stack()) {
goetz@6458 1606 // Load the arg up from the stack.
goetz@6458 1607 move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
goetz@6458 1608 reg = tmp;
goetz@6458 1609 }
goetz@6458 1610 __ li(tmp2_reg, 0); // Pass zeros if the array is NULL.
goetz@6458 1611 if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
goetz@6458 1612 __ cmpdi(CCR0, reg.first()->as_Register(), 0);
goetz@6458 1613 __ beq(CCR0, set_out_args);
goetz@6458 1614 __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
goetz@6458 1615 __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
goetz@6458 1616 __ bind(set_out_args);
goetz@6458 1617 move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
goetz@6458 1618 move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
goetz@6458 1619 }
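// Sketch of what a critical native then sees for a Java byte[] parameter
// (the JavaCritical_* name and jbyte* type are illustrative only):
//
//   ... JavaCritical_pkg_Cls_mth(..., jint length, jbyte* body, ...);
//
// with length == 0 and body == NULL whenever the Java array reference is NULL.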
goetz@6458 1620
goetz@6458 1621 static void verify_oop_args(MacroAssembler* masm,
goetz@6458 1622 methodHandle method,
goetz@6458 1623 const BasicType* sig_bt,
goetz@6458 1624 const VMRegPair* regs) {
goetz@6458 1625 Register temp_reg = R19_method; // not part of any compiled calling seq
goetz@6458 1626 if (VerifyOops) {
goetz@6458 1627 for (int i = 0; i < method->size_of_parameters(); i++) {
goetz@6458 1628 if (sig_bt[i] == T_OBJECT ||
goetz@6458 1629 sig_bt[i] == T_ARRAY) {
goetz@6458 1630 VMReg r = regs[i].first();
goetz@6458 1631 assert(r->is_valid(), "bad oop arg");
goetz@6458 1632 if (r->is_stack()) {
goetz@6458 1633 __ ld(temp_reg, reg2offset(r), R1_SP);
goetz@6458 1634 __ verify_oop(temp_reg);
goetz@6458 1635 } else {
goetz@6458 1636 __ verify_oop(r->as_Register());
goetz@6458 1637 }
goetz@6458 1638 }
goetz@6458 1639 }
goetz@6458 1640 }
goetz@6458 1641 }
goetz@6458 1642
goetz@6458 1643 static void gen_special_dispatch(MacroAssembler* masm,
goetz@6458 1644 methodHandle method,
goetz@6458 1645 const BasicType* sig_bt,
goetz@6458 1646 const VMRegPair* regs) {
goetz@6458 1647 verify_oop_args(masm, method, sig_bt, regs);
goetz@6458 1648 vmIntrinsics::ID iid = method->intrinsic_id();
goetz@6458 1649
goetz@6458 1650 // Now write the args into the outgoing interpreter space
goetz@6458 1651 bool has_receiver = false;
goetz@6458 1652 Register receiver_reg = noreg;
goetz@6458 1653 int member_arg_pos = -1;
goetz@6458 1654 Register member_reg = noreg;
goetz@6458 1655 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
goetz@6458 1656 if (ref_kind != 0) {
goetz@6458 1657 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
goetz@6458 1658 member_reg = R19_method; // known to be free at this point
goetz@6458 1659 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
goetz@6458 1660 } else if (iid == vmIntrinsics::_invokeBasic) {
goetz@6458 1661 has_receiver = true;
goetz@6458 1662 } else {
goetz@6458 1663 fatal(err_msg_res("unexpected intrinsic id %d", iid));
goetz@6458 1664 }
goetz@6458 1665
goetz@6458 1666 if (member_reg != noreg) {
goetz@6458 1667 // Load the member_arg into register, if necessary.
goetz@6458 1668 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
goetz@6458 1669 VMReg r = regs[member_arg_pos].first();
goetz@6458 1670 if (r->is_stack()) {
goetz@6458 1671 __ ld(member_reg, reg2offset(r), R1_SP);
goetz@6458 1672 } else {
goetz@6458 1673 // no data motion is needed
goetz@6458 1674 member_reg = r->as_Register();
goetz@6458 1675 }
goetz@6458 1676 }
goetz@6458 1677
goetz@6458 1678 if (has_receiver) {
goetz@6458 1679 // Make sure the receiver is loaded into a register.
goetz@6458 1680 assert(method->size_of_parameters() > 0, "oob");
goetz@6458 1681 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
goetz@6458 1682 VMReg r = regs[0].first();
goetz@6458 1683 assert(r->is_valid(), "bad receiver arg");
goetz@6458 1684 if (r->is_stack()) {
goetz@6458 1685 // Porting note: This assumes that compiled calling conventions always
goetz@6458 1686 // pass the receiver oop in a register. If this is not true on some
goetz@6458 1687 // platform, pick a temp and load the receiver from stack.
goetz@6458 1688 fatal("receiver always in a register");
goetz@6458 1689 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point?
goetz@6458 1690 __ ld(receiver_reg, reg2offset(r), R1_SP);
goetz@6458 1691 } else {
goetz@6458 1692 // no data motion is needed
goetz@6458 1693 receiver_reg = r->as_Register();
goetz@6458 1694 }
goetz@6458 1695 }
goetz@6458 1696
goetz@6458 1697 // Figure out which address we are really jumping to:
goetz@6458 1698 MethodHandles::generate_method_handle_dispatch(masm, iid,
goetz@6458 1699 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
goetz@6458 1700 }
goetz@6458 1701
goetz@6458 1702 #endif // COMPILER2
goetz@6458 1703
goetz@6458 1704 // ---------------------------------------------------------------------------
goetz@6458 1705 // Generate a native wrapper for a given method. The method takes arguments
goetz@6458 1706 // in the Java compiled code convention, marshals them to the native
goetz@6458 1707 // convention (handlizes oops, etc), transitions to native, makes the call,
goetz@6458 1708 // returns to java state (possibly blocking), unhandlizes any result and
goetz@6458 1709 // returns.
goetz@6458 1710 //
goetz@6458 1711 // Critical native functions are a shorthand for the use of
goetz@6458 1712 // GetPrimitiveArrayCritical and disallow the use of any other JNI
goetz@6458 1713 // functions. The wrapper is expected to unpack the arguments before
goetz@6458 1714 // passing them to the callee and to perform checks before and after the
goetz@6458 1715 // native call to ensure that the GC_locker
goetz@6458 1716 // lock_critical/unlock_critical semantics are followed. Some other
goetz@6458 1717 // parts of JNI setup are skipped, like the tear-down of the JNI handle
goetz@6458 1718 // block and the check for pending exceptions, since it's impossible for
goetz@6458 1719 // them to be thrown.
goetz@6458 1720 //
goetz@6458 1721 // They are roughly structured like this:
goetz@6458 1722 // if (GC_locker::needs_gc())
goetz@6458 1723 // SharedRuntime::block_for_jni_critical();
goetz@6458 1724 // transition to thread_in_native
goetz@6458 1725 // unpack array arguments and call native entry point
goetz@6458 1726 // check for safepoint in progress
goetz@6458 1727 // check if any thread suspend flags are set
goetz@6458 1728 // call into the JVM and possibly unlock the JNI critical region
goetz@6458 1729 // if a GC was suppressed while in the critical native.
goetz@6458 1730 // transition back to thread_in_Java
goetz@6458 1731 // return to caller
goetz@6458 1732 //
goetz@6458 1733 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
goetz@6458 1734 methodHandle method,
goetz@6458 1735 int compile_id,
goetz@6458 1736 BasicType *in_sig_bt,
goetz@6458 1737 VMRegPair *in_regs,
goetz@6458 1738 BasicType ret_type) {
goetz@6458 1739 #ifdef COMPILER2
goetz@6458 1740 if (method->is_method_handle_intrinsic()) {
goetz@6458 1741 vmIntrinsics::ID iid = method->intrinsic_id();
goetz@6458 1742 intptr_t start = (intptr_t)__ pc();
goetz@6458 1743 int vep_offset = ((intptr_t)__ pc()) - start;
goetz@6458 1744 gen_special_dispatch(masm,
goetz@6458 1745 method,
goetz@6458 1746 in_sig_bt,
goetz@6458 1747 in_regs);
goetz@6458 1748 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
goetz@6458 1749 __ flush();
goetz@6458 1750 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
goetz@6458 1751 return nmethod::new_native_nmethod(method,
goetz@6458 1752 compile_id,
goetz@6458 1753 masm->code(),
goetz@6458 1754 vep_offset,
goetz@6458 1755 frame_complete,
goetz@6458 1756 stack_slots / VMRegImpl::slots_per_word,
goetz@6458 1757 in_ByteSize(-1),
goetz@6458 1758 in_ByteSize(-1),
goetz@6458 1759 (OopMapSet*)NULL);
goetz@6458 1760 }
goetz@6458 1761
goetz@6458 1762 bool is_critical_native = true;
goetz@6458 1763 address native_func = method->critical_native_function();
goetz@6458 1764 if (native_func == NULL) {
goetz@6458 1765 native_func = method->native_function();
goetz@6458 1766 is_critical_native = false;
goetz@6458 1767 }
goetz@6458 1768 assert(native_func != NULL, "must have function");
goetz@6458 1769
goetz@6458 1770 // First, create signature for outgoing C call
goetz@6458 1771 // --------------------------------------------------------------------------
goetz@6458 1772
goetz@6458 1773 int total_in_args = method->size_of_parameters();
goetz@6458 1774 // We have received a description of where all the java args are located
goetz@6458 1775 // on entry to the wrapper. We need to convert these args to where
goetz@6458 1776 // the jni function will expect them. To figure out where they go
goetz@6458 1777 // we convert the java signature to a C signature by inserting
goetz@6458 1778 // the hidden arguments as arg[0] and possibly arg[1] (static method)
goetz@6458 1779 //
goetz@6458 1780 // Additionally, on ppc64 we must convert integers to longs in the C
goetz@6458 1781 // signature. We do this in advance in order to have no trouble with
goetz@6458 1782 // indexes into the bt-arrays.
goetz@6458 1783 // So convert the signature and registers now, and adjust the total number
goetz@6458 1784 // of in-arguments accordingly.
goetz@6458 1785 int i2l_argcnt = convert_ints_to_longints_argcnt(total_in_args, in_sig_bt); // PPC64: pass ints as longs.
goetz@6458 1786
goetz@6458 1787 // Calculate the total number of C arguments and create arrays for the
goetz@6458 1788 // signature and the outgoing registers.
goetz@6458 1789 // On ppc64, we have two arrays for the outgoing registers, because
goetz@6458 1790 // some floating-point arguments must be passed in registers _and_
goetz@6458 1791 // in stack locations.
goetz@6458 1792 bool method_is_static = method->is_static();
goetz@6458 1793 int total_c_args = i2l_argcnt;
goetz@6458 1794
goetz@6458 1795 if (!is_critical_native) {
goetz@6458 1796 int n_hidden_args = method_is_static ? 2 : 1;
goetz@6458 1797 total_c_args += n_hidden_args;
goetz@6458 1798 } else {
goetz@6458 1799 // No JNIEnv*, no this*, but unpacked arrays (base+length).
goetz@6458 1800 for (int i = 0; i < total_in_args; i++) {
goetz@6458 1801 if (in_sig_bt[i] == T_ARRAY) {
goetz@6458 1802 total_c_args += 2; // PPC64: T_LONG, T_INT, T_ADDRESS (see convert_ints_to_longints and c_calling_convention)
goetz@6458 1803 }
goetz@6458 1804 }
goetz@6458 1805 }
goetz@6458 1806
goetz@6458 1807 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
goetz@6458 1808 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
goetz@6458 1809 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
goetz@6458 1810 BasicType* in_elem_bt = NULL;
goetz@6458 1811
goetz@6458 1812 // Create the signature for the C call:
goetz@6458 1813 // 1) add the JNIEnv*
goetz@6458 1814 // 2) add the class if the method is static
goetz@6458 1815 // 3) copy the rest of the incoming signature (shifted by the number of
goetz@6458 1816 // hidden arguments).
goetz@6458 1817
goetz@6458 1818 int argc = 0;
goetz@6458 1819 if (!is_critical_native) {
goetz@6458 1820 convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
goetz@6458 1821
goetz@6458 1822 out_sig_bt[argc++] = T_ADDRESS;
goetz@6458 1823 if (method->is_static()) {
goetz@6458 1824 out_sig_bt[argc++] = T_OBJECT;
goetz@6458 1825 }
goetz@6458 1826
goetz@6458 1827 for (int i = 0; i < total_in_args ; i++ ) {
goetz@6458 1828 out_sig_bt[argc++] = in_sig_bt[i];
goetz@6458 1829 }
goetz@6458 1830 } else {
goetz@6458 1831 Thread* THREAD = Thread::current();
goetz@6458 1832 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
goetz@6458 1833 SignatureStream ss(method->signature());
goetz@6458 1834 int o = 0;
goetz@6458 1835 for (int i = 0; i < total_in_args ; i++, o++) {
goetz@6458 1836 if (in_sig_bt[i] == T_ARRAY) {
goetz@6458 1837 // Arrays are passed as int, elem* pair
goetz@6458 1838 Symbol* atype = ss.as_symbol(CHECK_NULL);
goetz@6458 1839 const char* at = atype->as_C_string();
goetz@6458 1840 if (strlen(at) == 2) {
goetz@6458 1841 assert(at[0] == '[', "must be");
goetz@6458 1842 switch (at[1]) {
goetz@6458 1843 case 'B': in_elem_bt[o] = T_BYTE; break;
goetz@6458 1844 case 'C': in_elem_bt[o] = T_CHAR; break;
goetz@6458 1845 case 'D': in_elem_bt[o] = T_DOUBLE; break;
goetz@6458 1846 case 'F': in_elem_bt[o] = T_FLOAT; break;
goetz@6458 1847 case 'I': in_elem_bt[o] = T_INT; break;
goetz@6458 1848 case 'J': in_elem_bt[o] = T_LONG; break;
goetz@6458 1849 case 'S': in_elem_bt[o] = T_SHORT; break;
goetz@6458 1850 case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
goetz@6458 1851 default: ShouldNotReachHere();
goetz@6458 1852 }
goetz@6458 1853 }
goetz@6458 1854 } else {
goetz@6458 1855 in_elem_bt[o] = T_VOID;
goetz@6458 1856 switch(in_sig_bt[i]) { // PPC64: pass ints as longs.
goetz@6458 1857 case T_BOOLEAN:
goetz@6458 1858 case T_CHAR:
goetz@6458 1859 case T_BYTE:
goetz@6458 1860 case T_SHORT:
goetz@6458 1861 case T_INT: in_elem_bt[++o] = T_VOID; break;
goetz@6458 1862 default: break;
goetz@6458 1863 }
goetz@6458 1864 }
goetz@6458 1865 if (in_sig_bt[i] != T_VOID) {
goetz@6458 1866 assert(in_sig_bt[i] == ss.type(), "must match");
goetz@6458 1867 ss.next();
goetz@6458 1868 }
goetz@6458 1869 }
goetz@6458 1870 assert(i2l_argcnt==o, "must match");
goetz@6458 1871
goetz@6458 1872 convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
goetz@6458 1873
goetz@6458 1874 for (int i = 0; i < total_in_args ; i++ ) {
goetz@6458 1875 if (in_sig_bt[i] == T_ARRAY) {
goetz@6458 1876 // Arrays are passed as int, elem* pair.
goetz@6458 1877 out_sig_bt[argc++] = T_LONG; // PPC64: pass ints as longs.
goetz@6458 1878 out_sig_bt[argc++] = T_INT;
goetz@6458 1879 out_sig_bt[argc++] = T_ADDRESS;
goetz@6458 1880 } else {
goetz@6458 1881 out_sig_bt[argc++] = in_sig_bt[i];
goetz@6458 1882 }
goetz@6458 1883 }
goetz@6458 1884 }
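// Example of the conversion above (hypothetical method, for illustration):
// for a critical native 'int m(int x, byte[] a)' the outgoing C signature
// becomes roughly (jlong x /* widened int */, jlong a_len, void* a_body),
// whereas the regular JNI variant would instead be prefixed with JNIEnv*
// (and jclass for a static method) and would pass the array as a jobject.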
goetz@6458 1885
goetz@6458 1886
goetz@6458 1887 // Compute the wrapper's frame size.
goetz@6458 1888 // --------------------------------------------------------------------------
goetz@6458 1889
goetz@6458 1890 // Now figure out where the args must be stored and how much stack space
goetz@6458 1891 // they require.
goetz@6458 1892 //
goetz@6458 1893 // Compute framesize for the wrapper. We need to handlize all oops in
goetz@6458 1894 // incoming registers.
goetz@6458 1895 //
goetz@6458 1896 // Calculate the total number of stack slots we will need:
goetz@6458 1897 // 1) abi requirements
goetz@6458 1898 // 2) outgoing arguments
goetz@6458 1899 // 3) space for inbound oop handle area
goetz@6458 1900 // 4) space for handlizing a klass if static method
goetz@6458 1901 // 5) space for a lock if synchronized method
goetz@6458 1902 // 6) workspace for saving return values, int <-> float reg moves, etc.
goetz@6458 1903 // 7) alignment
goetz@6458 1904 //
goetz@6458 1905 // Layout of the native wrapper frame:
goetz@6458 1906 // (stack grows upwards, memory grows downwards)
goetz@6458 1907 //
goetz@6511 1908 // NW [ABI_REG_ARGS] <-- 1) R1_SP
goetz@6458 1909 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
goetz@6458 1910 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
goetz@6458 1911 // klass <-- 4) R1_SP + klass_offset
goetz@6458 1912 // lock <-- 5) R1_SP + lock_offset
goetz@6458 1913 // [workspace] <-- 6) R1_SP + workspace_offset
goetz@6458 1914 // [alignment] (optional) <-- 7)
goetz@6458 1915 // caller [JIT_TOP_ABI_48] <-- r_callers_sp
goetz@6458 1916 //
goetz@6458 1917 // - *_slot_offset Indicates offset from SP in number of stack slots.
goetz@6458 1918 // - *_offset Indicates offset from SP in bytes.
goetz@6458 1919
goetz@6458 1920 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2)
goetz@6458 1921 + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
goetz@6458 1922
goetz@6458 1923 // Now the space for the inbound oop handle area.
goetz@6458 1924 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
goetz@6458 1925 if (is_critical_native) {
goetz@6458 1926 // Critical natives may have to call out so they need a save area
goetz@6458 1927 // for register arguments.
goetz@6458 1928 int double_slots = 0;
goetz@6458 1929 int single_slots = 0;
goetz@6458 1930 for (int i = 0; i < total_in_args; i++) {
goetz@6458 1931 if (in_regs[i].first()->is_Register()) {
goetz@6458 1932 const Register reg = in_regs[i].first()->as_Register();
goetz@6458 1933 switch (in_sig_bt[i]) {
goetz@6458 1934 case T_BOOLEAN:
goetz@6458 1935 case T_BYTE:
goetz@6458 1936 case T_SHORT:
goetz@6458 1937 case T_CHAR:
goetz@6458 1938 case T_INT: /*single_slots++;*/ break; // PPC64: pass ints as longs.
goetz@6458 1939 case T_ARRAY:
goetz@6458 1940 case T_LONG: double_slots++; break;
goetz@6458 1941 default: ShouldNotReachHere();
goetz@6458 1942 }
goetz@6458 1943 } else if (in_regs[i].first()->is_FloatRegister()) {
goetz@6458 1944 switch (in_sig_bt[i]) {
goetz@6458 1945 case T_FLOAT: single_slots++; break;
goetz@6458 1946 case T_DOUBLE: double_slots++; break;
goetz@6458 1947 default: ShouldNotReachHere();
goetz@6458 1948 }
goetz@6458 1949 }
goetz@6458 1950 }
goetz@6458 1951 total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
goetz@6458 1952 }
goetz@6458 1953
goetz@6458 1954 int oop_handle_slot_offset = stack_slots;
goetz@6458 1955 stack_slots += total_save_slots; // 3)
goetz@6458 1956
goetz@6458 1957 int klass_slot_offset = 0;
goetz@6458 1958 int klass_offset = -1;
goetz@6458 1959 if (method_is_static && !is_critical_native) { // 4)
goetz@6458 1960 klass_slot_offset = stack_slots;
goetz@6458 1961 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
goetz@6458 1962 stack_slots += VMRegImpl::slots_per_word;
goetz@6458 1963 }
goetz@6458 1964
goetz@6458 1965 int lock_slot_offset = 0;
goetz@6458 1966 int lock_offset = -1;
goetz@6458 1967 if (method->is_synchronized()) { // 5)
goetz@6458 1968 lock_slot_offset = stack_slots;
goetz@6458 1969 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
goetz@6458 1970 stack_slots += VMRegImpl::slots_per_word;
goetz@6458 1971 }
goetz@6458 1972
goetz@6458 1973 int workspace_slot_offset = stack_slots; // 6)
goetz@6458 1974 stack_slots += 2;
goetz@6458 1975
goetz@6458 1976 // Now compute actual number of stack words we need.
goetz@6458 1977 // Rounding to make stack properly aligned.
goetz@6458 1978 stack_slots = round_to(stack_slots, // 7)
goetz@6458 1979 frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
goetz@6458 1980 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
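// Rounding example (assuming the usual PPC64 values of 16-byte frame
// alignment and 4-byte stack slots): stack_slots == 117 is rounded up to
// 120 slots, giving frame_size_in_bytes == 480.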
goetz@6458 1981
goetz@6458 1982
goetz@6458 1983 // Now we can start generating code.
goetz@6458 1984 // --------------------------------------------------------------------------
goetz@6458 1985
goetz@6458 1986 intptr_t start_pc = (intptr_t)__ pc();
goetz@6458 1987 intptr_t vep_start_pc;
goetz@6458 1988 intptr_t frame_done_pc;
goetz@6458 1989 intptr_t oopmap_pc;
goetz@6458 1990
goetz@6458 1991 Label ic_miss;
goetz@6458 1992 Label handle_pending_exception;
goetz@6458 1993
goetz@6458 1994 Register r_callers_sp = R21;
goetz@6458 1995 Register r_temp_1 = R22;
goetz@6458 1996 Register r_temp_2 = R23;
goetz@6458 1997 Register r_temp_3 = R24;
goetz@6458 1998 Register r_temp_4 = R25;
goetz@6458 1999 Register r_temp_5 = R26;
goetz@6458 2000 Register r_temp_6 = R27;
goetz@6458 2001 Register r_return_pc = R28;
goetz@6458 2002
goetz@6458 2003 Register r_carg1_jnienv = noreg;
goetz@6458 2004 Register r_carg2_classorobject = noreg;
goetz@6458 2005 if (!is_critical_native) {
goetz@6458 2006 r_carg1_jnienv = out_regs[0].first()->as_Register();
goetz@6458 2007 r_carg2_classorobject = out_regs[1].first()->as_Register();
goetz@6458 2008 }
goetz@6458 2009
goetz@6458 2010
goetz@6458 2011 // Generate the Unverified Entry Point (UEP).
goetz@6458 2012 // --------------------------------------------------------------------------
goetz@6458 2013 assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
goetz@6458 2014
goetz@6458 2015 // Check ic: object class == cached class?
goetz@6458 2016 if (!method_is_static) {
goetz@6458 2017 Register ic = as_Register(Matcher::inline_cache_reg_encode());
goetz@6458 2018 Register receiver_klass = r_temp_1;
goetz@6458 2019
goetz@6458 2020 __ cmpdi(CCR0, R3_ARG1, 0);
goetz@6458 2021 __ beq(CCR0, ic_miss);
goetz@6458 2022 __ verify_oop(R3_ARG1);
goetz@6458 2023 __ load_klass(receiver_klass, R3_ARG1);
goetz@6458 2024
goetz@6458 2025 __ cmpd(CCR0, receiver_klass, ic);
goetz@6458 2026 __ bne(CCR0, ic_miss);
goetz@6458 2027 }
goetz@6458 2028
goetz@6458 2029
goetz@6458 2030 // Generate the Verified Entry Point (VEP).
goetz@6458 2031 // --------------------------------------------------------------------------
goetz@6458 2032 vep_start_pc = (intptr_t)__ pc();
goetz@6458 2033
goetz@6458 2034 __ save_LR_CR(r_temp_1);
goetz@6458 2035 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
goetz@6458 2036 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
goetz@6458 2037 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
goetz@6458 2038 frame_done_pc = (intptr_t)__ pc();
goetz@6458 2039
goetz@6458 2040 // Native nmethod wrappers never take possession of the oop arguments.
goetz@6458 2041 // So the caller will GC the arguments.
goetz@6458 2042 // The only thing we need an oopMap for is if the call is static.
goetz@6458 2043 //
goetz@6458 2044 // An OopMap for lock (and class if static), and one for the VM call itself.
goetz@6458 2045 OopMapSet *oop_maps = new OopMapSet();
goetz@6458 2046 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
goetz@6458 2047
goetz@6458 2048 if (is_critical_native) {
goetz@6458 2049 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
goetz@6458 2050 }
goetz@6458 2051
goetz@6458 2052 // Move arguments from register/stack to register/stack.
goetz@6458 2053 // --------------------------------------------------------------------------
goetz@6458 2054 //
goetz@6458 2055 // We immediately shuffle the arguments so that for any vm call we have
goetz@6458 2056 // to make from here on out (sync slow path, jvmti, etc.) we will have
goetz@6458 2057 // captured the oops from our caller and have a valid oopMap for them.
goetz@6458 2058 //
goetz@6458 2059 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
goetz@6458 2060 // (derived from JavaThread* which is in R16_thread) and, if static,
goetz@6458 2061 // the class mirror instead of a receiver. This pretty much guarantees that
goetz@6458 2062 // register layout will not match. We ignore these extra arguments during
goetz@6458 2063 // the shuffle. The shuffle is described by the two calling convention
goetz@6458 2064 // vectors we have in our possession. We simply walk the java vector to
goetz@6458 2065 // get the source locations and the c vector to get the destinations.
goetz@6458 2066
goetz@6458 2067 // Record sp-based slot for receiver on stack for non-static methods.
goetz@6458 2068 int receiver_offset = -1;
goetz@6458 2069
goetz@6458 2070 // We move the arguments backward because the destination of a floating
goetz@6458 2071 // point argument will always be a register with a greater or equal
goetz@6458 2072 // register number, or a stack slot.
goetz@6458 2073 // in is the index of the incoming Java arguments
goetz@6458 2074 // out is the index of the outgoing C arguments
goetz@6458 2075
goetz@6458 2076 #ifdef ASSERT
goetz@6458 2077 bool reg_destroyed[RegisterImpl::number_of_registers];
goetz@6458 2078 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
goetz@6458 2079 for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
goetz@6458 2080 reg_destroyed[r] = false;
goetz@6458 2081 }
goetz@6458 2082 for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
goetz@6458 2083 freg_destroyed[f] = false;
goetz@6458 2084 }
goetz@6458 2085 #endif // ASSERT
goetz@6458 2086
goetz@6458 2087 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
goetz@6458 2088
goetz@6458 2089 #ifdef ASSERT
goetz@6458 2090 if (in_regs[in].first()->is_Register()) {
goetz@6458 2091 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
goetz@6458 2092 } else if (in_regs[in].first()->is_FloatRegister()) {
goetz@6458 2093 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
goetz@6458 2094 }
goetz@6458 2095 if (out_regs[out].first()->is_Register()) {
goetz@6458 2096 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
goetz@6458 2097 } else if (out_regs[out].first()->is_FloatRegister()) {
goetz@6458 2098 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
goetz@6458 2099 }
goetz@6458 2100 if (out_regs2[out].first()->is_Register()) {
goetz@6458 2101 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
goetz@6458 2102 } else if (out_regs2[out].first()->is_FloatRegister()) {
goetz@6458 2103 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
goetz@6458 2104 }
goetz@6458 2105 #endif // ASSERT
goetz@6458 2106
goetz@6458 2107 switch (in_sig_bt[in]) {
goetz@6458 2108 case T_BOOLEAN:
goetz@6458 2109 case T_CHAR:
goetz@6458 2110 case T_BYTE:
goetz@6458 2111 case T_SHORT:
goetz@6458 2112 case T_INT:
goetz@6458 2113 guarantee(in > 0 && in_sig_bt[in-1] == T_LONG,
goetz@6458 2114 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
goetz@6458 2115 break;
goetz@6458 2116 case T_LONG:
goetz@6458 2117 if (in_sig_bt[in+1] == T_VOID) {
goetz@6458 2118 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
goetz@6458 2119 } else {
goetz@6458 2120 guarantee(in_sig_bt[in+1] == T_BOOLEAN || in_sig_bt[in+1] == T_CHAR ||
goetz@6458 2121 in_sig_bt[in+1] == T_BYTE || in_sig_bt[in+1] == T_SHORT ||
goetz@6458 2122 in_sig_bt[in+1] == T_INT,
goetz@6458 2123 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
goetz@6458 2124 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
goetz@6458 2125 }
goetz@6458 2126 break;
goetz@6458 2127 case T_ARRAY:
goetz@6458 2128 if (is_critical_native) {
goetz@6458 2129 int body_arg = out;
goetz@6458 2130 out -= 2; // Point to length arg. PPC64: pass ints as longs.
goetz@6458 2131 unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
goetz@6458 2132 r_callers_sp, r_temp_1, r_temp_2);
goetz@6458 2133 break;
goetz@6458 2134 }
goetz@6458 2135 case T_OBJECT:
goetz@6458 2136 assert(!is_critical_native, "no oop arguments");
goetz@6458 2137 object_move(masm, stack_slots,
goetz@6458 2138 oop_map, oop_handle_slot_offset,
goetz@6458 2139 ((in == 0) && (!method_is_static)), &receiver_offset,
goetz@6458 2140 in_regs[in], out_regs[out],
goetz@6458 2141 r_callers_sp, r_temp_1, r_temp_2);
goetz@6458 2142 break;
goetz@6458 2143 case T_VOID:
goetz@6458 2144 break;
goetz@6458 2145 case T_FLOAT:
goetz@6458 2146 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
goetz@6458 2147 if (out_regs2[out].first()->is_valid()) {
goetz@6458 2148 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
goetz@6458 2149 }
goetz@6458 2150 break;
goetz@6458 2151 case T_DOUBLE:
goetz@6458 2152 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
goetz@6458 2153 if (out_regs2[out].first()->is_valid()) {
goetz@6458 2154 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
goetz@6458 2155 }
goetz@6458 2156 break;
goetz@6458 2157 case T_ADDRESS:
goetz@6458 2158 fatal("found type (T_ADDRESS) in java args");
goetz@6458 2159 break;
goetz@6458 2160 default:
goetz@6458 2161 ShouldNotReachHere();
goetz@6458 2162 break;
goetz@6458 2163 }
goetz@6458 2164 }
goetz@6458 2165
goetz@6458 2166 // Pre-load a static method's oop into ARG2.
goetz@6458 2167 // Used both by locking code and the normal JNI call code.
goetz@6458 2168 if (method_is_static && !is_critical_native) {
goetz@6458 2169 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
goetz@6458 2170 r_carg2_classorobject);
goetz@6458 2171
goetz@6458 2172 // Now handlize the static class mirror in carg2. It's known not-null.
goetz@6458 2173 __ std(r_carg2_classorobject, klass_offset, R1_SP);
goetz@6458 2174 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
goetz@6458 2175 __ addi(r_carg2_classorobject, R1_SP, klass_offset);
goetz@6458 2176 }
goetz@6458 2177
goetz@6458 2178 // Get JNIEnv* which is first argument to native.
goetz@6458 2179 if (!is_critical_native) {
goetz@6458 2180 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
goetz@6458 2181 }
goetz@6458 2182
goetz@6458 2183 // NOTE:
goetz@6458 2184 //
goetz@6458 2185 // We have all of the arguments setup at this point.
goetz@6458 2186 // We MUST NOT touch any outgoing regs from this point on.
goetz@6458 2187 // So if we must call out we must push a new frame.
goetz@6458 2188
goetz@6458 2189 // Get current pc for oopmap, and load it patchable relative to global toc.
goetz@6458 2190 oopmap_pc = (intptr_t) __ pc();
goetz@6458 2191 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);
goetz@6458 2192
goetz@6458 2193 // We use the same pc/oopMap repeatedly when we call out.
goetz@6458 2194 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
goetz@6458 2195
goetz@6458 2196 // r_return_pc now has the pc loaded that we will use when we finally call
goetz@6458 2197 // to native.
goetz@6458 2198
goetz@6458 2199 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
goetz@6458 2200 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
goetz@6458 2201
goetz@6458 2202
goetz@6458 2203 # if 0
goetz@6458 2204 // DTrace method entry
goetz@6458 2205 # endif
goetz@6458 2206
goetz@6458 2207 // Lock a synchronized method.
goetz@6458 2208 // --------------------------------------------------------------------------
goetz@6458 2209
goetz@6458 2210 if (method->is_synchronized()) {
goetz@6458 2211 assert(!is_critical_native, "unhandled");
goetz@6458 2212 ConditionRegister r_flag = CCR1;
goetz@6458 2213 Register r_oop = r_temp_4;
goetz@6458 2214 const Register r_box = r_temp_5;
goetz@6458 2215 Label done, locked;
goetz@6458 2216
goetz@6458 2217 // Load the oop for the object or class. r_carg2_classorobject contains
goetz@6458 2218 // either the handlized oop from the incoming arguments or the handlized
goetz@6458 2219 // class mirror (if the method is static).
goetz@6458 2220 __ ld(r_oop, 0, r_carg2_classorobject);
goetz@6458 2221
goetz@6458 2222 // Get the lock box slot's address.
goetz@6458 2223 __ addi(r_box, R1_SP, lock_offset);
goetz@6458 2224
goetz@6458 2225 # ifdef ASSERT
goetz@6458 2226 if (UseBiasedLocking) {
goetz@6458 2227 // Making the box point to itself will make it clear it went unused
goetz@6458 2228 // but also be obviously invalid.
goetz@6458 2229 __ std(r_box, 0, r_box);
goetz@6458 2230 }
goetz@6458 2231 # endif // ASSERT
goetz@6458 2232
goetz@6458 2233 // Try fastpath for locking.
goetz@6458 2234 // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
goetz@6458 2235 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
goetz@6458 2236 __ beq(r_flag, locked);
goetz@6458 2237
goetz@6458 2238 // None of the above fast optimizations worked so we have to get into the
goetz@6458 2239 // slow case of monitor enter. Inline a special case of call_VM that
goetz@6458 2240 // disallows any pending_exception.
goetz@6458 2241
goetz@6511 2242 // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
goetz@6511 2243 int frame_size = frame::abi_reg_args_size +
goetz@6458 2244 round_to(total_c_args * wordSize, frame::alignment_in_bytes);
goetz@6458 2245 __ mr(R11_scratch1, R1_SP);
goetz@6458 2246 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
goetz@6458 2247
goetz@6458 2248 // Do the call.
goetz@6458 2249 __ set_last_Java_frame(R11_scratch1, r_return_pc);
goetz@6458 2250 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
goetz@6458 2251 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
goetz@6458 2252 __ reset_last_Java_frame();
goetz@6458 2253
goetz@6458 2254 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
goetz@6458 2255
goetz@6458 2256 __ asm_assert_mem8_is_zero(thread_(pending_exception),
goetz@6458 2257 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);
goetz@6458 2258
goetz@6458 2259 __ bind(locked);
goetz@6458 2260 }
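// In pseudo code, the locking sequence above is roughly:
//
//   if (!compiler_fast_lock_object(obj, box)) {
//     save_argument_registers();
//     complete_monitor_locking_C(obj, box, thread);  // must not throw
//     restore_argument_registers();
//   }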
goetz@6458 2261
goetz@6458 2262
goetz@6458 2263 // Publish thread state
goetz@6458 2264 // --------------------------------------------------------------------------
goetz@6458 2265
goetz@6458 2266 // Use that pc we placed in r_return_pc a while back as the current frame anchor.
goetz@6458 2267 __ set_last_Java_frame(R1_SP, r_return_pc);
goetz@6458 2268
goetz@6458 2269 // Transition from _thread_in_Java to _thread_in_native.
goetz@6458 2270 __ li(R0, _thread_in_native);
goetz@6458 2271 __ release();
goetz@6458 2272 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
goetz@6458 2273 __ stw(R0, thread_(thread_state));
goetz@6458 2274 if (UseMembar) {
goetz@6458 2275 __ fence();
goetz@6458 2276 }
goetz@6458 2277
goetz@6458 2278
goetz@6458 2279 // The JNI call
goetz@6458 2280 // --------------------------------------------------------------------------
goetz@6511 2281 #if defined(ABI_ELFv2)
goetz@6511 2282 __ call_c(native_func, relocInfo::runtime_call_type);
goetz@6511 2283 #else
goetz@6458 2284 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
goetz@6458 2285 __ call_c(fd_native_method, relocInfo::runtime_call_type);
goetz@6511 2286 #endif
goetz@6458 2287
goetz@6458 2288
goetz@6458 2289 // Now, we are back from the native code.
goetz@6458 2290
goetz@6458 2291
goetz@6458 2292 // Unpack the native result.
goetz@6458 2293 // --------------------------------------------------------------------------
goetz@6458 2294
goetz@6458 2295 // For int-types, we do any needed sign-extension required.
goetz@6458 2296 // Care must be taken that the return values (R3_RET and F1_RET)
goetz@6458 2297 // will survive any VM calls for blocking or unlocking.
goetz@6458 2298 // An OOP result (handle) is done specially in the slow-path code.
goetz@6458 2299
goetz@6458 2300 switch (ret_type) {
goetz@6458 2301 case T_VOID: break; // Nothing to do!
goetz@6458 2302 case T_FLOAT: break; // Got it where we want it (unless slow-path).
goetz@6458 2303 case T_DOUBLE: break; // Got it where we want it (unless slow-path).
goetz@6458 2304 case T_LONG: break; // Got it where we want it (unless slow-path).
goetz@6458 2305 case T_OBJECT: break; // Really a handle.
goetz@6458 2306 // Cannot de-handlize until after reclaiming jvm_lock.
goetz@6458 2307 case T_ARRAY: break;
goetz@6458 2308
goetz@6458 2309 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1)
goetz@6458 2310 Label skip_modify;
goetz@6458 2311 __ cmpwi(CCR0, R3_RET, 0);
goetz@6458 2312 __ beq(CCR0, skip_modify);
goetz@6458 2313 __ li(R3_RET, 1);
goetz@6458 2314 __ bind(skip_modify);
goetz@6458 2315 break;
goetz@6458 2316 }
goetz@6458 2317 case T_BYTE: { // sign extension
goetz@6458 2318 __ extsb(R3_RET, R3_RET);
goetz@6458 2319 break;
goetz@6458 2320 }
goetz@6458 2321 case T_CHAR: { // unsigned result
goetz@6458 2322 __ andi(R3_RET, R3_RET, 0xffff);
goetz@6458 2323 break;
goetz@6458 2324 }
goetz@6458 2325 case T_SHORT: { // sign extension
goetz@6458 2326 __ extsh(R3_RET, R3_RET);
goetz@6458 2327 break;
goetz@6458 2328 }
goetz@6458 2329 case T_INT: // nothing to do
goetz@6458 2330 break;
goetz@6458 2331 default:
goetz@6458 2332 ShouldNotReachHere();
goetz@6458 2333 break;
goetz@6458 2334 }
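// For example, a T_BOOLEAN result of 2 returned by the native code (any
// non-zero value) is normalized above to the canonical Java 'true' value 1.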
goetz@6458 2335
goetz@6458 2336
goetz@6458 2337 // Publish thread state
goetz@6458 2338 // --------------------------------------------------------------------------
goetz@6458 2339
goetz@6458 2340 // Switch thread to "native transition" state before reading the
goetz@6458 2341 // synchronization state. This additional state is necessary because reading
goetz@6458 2342 // and testing the synchronization state is not atomic w.r.t. GC, as this
goetz@6458 2343 // scenario demonstrates:
goetz@6458 2344 // - Java thread A, in _thread_in_native state, loads _not_synchronized
goetz@6458 2345 // and is preempted.
goetz@6458 2346 // - VM thread changes sync state to synchronizing and suspends threads
goetz@6458 2347 // for GC.
goetz@6458 2348 // - Thread A is resumed to finish this native method, but doesn't block
goetz@6458 2349 // here since it didn't see any synchronization in progress, and escapes.
goetz@6458 2350
goetz@6458 2351 // Transition from _thread_in_native to _thread_in_native_trans.
goetz@6458 2352 __ li(R0, _thread_in_native_trans);
goetz@6458 2353 __ release();
goetz@6458 2354 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
goetz@6458 2355 __ stw(R0, thread_(thread_state));
goetz@6458 2356
goetz@6458 2357
goetz@6458 2358 // Must we block?
goetz@6458 2359 // --------------------------------------------------------------------------
goetz@6458 2360
goetz@6458 2361 // Block, if necessary, before resuming in _thread_in_Java state.
goetz@6458 2362 // In order for GC to work, don't clear the last_Java_sp until after blocking.
goetz@6458 2363 Label after_transition;
goetz@6458 2364 {
goetz@6458 2365 Label no_block, sync;
goetz@6458 2366
goetz@6458 2367 if (os::is_MP()) {
goetz@6458 2368 if (UseMembar) {
goetz@6458 2369 // Force this write out before the read below.
goetz@6458 2370 __ fence();
goetz@6458 2371 } else {
goetz@6458 2372 // Write serialization page so VM thread can do a pseudo remote membar.
goetz@6458 2373 // We use the current thread pointer to calculate a thread specific
goetz@6458 2374 // offset to write to within the page. This minimizes bus traffic
goetz@6458 2375 // due to cache line collision.
goetz@6458 2376 __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
goetz@6458 2377 }
goetz@6458 2378 }
goetz@6458 2379
goetz@6458 2380 Register sync_state_addr = r_temp_4;
goetz@6458 2381 Register sync_state = r_temp_5;
goetz@6458 2382 Register suspend_flags = r_temp_6;
goetz@6458 2383
goetz@6458 2384 __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);
goetz@6458 2385
goetz@6458 2386 // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
goetz@6458 2387 __ lwz(sync_state, 0, sync_state_addr);
goetz@6458 2388
goetz@6458 2389 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
goetz@6458 2390 __ lwz(suspend_flags, thread_(suspend_flags));
goetz@6458 2391
goetz@6458 2392 __ acquire();
goetz@6458 2393
goetz@6458 2394 Label do_safepoint;
goetz@6458 2395 // No synchronization in progress nor yet synchronized.
goetz@6458 2396 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
goetz@6458 2397 // Not suspended.
goetz@6458 2398 __ cmpwi(CCR1, suspend_flags, 0);
goetz@6458 2399
goetz@6458 2400 __ bne(CCR0, sync);
goetz@6458 2401 __ beq(CCR1, no_block);
goetz@6458 2402
goetz@6458 2403 // Block. Save any potential method result value before the operation and
goetz@6458 2404 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
goetz@6458 2405 // lets us share the oopMap we used when we went native rather than create
goetz@6458 2406 // a distinct one for this pc.
goetz@6458 2407 __ bind(sync);
goetz@6458 2408
goetz@6458 2409 address entry_point = is_critical_native
goetz@6458 2410 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
goetz@6458 2411 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
goetz@6458 2412 save_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2413 __ call_VM_leaf(entry_point, R16_thread);
goetz@6458 2414 restore_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2415
goetz@6458 2416 if (is_critical_native) {
goetz@6458 2417 __ b(after_transition); // No thread state transition here.
goetz@6458 2418 }
goetz@6458 2419 __ bind(no_block);
goetz@6458 2420 }
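  // For reference, a C-like sketch of the protocol implemented above
  // (hypothetical, self-contained names; the real state machine lives in
  // safepoint.cpp and is subject to the memory-ordering notes above):
#if 0
  struct ThreadStub { volatile int thread_state; volatile int suspend_flags; };
  enum { kInNativeTrans = 1, kNotSynchronized = 0 };
  extern volatile int g_safepoint_state;          // stands in for SafepointSynchronize::_state
  extern void block_for_safepoint(ThreadStub* t); // stands in for the VM call above

  static void native_trans_check(ThreadStub* t) {
    t->thread_state = kInNativeTrans; // the stub publishes this with release semantics
    __sync_synchronize();             // full fence (UseMembar) or serialization page write
    if (g_safepoint_state != kNotSynchronized || t->suspend_flags != 0) {
      block_for_safepoint(t);         // may block until the safepoint operation ends
    }
  }
#endif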
goetz@6458 2421
goetz@6458 2422 // Publish thread state.
goetz@6458 2423 // --------------------------------------------------------------------------
goetz@6458 2424
goetz@6458 2425   // Thread state is _thread_in_native_trans. Any safepoint blocking has
goetz@6458 2426   // already happened, so we can now change state to _thread_in_Java.
goetz@6458 2427
goetz@6458 2428 // Transition from _thread_in_native_trans to _thread_in_Java.
goetz@6458 2429 __ li(R0, _thread_in_Java);
goetz@6458 2430 __ release();
goetz@6458 2431 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
goetz@6458 2432 __ stw(R0, thread_(thread_state));
goetz@6458 2433 if (UseMembar) {
goetz@6458 2434 __ fence();
goetz@6458 2435 }
goetz@6458 2436 __ bind(after_transition);
goetz@6458 2437
goetz@6458 2438 // Reguard any pages if necessary.
goetz@6458 2439 // --------------------------------------------------------------------------
goetz@6458 2440
goetz@6458 2441 Label no_reguard;
goetz@6458 2442 __ lwz(r_temp_1, thread_(stack_guard_state));
goetz@6458 2443 __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
goetz@6458 2444 __ bne(CCR0, no_reguard);
goetz@6458 2445
goetz@6458 2446 save_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2447 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
goetz@6458 2448 restore_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2449
goetz@6458 2450 __ bind(no_reguard);
goetz@6458 2451
goetz@6458 2452
goetz@6458 2453 // Unlock
goetz@6458 2454 // --------------------------------------------------------------------------
goetz@6458 2455
goetz@6458 2456 if (method->is_synchronized()) {
goetz@6458 2457
goetz@6458 2458 ConditionRegister r_flag = CCR1;
goetz@6458 2459 const Register r_oop = r_temp_4;
goetz@6458 2460 const Register r_box = r_temp_5;
goetz@6458 2461 const Register r_exception = r_temp_6;
goetz@6458 2462 Label done;
goetz@6458 2463
goetz@6458 2464 // Get oop and address of lock object box.
goetz@6458 2465 if (method_is_static) {
goetz@6458 2466 assert(klass_offset != -1, "");
goetz@6458 2467 __ ld(r_oop, klass_offset, R1_SP);
goetz@6458 2468 } else {
goetz@6458 2469 assert(receiver_offset != -1, "");
goetz@6458 2470 __ ld(r_oop, receiver_offset, R1_SP);
goetz@6458 2471 }
goetz@6458 2472 __ addi(r_box, R1_SP, lock_offset);
goetz@6458 2473
goetz@6458 2474 // Try fastpath for unlocking.
goetz@6458 2475 __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
goetz@6458 2476 __ beq(r_flag, done);
goetz@6458 2477
goetz@6458 2478 // Save and restore any potential method result value around the unlocking operation.
goetz@6458 2479 save_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2480
goetz@6458 2481 // Must save pending exception around the slow-path VM call. Since it's a
goetz@6458 2482 // leaf call, the pending exception (if any) can be kept in a register.
goetz@6458 2483 __ ld(r_exception, thread_(pending_exception));
goetz@6458 2484 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
goetz@6458 2485 __ li(R0, 0);
goetz@6458 2486 __ std(R0, thread_(pending_exception));
goetz@6458 2487
goetz@6458 2488     // Slow case of monitor exit (unlocking).
goetz@6458 2489 // Inline a special case of call_VM that disallows any pending_exception.
goetz@6458 2490 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);
goetz@6458 2491
goetz@6458 2492 __ asm_assert_mem8_is_zero(thread_(pending_exception),
goetz@6458 2493 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
goetz@6458 2494
goetz@6458 2495 restore_native_result(masm, ret_type, workspace_slot_offset);
goetz@6458 2496
goetz@6458 2497     // Check_forward_pending_exception jumps to forward_exception if any pending
goetz@6458 2498     // exception is set. The forward_exception routine expects to see the
goetz@6458 2499     // exception in pending_exception and not in a register. Kind of clumsy,
goetz@6458 2500     // since everyone who branches to forward_exception must have tested
goetz@6458 2501     // pending_exception first and hence has it in a register already.
goetz@6458 2502 __ std(r_exception, thread_(pending_exception));
goetz@6458 2503
goetz@6458 2504 __ bind(done);
goetz@6458 2505 }
goetz@6458 2506
goetz@6458 2507 # if 0
goetz@6458 2508 // DTrace method exit
goetz@6458 2509 # endif
goetz@6458 2510
goetz@6458 2511 // Clear "last Java frame" SP and PC.
goetz@6458 2512 // --------------------------------------------------------------------------
goetz@6458 2513
goetz@6458 2514 __ reset_last_Java_frame();
goetz@6458 2515
goetz@6458 2516 // Unpack oop result.
goetz@6458 2517 // --------------------------------------------------------------------------
goetz@6458 2518
goetz@6458 2519 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
goetz@6458 2520 Label skip_unboxing;
goetz@6458 2521 __ cmpdi(CCR0, R3_RET, 0);
goetz@6458 2522 __ beq(CCR0, skip_unboxing);
goetz@6458 2523 __ ld(R3_RET, 0, R3_RET);
goetz@6458 2524 __ bind(skip_unboxing);
goetz@6458 2525 __ verify_oop(R3_RET);
goetz@6458 2526 }
goetz@6458 2527
goetz@6458 2528
goetz@6458 2529 // Reset handle block.
goetz@6458 2530 // --------------------------------------------------------------------------
goetz@6458 2531 if (!is_critical_native) {
goetz@6458 2532 __ ld(r_temp_1, thread_(active_handles));
goetz@6458 2533 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
goetz@6458 2534 __ li(r_temp_2, 0);
goetz@6458 2535 __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
goetz@6458 2536
goetz@6458 2537
goetz@6458 2538 // Check for pending exceptions.
goetz@6458 2539 // --------------------------------------------------------------------------
goetz@6458 2540 __ ld(r_temp_2, thread_(pending_exception));
goetz@6458 2541 __ cmpdi(CCR0, r_temp_2, 0);
goetz@6458 2542 __ bne(CCR0, handle_pending_exception);
goetz@6458 2543 }
goetz@6458 2544
goetz@6458 2545 // Return
goetz@6458 2546 // --------------------------------------------------------------------------
goetz@6458 2547
goetz@6458 2548 __ pop_frame();
goetz@6458 2549 __ restore_LR_CR(R11);
goetz@6458 2550 __ blr();
goetz@6458 2551
goetz@6458 2552
goetz@6458 2553 // Handler for pending exceptions (out-of-line).
goetz@6458 2554 // --------------------------------------------------------------------------
goetz@6458 2555
goetz@6458 2556 // Since this is a native call, we know the proper exception handler
goetz@6458 2557 // is the empty function. We just pop this frame and then jump to
goetz@6458 2558 // forward_exception_entry.
goetz@6458 2559 if (!is_critical_native) {
goetz@6458 2560 __ align(InteriorEntryAlignment);
goetz@6458 2561 __ bind(handle_pending_exception);
goetz@6458 2562
goetz@6458 2563 __ pop_frame();
goetz@6458 2564 __ restore_LR_CR(R11);
goetz@6458 2565 __ b64_patchable((address)StubRoutines::forward_exception_entry(),
goetz@6458 2566 relocInfo::runtime_call_type);
goetz@6458 2567 }
goetz@6458 2568
goetz@6458 2569   // Handler for an inline cache (IC) miss (out-of-line).
goetz@6458 2570 // --------------------------------------------------------------------------
goetz@6458 2571
goetz@6458 2572 if (!method_is_static) {
goetz@6458 2573 __ align(InteriorEntryAlignment);
goetz@6458 2574 __ bind(ic_miss);
goetz@6458 2575
goetz@6458 2576 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
goetz@6458 2577 relocInfo::runtime_call_type);
goetz@6458 2578 }
goetz@6458 2579
goetz@6458 2580 // Done.
goetz@6458 2581 // --------------------------------------------------------------------------
goetz@6458 2582
goetz@6458 2583 __ flush();
goetz@6458 2584
goetz@6458 2585 nmethod *nm = nmethod::new_native_nmethod(method,
goetz@6458 2586 compile_id,
goetz@6458 2587 masm->code(),
goetz@6458 2588 vep_start_pc-start_pc,
goetz@6458 2589 frame_done_pc-start_pc,
goetz@6458 2590 stack_slots / VMRegImpl::slots_per_word,
goetz@6458 2591 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
goetz@6458 2592 in_ByteSize(lock_offset),
goetz@6458 2593 oop_maps);
goetz@6458 2594
goetz@6458 2595 if (is_critical_native) {
goetz@6458 2596 nm->set_lazy_critical_native(true);
goetz@6458 2597 }
goetz@6458 2598
goetz@6458 2599 return nm;
goetz@6458 2600 #else
goetz@6458 2601 ShouldNotReachHere();
goetz@6458 2602 return NULL;
goetz@6458 2603 #endif // COMPILER2
goetz@6458 2604 }
goetz@6458 2605
goetz@6458 2606 // This function returns the adjustment size (in number of words) applied to
goetz@6458 2607 // the c2i adapter activation, for use during deoptimization.
goetz@6458 2608 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
goetz@6458 2609 return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
goetz@6458 2610 }
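// A worked example (hypothetical numbers): with callee_parameters = 2,
// callee_locals = 7 and one word per stack element, the adjustment is
// round_to(5, frame::alignment_in_bytes); for an alignment value of 16
// that is round_to(5, 16) = 16, i.e. room for the 5 extra locals plus
// alignment padding.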
goetz@6458 2611
goetz@6458 2612 uint SharedRuntime::out_preserve_stack_slots() {
goetz@6458 2613 #ifdef COMPILER2
goetz@6458 2614 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
goetz@6458 2615 #else
goetz@6458 2616 return 0;
goetz@6458 2617 #endif
goetz@6458 2618 }
goetz@6458 2619
goetz@6458 2620 #ifdef COMPILER2
goetz@6458 2621 // Frame generation for deopt and uncommon trap blobs.
goetz@6458 2622 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
goetz@6458 2623 /* Read */
goetz@6458 2624 Register unroll_block_reg,
goetz@6458 2625 /* Update */
goetz@6458 2626 Register frame_sizes_reg,
goetz@6458 2627 Register number_of_frames_reg,
goetz@6458 2628 Register pcs_reg,
goetz@6458 2629 /* Invalidate */
goetz@6458 2630 Register frame_size_reg,
goetz@6458 2631 Register pc_reg) {
goetz@6458 2632
goetz@6458 2633 __ ld(pc_reg, 0, pcs_reg);
goetz@6458 2634 __ ld(frame_size_reg, 0, frame_sizes_reg);
goetz@6458 2635 __ std(pc_reg, _abi(lr), R1_SP);
goetz@6458 2636 __ push_frame(frame_size_reg, R0/*tmp*/);
goetz@6495 2637 #ifdef CC_INTERP
goetz@6458 2638 __ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
goetz@6495 2639 #else
goetz@6512 2640 #ifdef ASSERT
goetz@6512 2641 __ load_const_optimized(pc_reg, 0x5afe);
goetz@6512 2642 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
goetz@6495 2643 #endif
goetz@6512 2644 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
goetz@6512 2645 #endif // CC_INTERP
goetz@6458 2646 __ addi(number_of_frames_reg, number_of_frames_reg, -1);
goetz@6458 2647 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
goetz@6458 2648 __ addi(pcs_reg, pcs_reg, wordSize);
goetz@6458 2649 }
goetz@6458 2650
goetz@6458 2651 // Loop through the UnrollBlock info and create new frames.
goetz@6458 2652 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
goetz@6458 2653 /* read */
goetz@6458 2654 Register unroll_block_reg,
goetz@6458 2655 /* invalidate */
goetz@6458 2656 Register frame_sizes_reg,
goetz@6458 2657 Register number_of_frames_reg,
goetz@6458 2658 Register pcs_reg,
goetz@6458 2659 Register frame_size_reg,
goetz@6458 2660 Register pc_reg) {
goetz@6458 2661 Label loop;
goetz@6458 2662
goetz@6458 2663 // _number_of_frames is of type int (deoptimization.hpp)
goetz@6458 2664 __ lwa(number_of_frames_reg,
goetz@6458 2665 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
goetz@6458 2666 unroll_block_reg);
goetz@6458 2667 __ ld(pcs_reg,
goetz@6458 2668 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
goetz@6458 2669 unroll_block_reg);
goetz@6458 2670 __ ld(frame_sizes_reg,
goetz@6458 2671 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
goetz@6458 2672 unroll_block_reg);
goetz@6458 2673
goetz@6458 2674 // stack: (caller_of_deoptee, ...).
goetz@6458 2675
goetz@6458 2676   // At this point we have either an interpreter frame or a compiled
goetz@6458 2677   // frame on top of the stack. If it is a compiled frame, we push a new
goetz@6458 2678   // c2i adapter here.
goetz@6458 2679
goetz@6458 2680 // Memorize top-frame stack-pointer.
goetz@6458 2681 __ mr(frame_size_reg/*old_sp*/, R1_SP);
goetz@6458 2682
goetz@6458 2683 // Resize interpreter top frame OR C2I adapter.
goetz@6458 2684
goetz@6458 2685 // At this moment, the top frame (which is the caller of the deoptee) is
goetz@6458 2686 // an interpreter frame or a newly pushed C2I adapter or an entry frame.
goetz@6458 2687 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
goetz@6458 2688 // outgoing arguments.
goetz@6458 2689 //
goetz@6458 2690 // In order to push the interpreter frame for the deoptee, we need to
goetz@6458 2691 // resize the top frame such that we are able to place the deoptee's
goetz@6458 2692 // locals in the frame.
goetz@6458 2693 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
goetz@6458 2694 // into a valid PARENT_IJAVA_FRAME_ABI.
goetz@6458 2695
goetz@6458 2696 __ lwa(R11_scratch1,
goetz@6458 2697 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
goetz@6458 2698 unroll_block_reg);
goetz@6458 2699 __ neg(R11_scratch1, R11_scratch1);
goetz@6458 2700
goetz@6458 2701 // R11_scratch1 contains size of locals for frame resizing.
goetz@6458 2702 // R12_scratch2 contains top frame's lr.
goetz@6458 2703
goetz@6458 2704   // Resizing the frame by the complete frame size prevents the TOC from
goetz@6458 2705   // being overwritten by locals. A more stack-space-saving approach would
goetz@6458 2706   // be to copy the TOC to its location in the new ABI.
goetz@6458 2707 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
goetz@6458 2708
goetz@6458 2709   // Now, resize the frame.
goetz@6458 2710 __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
goetz@6458 2711
goetz@6458 2712 // In the case where we have resized a c2i frame above, the optional
goetz@6458 2713 // alignment below the locals has size 32 (why?).
goetz@6458 2714 __ std(R12_scratch2, _abi(lr), R1_SP);
goetz@6458 2715
goetz@6458 2716 // Initialize initial_caller_sp.
goetz@6512 2717 #ifdef CC_INTERP
goetz@6458 2718 __ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
goetz@6512 2719 #else
goetz@6512 2720 #ifdef ASSERT
goetz@6512 2721 __ load_const_optimized(pc_reg, 0x5afe);
goetz@6512 2722 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
goetz@6512 2723 #endif
goetz@6512 2724 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
goetz@6512 2725 #endif // CC_INTERP
goetz@6458 2726
goetz@6458 2727 #ifdef ASSERT
goetz@6458 2728 // Make sure that there is at least one entry in the array.
goetz@6458 2729 __ cmpdi(CCR0, number_of_frames_reg, 0);
goetz@6458 2730 __ asm_assert_ne("array_size must be > 0", 0x205);
goetz@6458 2731 #endif
goetz@6458 2732
goetz@6458 2733 // Now push the new interpreter frames.
goetz@6458 2734 //
goetz@6458 2735 __ bind(loop);
goetz@6458 2736 // Allocate a new frame, fill in the pc.
goetz@6458 2737 push_skeleton_frame(masm, deopt,
goetz@6458 2738 unroll_block_reg,
goetz@6458 2739 frame_sizes_reg,
goetz@6458 2740 number_of_frames_reg,
goetz@6458 2741 pcs_reg,
goetz@6458 2742 frame_size_reg,
goetz@6458 2743 pc_reg);
goetz@6458 2744 __ cmpdi(CCR0, number_of_frames_reg, 0);
goetz@6458 2745 __ bne(CCR0, loop);
goetz@6458 2746
goetz@6458 2747 // Get the return address pointing into the frame manager.
goetz@6458 2748 __ ld(R0, 0, pcs_reg);
goetz@6458 2749 // Store it in the top interpreter frame.
goetz@6458 2750 __ std(R0, _abi(lr), R1_SP);
goetz@6458 2751 // Initialize frame_manager_lr of interpreter top frame.
goetz@6495 2752 #ifdef CC_INTERP
goetz@6458 2753 __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
goetz@6495 2754 #endif
goetz@6458 2755 }
goetz@6458 2756 #endif
goetz@6458 2757
goetz@6458 2758 void SharedRuntime::generate_deopt_blob() {
goetz@6458 2759 // Allocate space for the code
goetz@6458 2760 ResourceMark rm;
goetz@6458 2761 // Setup code generation tools
goetz@6458 2762 CodeBuffer buffer("deopt_blob", 2048, 1024);
goetz@6458 2763 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
goetz@6458 2764 Label exec_mode_initialized;
goetz@6458 2765 int frame_size_in_words;
goetz@6458 2766 OopMap* map = NULL;
goetz@6458 2767 OopMapSet *oop_maps = new OopMapSet();
goetz@6458 2768
goetz@6458 2769   // Size of the ABI reg-args area (formerly ABI112) plus spill slots for R3_RET and F1_RET.
goetz@6511 2770 const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
goetz@6458 2771 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
goetz@6458 2772 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
goetz@6458 2773
goetz@6458 2774 const Register exec_mode_reg = R21_tmp1;
goetz@6458 2775
goetz@6458 2776 const address start = __ pc();
goetz@6458 2777
goetz@6458 2778 #ifdef COMPILER2
goetz@6458 2779 // --------------------------------------------------------------------------
goetz@6458 2780   // Prolog for the non-exception case!
goetz@6458 2781
goetz@6458 2782 // We have been called from the deopt handler of the deoptee.
goetz@6458 2783 //
goetz@6458 2784 // deoptee:
goetz@6458 2785 // ...
goetz@6458 2786 // call X
goetz@6458 2787 // ...
goetz@6458 2788 // deopt_handler: call_deopt_stub
goetz@6458 2789 // cur. return pc --> ...
goetz@6458 2790 //
goetz@6458 2791 // So currently SR_LR points behind the call in the deopt handler.
goetz@6458 2792 // We adjust it such that it points to the start of the deopt handler.
goetz@6458 2793 // The return_pc has been stored in the frame of the deoptee and
goetz@6458 2794 // will replace the address of the deopt_handler in the call
goetz@6458 2795 // to Deoptimization::fetch_unroll_info below.
goetz@6458 2796 // We can't grab a free register here, because all registers may
goetz@6458 2797 // contain live values, so let the RegisterSaver do the adjustment
goetz@6458 2798 // of the return pc.
goetz@6517 2799 const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
goetz@6458 2800
goetz@6458 2801 // Push the "unpack frame"
goetz@6458 2802 // Save everything in sight.
goetz@6511 2803 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
goetz@6511 2804 &first_frame_size_in_bytes,
goetz@6511 2805 /*generate_oop_map=*/ true,
goetz@6511 2806 return_pc_adjustment_no_exception,
goetz@6511 2807 RegisterSaver::return_pc_is_lr);
goetz@6458 2808 assert(map != NULL, "OopMap must have been created");
goetz@6458 2809
goetz@6458 2810 __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
goetz@6458 2811 // Save exec mode for unpack_frames.
goetz@6458 2812 __ b(exec_mode_initialized);
goetz@6458 2813
goetz@6458 2814 // --------------------------------------------------------------------------
goetz@6458 2815 // Prolog for exception case
goetz@6458 2816
goetz@6458 2817 // An exception is pending.
goetz@6458 2818 // We have been called with a return (interpreter) or a jump (exception blob).
goetz@6458 2819 //
goetz@6458 2820 // - R3_ARG1: exception oop
goetz@6458 2821 // - R4_ARG2: exception pc
goetz@6458 2822
goetz@6458 2823 int exception_offset = __ pc() - start;
goetz@6458 2824
goetz@6458 2825 BLOCK_COMMENT("Prolog for exception case");
goetz@6458 2826
goetz@6458 2827   // The RegisterSaver doesn't need to adjust the return pc for this situation.
goetz@6458 2828 const int return_pc_adjustment_exception = 0;
goetz@6458 2829
goetz@6458 2830 // Push the "unpack frame".
goetz@6458 2831 // Save everything in sight.
goetz@6458 2832 assert(R4 == R4_ARG2, "exception pc must be in r4");
goetz@6511 2833 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
goetz@6511 2834 &first_frame_size_in_bytes,
goetz@6511 2835 /*generate_oop_map=*/ false,
goetz@6511 2836 return_pc_adjustment_exception,
goetz@6511 2837 RegisterSaver::return_pc_is_r4);
goetz@6458 2838
goetz@6458 2839 // Deopt during an exception. Save exec mode for unpack_frames.
goetz@6458 2840 __ li(exec_mode_reg, Deoptimization::Unpack_exception);
goetz@6458 2841
goetz@6458 2842 // Store exception oop and pc in thread (location known to GC).
goetz@6458 2843 // This is needed since the call to "fetch_unroll_info()" may safepoint.
goetz@6458 2844 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
goetz@6458 2845 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
goetz@6458 2846
goetz@6458 2847 // fall through
goetz@6458 2848
goetz@6458 2849 // --------------------------------------------------------------------------
goetz@6458 2850 __ BIND(exec_mode_initialized);
goetz@6458 2851
goetz@6458 2852 {
goetz@6458 2853 const Register unroll_block_reg = R22_tmp2;
goetz@6458 2854
goetz@6458 2855 // We need to set `last_Java_frame' because `fetch_unroll_info' will
goetz@6458 2856 // call `last_Java_frame()'. The value of the pc in the frame is not
goetz@6458 2857 // particularly important. It just needs to identify this blob.
goetz@6458 2858 __ set_last_Java_frame(R1_SP, noreg);
goetz@6458 2859
goetz@6458 2860 // With EscapeAnalysis turned on, this call may safepoint!
goetz@6458 2861 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
goetz@6458 2862 address calls_return_pc = __ last_calls_return_pc();
goetz@6458 2863 // Set an oopmap for the call site that describes all our saved registers.
goetz@6458 2864 oop_maps->add_gc_map(calls_return_pc - start, map);
goetz@6458 2865
goetz@6458 2866 __ reset_last_Java_frame();
goetz@6458 2867 // Save the return value.
goetz@6458 2868 __ mr(unroll_block_reg, R3_RET);
goetz@6458 2869
goetz@6458 2870 // Restore only the result registers that have been saved
goetz@6458 2871 // by save_volatile_registers(...).
goetz@6458 2872 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
goetz@6458 2873
goetz@6458 2874 // In excp_deopt_mode, restore and clear exception oop which we
goetz@6458 2875 // stored in the thread during exception entry above. The exception
goetz@6458 2876 // oop will be the return value of this stub.
goetz@6458 2877 Label skip_restore_excp;
goetz@6458 2878 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
goetz@6458 2879 __ bne(CCR0, skip_restore_excp);
goetz@6458 2880 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
goetz@6458 2881 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
goetz@6458 2882 __ li(R0, 0);
goetz@6458 2883 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
goetz@6458 2884 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
goetz@6458 2885 __ BIND(skip_restore_excp);
goetz@6458 2886
goetz@6458 2887     // Reload narrow_oop_base.
goetz@6458 2888 if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
goetz@6458 2889 __ load_const_optimized(R30, Universe::narrow_oop_base());
goetz@6458 2890 }
goetz@6458 2891
goetz@6458 2892 __ pop_frame();
goetz@6458 2893
goetz@6458 2894 // stack: (deoptee, optional i2c, caller of deoptee, ...).
goetz@6458 2895
goetz@6458 2896 // pop the deoptee's frame
goetz@6458 2897 __ pop_frame();
goetz@6458 2898
goetz@6458 2899 // stack: (caller_of_deoptee, ...).
goetz@6458 2900
goetz@6458 2901 // Loop through the `UnrollBlock' info and create interpreter frames.
goetz@6458 2902 push_skeleton_frames(masm, true/*deopt*/,
goetz@6458 2903 unroll_block_reg,
goetz@6458 2904 R23_tmp3,
goetz@6458 2905 R24_tmp4,
goetz@6458 2906 R25_tmp5,
goetz@6458 2907 R26_tmp6,
goetz@6458 2908 R27_tmp7);
goetz@6458 2909
goetz@6458 2910 // stack: (skeletal interpreter frame, ..., optional skeletal
goetz@6458 2911 // interpreter frame, optional c2i, caller of deoptee, ...).
goetz@6458 2912 }
goetz@6458 2913
goetz@6458 2914   // Push an `unpack_frame' taking care of float / int return values.
goetz@6458 2915 __ push_frame(frame_size_in_bytes, R0/*tmp*/);
goetz@6458 2916
goetz@6458 2917 // stack: (unpack frame, skeletal interpreter frame, ..., optional
goetz@6458 2918 // skeletal interpreter frame, optional c2i, caller of deoptee,
goetz@6458 2919 // ...).
goetz@6458 2920
goetz@6458 2921 // Spill live volatile registers since we'll do a call.
goetz@6511 2922 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
goetz@6511 2923 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
goetz@6458 2924
goetz@6458 2925   // Let the unpacker lay out information in the skeletal frames just
goetz@6458 2926 // allocated.
goetz@6458 2927 __ get_PC_trash_LR(R3_RET);
goetz@6458 2928 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
goetz@6458 2929 // This is a call to a LEAF method, so no oop map is required.
goetz@6458 2930 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
goetz@6458 2931 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
goetz@6458 2932 __ reset_last_Java_frame();
goetz@6458 2933
goetz@6458 2934 // Restore the volatiles saved above.
goetz@6511 2935 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
goetz@6511 2936 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
goetz@6458 2937
goetz@6458 2938 // Pop the unpack frame.
goetz@6458 2939 __ pop_frame();
goetz@6458 2940 __ restore_LR_CR(R0);
goetz@6458 2941
goetz@6458 2942 // stack: (top interpreter frame, ..., optional interpreter frame,
goetz@6458 2943 // optional c2i, caller of deoptee, ...).
goetz@6458 2944
goetz@6458 2945 // Initialize R14_state.
goetz@6512 2946 #ifdef CC_INTERP
goetz@6458 2947 __ ld(R14_state, 0, R1_SP);
goetz@6495 2948 __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
goetz@6458 2949   // Also initialize R15_prev_state.
goetz@6458 2950 __ restore_prev_state();
goetz@6512 2951 #else
goetz@6512 2952 __ restore_interpreter_state(R11_scratch1);
goetz@6512 2953 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
goetz@6512 2954 #endif // CC_INTERP
goetz@6512 2955
goetz@6458 2956
goetz@6458 2957 // Return to the interpreter entry point.
goetz@6458 2958 __ blr();
goetz@6458 2959 __ flush();
goetz@6458 2960 #else // COMPILER2
goetz@6458 2961 __ unimplemented("deopt blob needed only with compiler");
goetz@6458 2962 int exception_offset = __ pc() - start;
goetz@6458 2963 #endif // COMPILER2
goetz@6458 2964
goetz@6458 2965 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize);
goetz@6458 2966 }
goetz@6458 2967
goetz@6458 2968 #ifdef COMPILER2
goetz@6458 2969 void SharedRuntime::generate_uncommon_trap_blob() {
goetz@6458 2970 // Allocate space for the code.
goetz@6458 2971 ResourceMark rm;
goetz@6458 2972 // Setup code generation tools.
goetz@6458 2973 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
goetz@6458 2974 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
goetz@6458 2975 address start = __ pc();
goetz@6458 2976
goetz@6458 2977 Register unroll_block_reg = R21_tmp1;
goetz@6458 2978 Register klass_index_reg = R22_tmp2;
goetz@6458 2979 Register unc_trap_reg = R23_tmp3;
goetz@6458 2980
goetz@6458 2981 OopMapSet* oop_maps = new OopMapSet();
goetz@6511 2982 int frame_size_in_bytes = frame::abi_reg_args_size;
goetz@6458 2983 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
goetz@6458 2984
goetz@6458 2985 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
goetz@6458 2986
goetz@6458 2987 // Push a dummy `unpack_frame' and call
goetz@6458 2988 // `Deoptimization::uncommon_trap' to pack the compiled frame into a
goetz@6458 2989 // vframe array and return the `UnrollBlock' information.
goetz@6458 2990
goetz@6458 2991 // Save LR to compiled frame.
goetz@6458 2992 __ save_LR_CR(R11_scratch1);
goetz@6458 2993
goetz@6458 2994 // Push an "uncommon_trap" frame.
goetz@6511 2995 __ push_frame_reg_args(0, R11_scratch1);
goetz@6458 2996
goetz@6458 2997 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
goetz@6458 2998
goetz@6458 2999 // Set the `unpack_frame' as last_Java_frame.
goetz@6458 3000 // `Deoptimization::uncommon_trap' expects it and considers its
goetz@6458 3001 // sender frame as the deoptee frame.
goetz@6458 3002 // Remember the offset of the instruction whose address will be
goetz@6458 3003 // moved to R11_scratch1.
goetz@6458 3004 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
goetz@6458 3005
goetz@6458 3006 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
goetz@6458 3007
goetz@6458 3008 __ mr(klass_index_reg, R3);
goetz@6458 3009 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
goetz@6458 3010 R16_thread, klass_index_reg);
goetz@6458 3011
goetz@6458 3012 // Set an oopmap for the call site.
goetz@6458 3013 oop_maps->add_gc_map(gc_map_pc - start, map);
goetz@6458 3014
goetz@6458 3015 __ reset_last_Java_frame();
goetz@6458 3016
goetz@6458 3017 // Pop the `unpack frame'.
goetz@6458 3018 __ pop_frame();
goetz@6458 3019
goetz@6458 3020 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
goetz@6458 3021
goetz@6458 3022 // Save the return value.
goetz@6458 3023 __ mr(unroll_block_reg, R3_RET);
goetz@6458 3024
goetz@6458 3025 // Pop the uncommon_trap frame.
goetz@6458 3026 __ pop_frame();
goetz@6458 3027
goetz@6458 3028 // stack: (caller_of_deoptee, ...).
goetz@6458 3029
goetz@6458 3030 // Allocate new interpreter frame(s) and possibly a c2i adapter
goetz@6458 3031 // frame.
goetz@6458 3032 push_skeleton_frames(masm, false/*deopt*/,
goetz@6458 3033 unroll_block_reg,
goetz@6458 3034 R22_tmp2,
goetz@6458 3035 R23_tmp3,
goetz@6458 3036 R24_tmp4,
goetz@6458 3037 R25_tmp5,
goetz@6458 3038 R26_tmp6);
goetz@6458 3039
goetz@6458 3040 // stack: (skeletal interpreter frame, ..., optional skeletal
goetz@6458 3041 // interpreter frame, optional c2i, caller of deoptee, ...).
goetz@6458 3042
goetz@6458 3043 // Push a dummy `unpack_frame' taking care of float return values.
goetz@6458 3044   // Call `Deoptimization::unpack_frames' to lay out information in the
goetz@6458 3045 // interpreter frames just created.
goetz@6458 3046
goetz@6458 3047 // Push a simple "unpack frame" here.
goetz@6511 3048 __ push_frame_reg_args(0, R11_scratch1);
goetz@6458 3049
goetz@6458 3050 // stack: (unpack frame, skeletal interpreter frame, ..., optional
goetz@6458 3051 // skeletal interpreter frame, optional c2i, caller of deoptee,
goetz@6458 3052 // ...).
goetz@6458 3053
goetz@6458 3054 // Set the "unpack_frame" as last_Java_frame.
goetz@6458 3055 __ get_PC_trash_LR(R11_scratch1);
goetz@6458 3056 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
goetz@6458 3057
goetz@6458 3058 // Indicate it is the uncommon trap case.
goetz@6458 3059 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
goetz@6458 3060   // Let the unpacker lay out information in the skeletal frames just
goetz@6458 3061 // allocated.
goetz@6458 3062 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
goetz@6458 3063 R16_thread, unc_trap_reg);
goetz@6458 3064
goetz@6458 3065 __ reset_last_Java_frame();
goetz@6458 3066 // Pop the `unpack frame'.
goetz@6458 3067 __ pop_frame();
goetz@6458 3068 // Restore LR from top interpreter frame.
goetz@6458 3069 __ restore_LR_CR(R11_scratch1);
goetz@6458 3070
goetz@6458 3071 // stack: (top interpreter frame, ..., optional interpreter frame,
goetz@6458 3072 // optional c2i, caller of deoptee, ...).
goetz@6458 3073
goetz@6512 3074 #ifdef CC_INTERP
goetz@6458 3075 // Initialize R14_state, ...
goetz@6458 3076 __ ld(R11_scratch1, 0, R1_SP);
goetz@6495 3077 __ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
goetz@6458 3078 // also initialize R15_prev_state.
goetz@6458 3079 __ restore_prev_state();
goetz@6512 3080 #else
goetz@6512 3081 __ restore_interpreter_state(R11_scratch1);
goetz@6512 3082 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
goetz@6512 3083 #endif // CC_INTERP
goetz@6512 3084
goetz@6458 3085 // Return to the interpreter entry point.
goetz@6458 3086 __ blr();
goetz@6458 3087
goetz@6458 3088 masm->flush();
goetz@6458 3089
goetz@6458 3090 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
goetz@6458 3091 }
goetz@6458 3092 #endif // COMPILER2
goetz@6458 3093
goetz@6458 3094 // Generate a special Compile2Runtime blob that saves all registers and sets up the oopmap.
goetz@6458 3095 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
goetz@6458 3096 assert(StubRoutines::forward_exception_entry() != NULL,
goetz@6458 3097 "must be generated before");
goetz@6458 3098
goetz@6458 3099 ResourceMark rm;
goetz@6458 3100 OopMapSet *oop_maps = new OopMapSet();
goetz@6458 3101 OopMap* map;
goetz@6458 3102
goetz@6458 3103 // Allocate space for the code. Setup code generation tools.
goetz@6458 3104 CodeBuffer buffer("handler_blob", 2048, 1024);
goetz@6458 3105 MacroAssembler* masm = new MacroAssembler(&buffer);
goetz@6458 3106
goetz@6458 3107 address start = __ pc();
goetz@6458 3108 int frame_size_in_bytes = 0;
goetz@6458 3109
goetz@6458 3110 RegisterSaver::ReturnPCLocation return_pc_location;
goetz@6458 3111 bool cause_return = (poll_type == POLL_AT_RETURN);
goetz@6458 3112 if (cause_return) {
goetz@6458 3113 // Nothing to do here. The frame has already been popped in MachEpilogNode.
goetz@6458 3114 // Register LR already contains the return pc.
goetz@6458 3115 return_pc_location = RegisterSaver::return_pc_is_lr;
goetz@6458 3116 } else {
goetz@6458 3117 // Use thread()->saved_exception_pc() as return pc.
goetz@6458 3118 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
goetz@6458 3119 }
goetz@6458 3120
goetz@6458 3121 // Save registers, fpu state, and flags.
goetz@6511 3122 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
goetz@6511 3123 &frame_size_in_bytes,
goetz@6511 3124 /*generate_oop_map=*/ true,
goetz@6511 3125 /*return_pc_adjustment=*/0,
goetz@6511 3126 return_pc_location);
goetz@6458 3127
goetz@6458 3128 // The following is basically a call_VM. However, we need the precise
goetz@6458 3129 // address of the call in order to generate an oopmap. Hence, we do all the
goetz@6458 3130   // work ourselves.
goetz@6458 3131 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
goetz@6458 3132
goetz@6458 3133 // The return address must always be correct so that the frame constructor
goetz@6458 3134 // never sees an invalid pc.
goetz@6458 3135
goetz@6458 3136 // Do the call
goetz@6458 3137 __ call_VM_leaf(call_ptr, R16_thread);
goetz@6458 3138 address calls_return_pc = __ last_calls_return_pc();
goetz@6458 3139
goetz@6458 3140 // Set an oopmap for the call site. This oopmap will map all
goetz@6458 3141 // oop-registers and debug-info registers as callee-saved. This
goetz@6458 3142 // will allow deoptimization at this safepoint to find all possible
goetz@6458 3143 // debug-info recordings, as well as let GC find all oops.
goetz@6458 3144 oop_maps->add_gc_map(calls_return_pc - start, map);
goetz@6458 3145
goetz@6458 3146 Label noException;
goetz@6458 3147
goetz@6458 3148 // Clear the last Java frame.
goetz@6458 3149 __ reset_last_Java_frame();
goetz@6458 3150
goetz@6458 3151 BLOCK_COMMENT(" Check pending exception.");
goetz@6458 3152 const Register pending_exception = R0;
goetz@6458 3153 __ ld(pending_exception, thread_(pending_exception));
goetz@6458 3154 __ cmpdi(CCR0, pending_exception, 0);
goetz@6458 3155 __ beq(CCR0, noException);
goetz@6458 3156
goetz@6458 3157 // Exception pending
goetz@6458 3158 RegisterSaver::restore_live_registers_and_pop_frame(masm,
goetz@6458 3159 frame_size_in_bytes,
goetz@6458 3160 /*restore_ctr=*/true);
goetz@6458 3161
goetz@6458 3162 BLOCK_COMMENT(" Jump to forward_exception_entry.");
goetz@6458 3163 // Jump to forward_exception_entry, with the issuing PC in LR
goetz@6458 3164 // so it looks like the original nmethod called forward_exception_entry.
goetz@6458 3165 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
goetz@6458 3166
goetz@6458 3167 // No exception case.
goetz@6458 3168 __ BIND(noException);
goetz@6458 3169
goetz@6458 3170
goetz@6458 3171 // Normal exit, restore registers and exit.
goetz@6458 3172 RegisterSaver::restore_live_registers_and_pop_frame(masm,
goetz@6458 3173 frame_size_in_bytes,
goetz@6458 3174 /*restore_ctr=*/true);
goetz@6458 3175
goetz@6458 3176 __ blr();
goetz@6458 3177
goetz@6458 3178 // Make sure all code is generated
goetz@6458 3179 masm->flush();
goetz@6458 3180
goetz@6458 3181   // Fill out other meta info.
goetz@6458 3182 // CodeBlob frame size is in words.
goetz@6458 3183 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
goetz@6458 3184 }
goetz@6458 3185
goetz@6458 3186 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
goetz@6458 3187 //
goetz@6458 3188 // Generate a stub that calls into the VM to find out the proper destination
goetz@6458 3189 // of a Java call. All the argument registers are live at this point,
goetz@6458 3190 // but since this is generic code we don't know what they are and the caller
goetz@6458 3191 // must do any GC of the args.
goetz@6458 3192 //
goetz@6458 3193 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
goetz@6458 3194
goetz@6458 3195 // allocate space for the code
goetz@6458 3196 ResourceMark rm;
goetz@6458 3197
goetz@6458 3198 CodeBuffer buffer(name, 1000, 512);
goetz@6458 3199 MacroAssembler* masm = new MacroAssembler(&buffer);
goetz@6458 3200
goetz@6458 3201 int frame_size_in_bytes;
goetz@6458 3202
goetz@6458 3203 OopMapSet *oop_maps = new OopMapSet();
goetz@6458 3204 OopMap* map = NULL;
goetz@6458 3205
goetz@6458 3206 address start = __ pc();
goetz@6458 3207
goetz@6511 3208 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
goetz@6511 3209 &frame_size_in_bytes,
goetz@6511 3210 /*generate_oop_map*/ true,
goetz@6511 3211 /*return_pc_adjustment*/ 0,
goetz@6511 3212 RegisterSaver::return_pc_is_lr);
goetz@6458 3213
goetz@6458 3214 // Use noreg as last_Java_pc, the return pc will be reconstructed
goetz@6458 3215 // from the physical frame.
goetz@6458 3216 __ set_last_Java_frame(/*sp*/R1_SP, noreg);
goetz@6458 3217
goetz@6458 3218 int frame_complete = __ offset();
goetz@6458 3219
goetz@6458 3220 // Pass R19_method as 2nd (optional) argument, used by
goetz@6458 3221 // counter_overflow_stub.
goetz@6458 3222 __ call_VM_leaf(destination, R16_thread, R19_method);
goetz@6458 3223 address calls_return_pc = __ last_calls_return_pc();
goetz@6458 3224 // Set an oopmap for the call site.
goetz@6458 3225 // We need this not only for callee-saved registers, but also for volatile
goetz@6458 3226 // registers that the compiler might be keeping live across a safepoint.
goetz@6458 3227 // Create the oopmap for the call's return pc.
goetz@6458 3228 oop_maps->add_gc_map(calls_return_pc - start, map);
goetz@6458 3229
goetz@6458 3230 // R3_RET contains the address we are going to jump to assuming no exception got installed.
goetz@6458 3231
goetz@6458 3232 // clear last_Java_sp
goetz@6458 3233 __ reset_last_Java_frame();
goetz@6458 3234
goetz@6458 3235 // Check for pending exceptions.
goetz@6458 3236 BLOCK_COMMENT("Check for pending exceptions.");
goetz@6458 3237 Label pending;
goetz@6458 3238 __ ld(R11_scratch1, thread_(pending_exception));
goetz@6458 3239 __ cmpdi(CCR0, R11_scratch1, 0);
goetz@6458 3240 __ bne(CCR0, pending);
goetz@6458 3241
goetz@6458 3242 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
goetz@6458 3243
goetz@6458 3244 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
goetz@6458 3245
goetz@6512 3246 // Get the returned method.
goetz@6458 3247 __ get_vm_result_2(R19_method);
goetz@6458 3248
goetz@6458 3249 __ bctr();
goetz@6458 3250
goetz@6458 3251
goetz@6458 3252 // Pending exception after the safepoint.
goetz@6458 3253 __ BIND(pending);
goetz@6458 3254
goetz@6458 3255 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
goetz@6458 3256
goetz@6458 3257 // exception pending => remove activation and forward to exception handler
goetz@6458 3258
goetz@6458 3259 __ li(R11_scratch1, 0);
goetz@6458 3260 __ ld(R3_ARG1, thread_(pending_exception));
goetz@6458 3261 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
goetz@6458 3262 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
goetz@6458 3263
goetz@6458 3264 // -------------
goetz@6458 3265 // Make sure all code is generated.
goetz@6458 3266 masm->flush();
goetz@6458 3267
goetz@6458 3268   // Return the blob.
goetz@6458 3269   // RuntimeStub frame size is given in words; convert from bytes.
goetz@6458 3270 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
goetz@6458 3271 oop_maps, true);
goetz@6458 3272 }
mdoerr@8903 3273
mdoerr@8903 3274
mdoerr@8903 3275 //------------------------------Montgomery multiplication------------------------
mdoerr@8903 3276 //
mdoerr@8903 3277
mdoerr@8903 3278 // Subtract 0:b from carry:a. Return carry.
mdoerr@8903 3279 static unsigned long
mdoerr@8903 3280 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
mdoerr@8903 3281 long i = 0;
mdoerr@8903 3282 unsigned long tmp, tmp2;
mdoerr@8903 3283 __asm__ __volatile__ (
mdoerr@8903 3284 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA
mdoerr@8903 3285 "mtctr %[len] \n"
mdoerr@8903 3286 "0: \n"
mdoerr@8903 3287 "ldx %[tmp], %[i], %[a] \n"
mdoerr@8903 3288 "ldx %[tmp2], %[i], %[b] \n"
mdoerr@8903 3289 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended
mdoerr@8903 3290 "stdx %[tmp], %[i], %[a] \n"
mdoerr@8903 3291 "addi %[i], %[i], 8 \n"
mdoerr@8903 3292 "bdnz 0b \n"
mdoerr@8903 3293 "addme %[tmp], %[carry] \n" // carry + CA - 1
mdoerr@8903 3294 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
mdoerr@8903 3295 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
mdoerr@8903 3296 : "ctr", "xer", "memory"
mdoerr@8903 3297 );
mdoerr@8903 3298 return tmp;
mdoerr@8903 3299 }
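// For reference, a portable (no inline-asm) rendering of the same loop;
// illustrative only, the build uses the asm variant above.
#if 0
static unsigned long
sub_portable(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  unsigned long borrow = 0;
  for (long i = 0; i < len; i++) {
    unsigned long diff = a[i] - b[i];
    unsigned long borrow_out = (a[i] < b[i]) | (diff < borrow);
    a[i] = diff - borrow;
    borrow = borrow_out;
  }
  return carry - borrow; // matches "addme": carry + CA - 1
}
#endif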
mdoerr@8903 3300
mdoerr@8903 3301 // Multiply (unsigned) Long A by Long B, accumulating the double-
mdoerr@8903 3302 // length result into the accumulator formed of T0, T1, and T2.
mdoerr@8903 3303 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
mdoerr@8903 3304 unsigned long hi, lo;
mdoerr@8903 3305 __asm__ __volatile__ (
mdoerr@8903 3306 "mulld %[lo], %[A], %[B] \n"
mdoerr@8903 3307 "mulhdu %[hi], %[A], %[B] \n"
mdoerr@8903 3308 "addc %[T0], %[T0], %[lo] \n"
mdoerr@8903 3309 "adde %[T1], %[T1], %[hi] \n"
mdoerr@8903 3310 "addze %[T2], %[T2] \n"
mdoerr@8903 3311 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
mdoerr@8903 3312 : [A]"r"(A), [B]"r"(B)
mdoerr@8903 3313 : "xer"
mdoerr@8903 3314 );
mdoerr@8903 3315 }
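// For reference, the same accumulation without inline asm, assuming a
// compiler with the GCC/Clang unsigned __int128 extension (illustrative
// only; the build uses the asm variant above).
#if 0
inline void MACC_portable(unsigned long A, unsigned long B,
                          unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned __int128 prod = (unsigned __int128)A * B;  // double-length product
  unsigned __int128 sum  = (unsigned __int128)T0 + (unsigned long)prod;
  T0 = (unsigned long)sum;                            // low word
  sum = (unsigned __int128)T1 + (unsigned long)(prod >> 64) + (unsigned long)(sum >> 64);
  T1 = (unsigned long)sum;                            // middle word plus carry
  T2 += (unsigned long)(sum >> 64);                   // propagate final carry
}
#endif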
mdoerr@8903 3316
mdoerr@8903 3317 // As above, but add twice the double-length result into the
mdoerr@8903 3318 // accumulator.
mdoerr@8903 3319 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
mdoerr@8903 3320 unsigned long hi, lo;
mdoerr@8903 3321 __asm__ __volatile__ (
mdoerr@8903 3322 "mulld %[lo], %[A], %[B] \n"
mdoerr@8903 3323 "mulhdu %[hi], %[A], %[B] \n"
mdoerr@8903 3324 "addc %[T0], %[T0], %[lo] \n"
mdoerr@8903 3325 "adde %[T1], %[T1], %[hi] \n"
mdoerr@8903 3326 "addze %[T2], %[T2] \n"
mdoerr@8903 3327 "addc %[T0], %[T0], %[lo] \n"
mdoerr@8903 3328 "adde %[T1], %[T1], %[hi] \n"
mdoerr@8903 3329 "addze %[T2], %[T2] \n"
mdoerr@8903 3330 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
mdoerr@8903 3331 : [A]"r"(A), [B]"r"(B)
mdoerr@8903 3332 : "xer"
mdoerr@8903 3333 );
mdoerr@8903 3334 }
mdoerr@8903 3335
mdoerr@8903 3336 // Fast Montgomery multiplication. The derivation of the algorithm is
mdoerr@8903 3337 // in "A Cryptographic Library for the Motorola DSP56000,
mdoerr@8903 3338 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237".
mdoerr@8903 3339 static void
mdoerr@8903 3340 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
mdoerr@8903 3341 unsigned long m[], unsigned long inv, int len) {
mdoerr@8903 3342 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
mdoerr@8903 3343 int i;
mdoerr@8903 3344
mdoerr@8903 3345 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
mdoerr@8903 3346
mdoerr@8903 3347 for (i = 0; i < len; i++) {
mdoerr@8903 3348 int j;
mdoerr@8903 3349 for (j = 0; j < i; j++) {
mdoerr@8903 3350 MACC(a[j], b[i-j], t0, t1, t2);
mdoerr@8903 3351 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3352 }
mdoerr@8903 3353 MACC(a[i], b[0], t0, t1, t2);
mdoerr@8903 3354 m[i] = t0 * inv;
mdoerr@8903 3355 MACC(m[i], n[0], t0, t1, t2);
mdoerr@8903 3356
mdoerr@8903 3357 assert(t0 == 0, "broken Montgomery multiply");
mdoerr@8903 3358
mdoerr@8903 3359 t0 = t1; t1 = t2; t2 = 0;
mdoerr@8903 3360 }
mdoerr@8903 3361
mdoerr@8903 3362 for (i = len; i < 2*len; i++) {
mdoerr@8903 3363 int j;
mdoerr@8903 3364 for (j = i-len+1; j < len; j++) {
mdoerr@8903 3365 MACC(a[j], b[i-j], t0, t1, t2);
mdoerr@8903 3366 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3367 }
mdoerr@8903 3368 m[i-len] = t0;
mdoerr@8903 3369 t0 = t1; t1 = t2; t2 = 0;
mdoerr@8903 3370 }
mdoerr@8903 3371
mdoerr@8903 3372 while (t0) {
mdoerr@8903 3373 t0 = sub(m, n, t0, len);
mdoerr@8903 3374 }
mdoerr@8903 3375 }
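// The 'inv' argument must satisfy inv * n[0] == -1 (mod 2^64); the JDK
// callers obtain it from the Java BigInteger code. Purely for illustration
// (hypothetical helper, not part of this file's interface), it can be
// computed for odd n[0] by Newton-Hensel iteration:
#if 0
static unsigned long montgomery_inverse(unsigned long n0) {
  unsigned long x = n0;  // correct to 3 bits for odd n0, since n0*n0 == 1 (mod 8)
  for (int i = 0; i < 5; i++) {
    x *= 2 - n0 * x;     // each step doubles the number of correct low bits
  }
  return -x;             // x == n0^-1 (mod 2^64), so -x * n0 == -1 (mod 2^64)
}
#endif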
mdoerr@8903 3376
mdoerr@8903 3377 // Fast Montgomery squaring. This uses asymptotically 25% fewer
mdoerr@8903 3378 // multiplies so it should be up to 25% faster than Montgomery
mdoerr@8903 3379 // multiplication. However, its loop control is more complex and it
mdoerr@8903 3380 // may actually run slower on some machines.
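// The saving comes from the symmetry of squaring: in column i of the
// product, each cross term a[j]*a[i-j] with j < i-j appears twice and is
// handled by a single MACC2, while the diagonal term a[i/2]*a[i/2]
// (present only for even i) is added once, via the "(i & 1) == 0" case below.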
mdoerr@8903 3381 static void
mdoerr@8903 3382 montgomery_square(unsigned long a[], unsigned long n[],
mdoerr@8903 3383 unsigned long m[], unsigned long inv, int len) {
mdoerr@8903 3384 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
mdoerr@8903 3385 int i;
mdoerr@8903 3386
mdoerr@8903 3387 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
mdoerr@8903 3388
mdoerr@8903 3389 for (i = 0; i < len; i++) {
mdoerr@8903 3390 int j;
mdoerr@8903 3391 int end = (i+1)/2;
mdoerr@8903 3392 for (j = 0; j < end; j++) {
mdoerr@8903 3393 MACC2(a[j], a[i-j], t0, t1, t2);
mdoerr@8903 3394 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3395 }
mdoerr@8903 3396 if ((i & 1) == 0) {
mdoerr@8903 3397 MACC(a[j], a[j], t0, t1, t2);
mdoerr@8903 3398 }
mdoerr@8903 3399 for (; j < i; j++) {
mdoerr@8903 3400 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3401 }
mdoerr@8903 3402 m[i] = t0 * inv;
mdoerr@8903 3403 MACC(m[i], n[0], t0, t1, t2);
mdoerr@8903 3404
mdoerr@8903 3405 assert(t0 == 0, "broken Montgomery square");
mdoerr@8903 3406
mdoerr@8903 3407 t0 = t1; t1 = t2; t2 = 0;
mdoerr@8903 3408 }
mdoerr@8903 3409
mdoerr@8903 3410 for (i = len; i < 2*len; i++) {
mdoerr@8903 3411 int start = i-len+1;
mdoerr@8903 3412 int end = start + (len - start)/2;
mdoerr@8903 3413 int j;
mdoerr@8903 3414 for (j = start; j < end; j++) {
mdoerr@8903 3415 MACC2(a[j], a[i-j], t0, t1, t2);
mdoerr@8903 3416 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3417 }
mdoerr@8903 3418 if ((i & 1) == 0) {
mdoerr@8903 3419 MACC(a[j], a[j], t0, t1, t2);
mdoerr@8903 3420 }
mdoerr@8903 3421 for (; j < len; j++) {
mdoerr@8903 3422 MACC(m[j], n[i-j], t0, t1, t2);
mdoerr@8903 3423 }
mdoerr@8903 3424 m[i-len] = t0;
mdoerr@8903 3425 t0 = t1; t1 = t2; t2 = 0;
mdoerr@8903 3426 }
mdoerr@8903 3427
mdoerr@8903 3428 while (t0) {
mdoerr@8903 3429 t0 = sub(m, n, t0, len);
mdoerr@8903 3430 }
mdoerr@8903 3431 }
mdoerr@8903 3432
mdoerr@8903 3433 // The threshold at which squaring is advantageous was determined
mdoerr@8903 3434 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
mdoerr@8903 3435 // Doesn't seem to be relevant for Power8, so we use the same value.
mdoerr@8903 3436 #define MONTGOMERY_SQUARING_THRESHOLD 64
mdoerr@8903 3437
mdoerr@8903 3438 // Copy len longwords from s to d, word-swapping as we go. The
mdoerr@8903 3439 // destination array is reversed.
mdoerr@8903 3440 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
mdoerr@8903 3441 d += len;
mdoerr@8903 3442 while(len-- > 0) {
mdoerr@8903 3443 d--;
mdoerr@8903 3444 unsigned long s_val = *s;
mdoerr@8903 3445 // Swap words in a longword on little endian machines.
mdoerr@8903 3446 #ifdef VM_LITTLE_ENDIAN
mdoerr@8903 3447 s_val = (s_val << 32) | (s_val >> 32);
mdoerr@8903 3448 #endif
mdoerr@8903 3449 *d = s_val;
mdoerr@8903 3450 s++;
mdoerr@8903 3451 }
mdoerr@8903 3452 }
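// Worked example (little endian, len = 2): if s[] = {0x2222222211111111,
// 0x4444444433333333}, then d[] = {0x3333333344444444, 0x1111111122222222}:
// the longword order is reversed and the 32-bit halves of each longword are
// swapped, converting between the jint word order used by Java's BigInteger
// and the little-endian longword order used by the routines above.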
mdoerr@8903 3453
mdoerr@8903 3454 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
mdoerr@8903 3455 jint len, jlong inv,
mdoerr@8903 3456 jint *m_ints) {
mdoerr@8903 3457 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
mdoerr@8903 3458 int longwords = len/2;
mdoerr@8903 3459 assert(longwords > 0, "unsupported");
mdoerr@8903 3460
mdoerr@8903 3461 // Make very sure we don't use so much space that the stack might
mdoerr@8903 3462 // overflow. 512 jints corresponds to a 16384-bit integer and
mdoerr@8903 3463 // will use a total of 8k bytes of stack space here.
mdoerr@8903 3464 int total_allocation = longwords * sizeof (unsigned long) * 4;
mdoerr@8903 3465 guarantee(total_allocation <= 8192, "must be");
mdoerr@8903 3466 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
mdoerr@8903 3467
mdoerr@8903 3468 // Local scratch arrays
mdoerr@8903 3469 unsigned long
mdoerr@8903 3470 *a = scratch + 0 * longwords,
mdoerr@8903 3471 *b = scratch + 1 * longwords,
mdoerr@8903 3472 *n = scratch + 2 * longwords,
mdoerr@8903 3473 *m = scratch + 3 * longwords;
mdoerr@8903 3474
mdoerr@8903 3475 reverse_words((unsigned long *)a_ints, a, longwords);
mdoerr@8903 3476 reverse_words((unsigned long *)b_ints, b, longwords);
mdoerr@8903 3477 reverse_words((unsigned long *)n_ints, n, longwords);
mdoerr@8903 3478
mdoerr@8903 3479 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
mdoerr@8903 3480
mdoerr@8903 3481 reverse_words(m, (unsigned long *)m_ints, longwords);
mdoerr@8903 3482 }
mdoerr@8903 3483
mdoerr@8903 3484 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
mdoerr@8903 3485 jint len, jlong inv,
mdoerr@8903 3486 jint *m_ints) {
mdoerr@8903 3487 assert(len % 2 == 0, "array length in montgomery_square must be even");
mdoerr@8903 3488 int longwords = len/2;
mdoerr@8903 3489 assert(longwords > 0, "unsupported");
mdoerr@8903 3490
mdoerr@8903 3491 // Make very sure we don't use so much space that the stack might
mdoerr@8903 3492 // overflow. 512 jints corresponds to a 16384-bit integer and
mdoerr@8903 3493 // will use a total of 6k bytes of stack space here.
mdoerr@8903 3494 int total_allocation = longwords * sizeof (unsigned long) * 3;
mdoerr@8903 3495 guarantee(total_allocation <= 8192, "must be");
mdoerr@8903 3496 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
mdoerr@8903 3497
mdoerr@8903 3498 // Local scratch arrays
mdoerr@8903 3499 unsigned long
mdoerr@8903 3500 *a = scratch + 0 * longwords,
mdoerr@8903 3501 *n = scratch + 1 * longwords,
mdoerr@8903 3502 *m = scratch + 2 * longwords;
mdoerr@8903 3503
mdoerr@8903 3504 reverse_words((unsigned long *)a_ints, a, longwords);
mdoerr@8903 3505 reverse_words((unsigned long *)n_ints, n, longwords);
mdoerr@8903 3506
mdoerr@8903 3507 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
mdoerr@8903 3508 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
mdoerr@8903 3509 } else {
mdoerr@8903 3510 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
mdoerr@8903 3511 }
mdoerr@8903 3512
mdoerr@8903 3513 reverse_words(m, (unsigned long *)m_ints, longwords);
mdoerr@8903 3514 }
