/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_ppc.inline.hpp"
#include "adfiles/ad_ppc_64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_r4,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location);
  static void    restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                      int frame_size_in_bytes,
                                                      bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};
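
// Illustration (editorial sketch, not authoritative): the frame pushed by
// push_frame_reg_args_and_save_live_registers() looks roughly like this,
// growing towards smaller addresses:
//
//   [new R1_SP]        abi_reg_args area (frame::abi_reg_args_size bytes)
//                      alignment padding, if any
//                      register save area: one reg_size (8 byte) slot per
//                      entry of RegisterSaver_LiveRegs, in array order
//   [old R1_SP]        caller's frame
//
// R30 is spilled at -reg_size relative to the old SP before the frame is
// pushed; the consistency assert below checks that this is exactly the last
// save slot of the new frame, which is why R30 must be the last array entry.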
#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }
static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg( R0 ),
  //RegisterSaver_LiveIntReg( R1 ), // stack pointer
  RegisterSaver_LiveIntReg( R2 ),
  RegisterSaver_LiveIntReg( R3 ),
  RegisterSaver_LiveIntReg( R4 ),
  RegisterSaver_LiveIntReg( R5 ),
  RegisterSaver_LiveIntReg( R6 ),
  RegisterSaver_LiveIntReg( R7 ),
  RegisterSaver_LiveIntReg( R8 ),
  RegisterSaver_LiveIntReg( R9 ),
  RegisterSaver_LiveIntReg( R10 ),
  RegisterSaver_LiveIntReg( R11 ),
  RegisterSaver_LiveIntReg( R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg( R14 ),
  RegisterSaver_LiveIntReg( R15 ),
  RegisterSaver_LiveIntReg( R16 ),
  RegisterSaver_LiveIntReg( R17 ),
  RegisterSaver_LiveIntReg( R18 ),
  RegisterSaver_LiveIntReg( R19 ),
  RegisterSaver_LiveIntReg( R20 ),
  RegisterSaver_LiveIntReg( R21 ),
  RegisterSaver_LiveIntReg( R22 ),
  RegisterSaver_LiveIntReg( R23 ),
  RegisterSaver_LiveIntReg( R24 ),
  RegisterSaver_LiveIntReg( R25 ),
  RegisterSaver_LiveIntReg( R26 ),
  RegisterSaver_LiveIntReg( R27 ),
  RegisterSaver_LiveIntReg( R28 ),
  RegisterSaver_LiveIntReg( R29 ),
  RegisterSaver_LiveIntReg( R31 ),
  RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
};
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.

  int i;
  int offset;
  // Calculate frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_reg_args_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;
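
  // Worked example (illustration only; the exact totals depend on
  // frame::alignment_in_bytes and frame::abi_reg_args_size of the target ABI):
  // RegisterSaver_LiveRegs above has 63 entries (1 special + 32 float +
  // 30 integer registers; R1 and R13 are excluded), so register_save_size is
  // 63 * 8 = 504 bytes. That is rounded up to the frame alignment and the
  // abi_reg_args header is added on top, giving the frame size reported to
  // the caller through out_frame_size_in_bytes.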
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // Save r30 in the last slot of the not yet pushed frame so that we
  // can use it as scratch reg.
  __ std(R30, -reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr: __ mflr(R30); break;
    case return_pc_is_r4: __ mr(R30, R4); break;
    case return_pc_is_thread_saved_exception_pc:
      __ ld(R30, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_adjustment != 0)
    __ addi(R30, R30, return_pc_adjustment);
  __ std(R30, _abi(lr), R1_SP);

  // push a new frame
  __ push_frame(frame_size_in_bytes, R30);

  // save all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) { // We spilled R30 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}
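
// Usage sketch (illustration only, not a call site from this file):
//
//   int frame_size_in_bytes;
//   OopMap* map = RegisterSaver::push_frame_reg_args_and_save_live_registers(
//                     masm, &frame_size_in_bytes, /*generate_oop_map=*/ true,
//                     /*return_pc_adjustment=*/ 0,
//                     RegisterSaver::return_pc_is_lr);
//   // ... emit the runtime call that needed all volatiles saved ...
//   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes,
//                                                       /*restore_ctr=*/ true);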
// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) // R30 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R30, offset, R1_SP);
            __ mtctr(R30);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // pop the frame
  __ pop_frame();

  // restore the flags
  __ restore_LR_CR(R30);

  // restore scratch register's value
  __ ld(R30, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}
void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}
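
// Editorial note: restore_argument_registers_and_pop_frame() below must walk
// st_off in exactly the same order as the save loop above (regs first, then
// the optional regs2 array), so the two functions have to be kept in sync
// whenever the layout changes.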
void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}
// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}
// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on PPC64.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}
#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif
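
// Illustration: with VMRegImpl::stack_slot_size == 4 (stack slots are jint
// sized), a VMReg sitting in stack slot 5 maps to byte offset
// (5 + out_preserve_stack_slots()) * 4 from the SP the slot is based on;
// reg2slot() returns the same quantity in slots instead of bytes.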
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register
// values (0 up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
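
// Worked example (illustration only): for a Java signature (long, double, int)
// the flattened sig_bt is { T_LONG, T_VOID, T_DOUBLE, T_VOID, T_INT }.
// java_calling_convention() below assigns the long to R3, the double to F1
// and the int to R4; the T_VOID halves get set_bad(). Ints/longs beyond the
// 8th integer argument and floats/doubles beyond the 13th float argument
// spill to 4-byte stack slots.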
const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
  int i;
  VMReg reg;
  int stk  = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack, float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return round_to(stk, 2);
}
#ifdef COMPILER2
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
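
  // Illustration (not authoritative): under this convention a native method
  // taking (jint a, jlong b) arrives here with both ints already promoted to
  // longs by GraphKit::gen_stub(), i.e. sig_bt = { T_LONG, T_INT, T_LONG,
  // T_VOID }. 'a' is passed in R3 and 'b' in R4, each occupying a full
  // 64-bit slot; the (T_LONG, T_INT) pairing is what the guarantee in the
  // int case below checks.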
  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail-out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. We do the cast in GraphKit::gen_stub() and just guard
      // here against losing that change.
      assert(CCallingConventionRequiresIntsAsLongs,
             "argument of type int should be promoted to type long");
      guarantee(i > 0 && sig_bt[i-1] == T_LONG,
                "argument of type (bt) should have been promoted to type (T_LONG,bt) for bt in "
                "{T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    case T_LONG:
      guarantee(sig_bt[i+1] == T_VOID    ||
                sig_bt[i+1] == T_BOOLEAN || sig_bt[i+1] == T_CHAR  ||
                sig_bt[i+1] == T_BYTE    || sig_bt[i+1] == T_SHORT ||
                sig_bt[i+1] == T_INT,
                "expecting type (T_LONG,half) or type (T_LONG,bt) with bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
#if defined(LINUX)
      // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
      // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
      // Although AIX runs on big endian CPU, float is in the most
      // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these double args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass double in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return round_to(stk, 2);
}
#endif // COMPILER2
static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);
  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);

  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);
  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;
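
  // Illustration (editorial): the interpreter expects its arguments laid out
  // top-down in memory, so the loop below writes the first Java argument at
  // adapter_size - wordSize and walks st_off towards smaller offsets. Longs
  // and doubles burn an extra slot (filled with known junk in debug builds),
  // matching the interpreter's two-slot layout for category-2 values.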
  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

#ifdef CC_INTERP
  const Register tos = R17_tos;
#else
  const Register tos = R15_esp;
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif

  // load TOS
  __ addi(tos, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.
#ifdef CC_INTERP
  const Register ld_ptr = R17_tos;
#else
  const Register ld_ptr = R15_esp;
#endif

  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;
  // Cut-out for having no stack args. Since up to 8 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }
  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value registers.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }
1110 BLOCK_COMMENT("Store method");
1111 // Store method into thread->callee_target.
1112 // We might end up in handle_wrong_method if the callee is
1113 // deoptimized as we race thru here. If that happens we don't want
1114 // to take a safepoint because the caller frame will look
1115 // interpreted and arguments are now "compiled" so it is much better
1116 // to make this transition invisible to the stack walking
1117 // code. Unfortunately if we try and find the callee by normal means
1118 // a safepoint is possible. So we stash the desired callee in the
1119 // thread and the vm will find there should this case occur.
1120 __ std(R19_method, thread_(callee_target));
1122 // Jump to the compiled code just as if compiled code was doing it.
1123 __ bctr();
1124 }
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;

  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a compiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
#ifdef COMPILER2
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld(  r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot      = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                              + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}
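
// Illustration (editorial): the net effect of object_move() is JNI handle
// semantics. A non-NULL oop is spilled to a known frame slot (recorded in the
// OopMap so the GC can find and update it) and the outgoing argument becomes
// the address of that slot; a NULL oop is passed as a NULL handle, as JNI
// requires.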
static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long-int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }
  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    // PPC64: pass ints as longs: must only deal with floats here.
    if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
  __ call_c(entry_point, relocInfo::runtime_call_type);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if Array=null.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}
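
// Illustration: for a critical native taking a jintArray, the code above
// turns the single oop argument into the pair
//   (body, length) = (array + arrayOopDesc::base_offset_in_bytes(T_INT),
//                     array->length()),
// or (0, 0) if the array is NULL, before the native entry point is called.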
static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

#endif // COMPILER2
1704 // ---------------------------------------------------------------------------
1705 // Generate a native wrapper for a given method. The method takes arguments
1706 // in the Java compiled code convention, marshals them to the native
1707 // convention (handlizes oops, etc), transitions to native, makes the call,
1708 // returns to java state (possibly blocking), unhandlizes any result and
1709 // returns.
1710 //
1711 // Critical native functions are a shorthand for the use of
1712 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1713 // functions. The wrapper is expected to unpack the arguments before
1714 // passing them to the callee and to perform checks before and after the
1715 // native call to ensure that the GC_locker
1716 // lock_critical/unlock_critical semantics are followed. Some other
1717 // parts of JNI setup are skipped, like the tear-down of the JNI handle
1718 // block and the check for pending exceptions, since it's impossible for
1719 // them to be thrown.
1720 //
1721 // They are roughly structured like this:
1722 // if (GC_locker::needs_gc())
1723 // SharedRuntime::block_for_jni_critical();
1724 // transition to thread_in_native
1725 // unpack array arguments and call native entry point
1726 // check for safepoint in progress
1727 // check if any thread suspend flags are set
1728 // call into JVM and possibly unlock the JNI critical
1729 // if a GC was suppressed while in the critical native.
1730 // transition back to thread_in_Java
1731 // return to caller
1732 //
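// Hedged illustration of the calling convention described above. The
// JavaCritical_ prefix is the real lookup convention; class and method
// names here are hypothetical:
//
//   static native int sum(int[] a); // Java declaration
//
//   // Regular JNI entry:
//   jint JNICALL Java_p_C_sum(JNIEnv* env, jclass cls, jintArray a);
//   // Critical counterpart: no JNIEnv*, no jclass, arrays unpacked into
//   // (length, element pointer) pairs, NULL arrays passed as (0, NULL):
//   jint JNICALL JavaCritical_p_C_sum(jint a_len, jint* a_body);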
1733 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1734 methodHandle method,
1735 int compile_id,
1736 BasicType *in_sig_bt,
1737 VMRegPair *in_regs,
1738 BasicType ret_type) {
1739 #ifdef COMPILER2
1740 if (method->is_method_handle_intrinsic()) {
1741 vmIntrinsics::ID iid = method->intrinsic_id();
1742 intptr_t start = (intptr_t)__ pc();
1743 int vep_offset = ((intptr_t)__ pc()) - start;
1744 gen_special_dispatch(masm,
1745 method,
1746 in_sig_bt,
1747 in_regs);
1748 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1749 __ flush();
1750 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1751 return nmethod::new_native_nmethod(method,
1752 compile_id,
1753 masm->code(),
1754 vep_offset,
1755 frame_complete,
1756 stack_slots / VMRegImpl::slots_per_word,
1757 in_ByteSize(-1),
1758 in_ByteSize(-1),
1759 (OopMapSet*)NULL);
1760 }
1762 bool is_critical_native = true;
1763 address native_func = method->critical_native_function();
1764 if (native_func == NULL) {
1765 native_func = method->native_function();
1766 is_critical_native = false;
1767 }
1768 assert(native_func != NULL, "must have function");
1770 // First, create signature for outgoing C call
1771 // --------------------------------------------------------------------------
1773 int total_in_args = method->size_of_parameters();
1774 // We have received a description of where all the java args are located
1775 // on entry to the wrapper. We need to convert these args to where
1776 // the jni function will expect them. To figure out where they go
1777 // we convert the java signature to a C signature by inserting
1778 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1779 //
1780 // Additionally, on ppc64 we must convert integers to longs in the C
1781 // signature. We do this in advance in order to have no trouble with
1782 // indexes into the bt-arrays.
1783 // So convert the signature and registers now, and adjust the total number
1784 // of in-arguments accordingly.
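// Hedged example of the int-to-long conversion (illustrative only): after
// convert_ints_to_longints, a Java signature (jint, jlong) is described as
// (T_LONG, T_INT, T_LONG, T_VOID), i.e. each int-like argument becomes a
// (T_LONG, bt) pair so it occupies a full 64-bit slot; see the guarantee
// in the argument-shuffle loop below.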
1785 int i2l_argcnt = convert_ints_to_longints_argcnt(total_in_args, in_sig_bt); // PPC64: pass ints as longs.
1787 // Calculate the total number of C arguments and create arrays for the
1788 // signature and the outgoing registers.
1789 // On ppc64, we have two arrays for the outgoing registers, because
1790 // some floating-point arguments must be passed in registers _and_
1791 // in stack locations.
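// Hedged note on the dual locations: under the PPC64 ELF ABI a float or
// double argument can be assigned an FPR and, in addition, a GPR or stack
// slot (needed, e.g., if the callee takes a variable argument list).
// out_regs holds the first location and out_regs2 the optional second one;
// the float_move/double_move calls below store to both whenever
// out_regs2[out].first()->is_valid().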
1792 bool method_is_static = method->is_static();
1793 int total_c_args = i2l_argcnt;
1795 if (!is_critical_native) {
1796 int n_hidden_args = method_is_static ? 2 : 1;
1797 total_c_args += n_hidden_args;
1798 } else {
1799 // No JNIEnv*, no this*, but unpacked arrays (base+length).
1800 for (int i = 0; i < total_in_args; i++) {
1801 if (in_sig_bt[i] == T_ARRAY) {
1802 total_c_args += 2; // PPC64: T_LONG, T_INT, T_ADDRESS (see convert_ints_to_longints and c_calling_convention)
1803 }
1804 }
1805 }
1807 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1808 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1809 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1810 BasicType* in_elem_bt = NULL;
1812 // Create the signature for the C call:
1813 // 1) add the JNIEnv*
1814 // 2) add the class if the method is static
1815 // 3) copy the rest of the incoming signature (shifted by the number of
1816 // hidden arguments).
1818 int argc = 0;
1819 if (!is_critical_native) {
1820 convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
1822 out_sig_bt[argc++] = T_ADDRESS;
1823 if (method->is_static()) {
1824 out_sig_bt[argc++] = T_OBJECT;
1825 }
1827 for (int i = 0; i < total_in_args ; i++ ) {
1828 out_sig_bt[argc++] = in_sig_bt[i];
1829 }
1830 } else {
1831 Thread* THREAD = Thread::current();
1832 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
1833 SignatureStream ss(method->signature());
1834 int o = 0;
1835 for (int i = 0; i < total_in_args ; i++, o++) {
1836 if (in_sig_bt[i] == T_ARRAY) {
1837 // Arrays are passed as int, elem* pair
1838 Symbol* atype = ss.as_symbol(CHECK_NULL);
1839 const char* at = atype->as_C_string();
1840 if (strlen(at) == 2) {
1841 assert(at[0] == '[', "must be");
1842 switch (at[1]) {
1843 case 'B': in_elem_bt[o] = T_BYTE; break;
1844 case 'C': in_elem_bt[o] = T_CHAR; break;
1845 case 'D': in_elem_bt[o] = T_DOUBLE; break;
1846 case 'F': in_elem_bt[o] = T_FLOAT; break;
1847 case 'I': in_elem_bt[o] = T_INT; break;
1848 case 'J': in_elem_bt[o] = T_LONG; break;
1849 case 'S': in_elem_bt[o] = T_SHORT; break;
1850 case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
1851 default: ShouldNotReachHere();
1852 }
1853 }
1854 } else {
1855 in_elem_bt[o] = T_VOID;
1856 switch(in_sig_bt[i]) { // PPC64: pass ints as longs.
1857 case T_BOOLEAN:
1858 case T_CHAR:
1859 case T_BYTE:
1860 case T_SHORT:
1861 case T_INT: in_elem_bt[++o] = T_VOID; break;
1862 default: break;
1863 }
1864 }
1865 if (in_sig_bt[i] != T_VOID) {
1866 assert(in_sig_bt[i] == ss.type(), "must match");
1867 ss.next();
1868 }
1869 }
1870 assert(i2l_argcnt==o, "must match");
1872 convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
1874 for (int i = 0; i < total_in_args ; i++ ) {
1875 if (in_sig_bt[i] == T_ARRAY) {
1876 // Arrays are passed as int, elem* pair.
1877 out_sig_bt[argc++] = T_LONG; // PPC64: pass ints as longs.
1878 out_sig_bt[argc++] = T_INT;
1879 out_sig_bt[argc++] = T_ADDRESS;
1880 } else {
1881 out_sig_bt[argc++] = in_sig_bt[i];
1882 }
1883 }
1884 }
1887 // Compute the wrapper's frame size.
1888 // --------------------------------------------------------------------------
1890 // Now figure out where the args must be stored and how much stack space
1891 // they require.
1892 //
1893 // Compute framesize for the wrapper. We need to handlize all oops in
1894 // incoming registers.
1895 //
1896 // Calculate the total number of stack slots we will need:
1897 // 1) abi requirements
1898 // 2) outgoing arguments
1899 // 3) space for inbound oop handle area
1900 // 4) space for handlizing a klass if static method
1901 // 5) space for a lock if synchronized method
1902 // 6) workspace for saving return values, int <-> float reg moves, etc.
1903 // 7) alignment
1904 //
1905 // Layout of the native wrapper frame:
1906 // (stack grows upwards, memory grows downwards)
1907 //
1908 // NW [ABI_REG_ARGS] <-- 1) R1_SP
1909 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
1910 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
1911 // klass <-- 4) R1_SP + klass_offset
1912 // lock <-- 5) R1_SP + lock_offset
1913 // [workspace] <-- 6) R1_SP + workspace_offset
1914 // [alignment] (optional) <-- 7)
1915 // caller [JIT_TOP_ABI_48] <-- r_callers_sp
1916 //
1917 // - *_slot_offset Indicates offset from SP in number of stack slots.
1918 // - *_offset Indicates offset from SP in bytes.
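// Worked example of step 7 (illustrative, assuming
// VMRegImpl::stack_slot_size == 4 and frame::alignment_in_bytes == 16):
// stack_slots is rounded up to a multiple of 16/4 = 4 slots, so e.g.
// 45 slots become 48, giving frame_size_in_bytes = 48 * 4 = 192 bytes,
// which keeps R1_SP 16-byte aligned.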
1920 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2)
1921 + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
1923 // Now the space for the inbound oop handle area.
1924 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
1925 if (is_critical_native) {
1926 // Critical natives may have to call out so they need a save area
1927 // for register arguments.
1928 int double_slots = 0;
1929 int single_slots = 0;
1930 for (int i = 0; i < total_in_args; i++) {
1931 if (in_regs[i].first()->is_Register()) {
1932 const Register reg = in_regs[i].first()->as_Register();
1933 switch (in_sig_bt[i]) {
1934 case T_BOOLEAN:
1935 case T_BYTE:
1936 case T_SHORT:
1937 case T_CHAR:
1938 case T_INT: /*single_slots++;*/ break; // PPC64: pass ints as longs.
1939 case T_ARRAY:
1940 case T_LONG: double_slots++; break;
1941 default: ShouldNotReachHere();
1942 }
1943 } else if (in_regs[i].first()->is_FloatRegister()) {
1944 switch (in_sig_bt[i]) {
1945 case T_FLOAT: single_slots++; break;
1946 case T_DOUBLE: double_slots++; break;
1947 default: ShouldNotReachHere();
1948 }
1949 }
1950 }
1951 total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
1952 }
1954 int oop_handle_slot_offset = stack_slots;
1955 stack_slots += total_save_slots; // 3)
1957 int klass_slot_offset = 0;
1958 int klass_offset = -1;
1959 if (method_is_static && !is_critical_native) { // 4)
1960 klass_slot_offset = stack_slots;
1961 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1962 stack_slots += VMRegImpl::slots_per_word;
1963 }
1965 int lock_slot_offset = 0;
1966 int lock_offset = -1;
1967 if (method->is_synchronized()) { // 5)
1968 lock_slot_offset = stack_slots;
1969 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
1970 stack_slots += VMRegImpl::slots_per_word;
1971 }
1973 int workspace_slot_offset = stack_slots; // 6)
1974 stack_slots += 2;
1976 // Now compute actual number of stack words we need.
1977 // Rounding to make stack properly aligned.
1978 stack_slots = round_to(stack_slots, // 7)
1979 frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
1980 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
1983 // Now we can start generating code.
1984 // --------------------------------------------------------------------------
1986 intptr_t start_pc = (intptr_t)__ pc();
1987 intptr_t vep_start_pc;
1988 intptr_t frame_done_pc;
1989 intptr_t oopmap_pc;
1991 Label ic_miss;
1992 Label handle_pending_exception;
1994 Register r_callers_sp = R21;
1995 Register r_temp_1 = R22;
1996 Register r_temp_2 = R23;
1997 Register r_temp_3 = R24;
1998 Register r_temp_4 = R25;
1999 Register r_temp_5 = R26;
2000 Register r_temp_6 = R27;
2001 Register r_return_pc = R28;
2003 Register r_carg1_jnienv = noreg;
2004 Register r_carg2_classorobject = noreg;
2005 if (!is_critical_native) {
2006 r_carg1_jnienv = out_regs[0].first()->as_Register();
2007 r_carg2_classorobject = out_regs[1].first()->as_Register();
2008 }
2011 // Generate the Unverified Entry Point (UEP).
2012 // --------------------------------------------------------------------------
2013 assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2015 // Check ic: object class == cached class?
2016 if (!method_is_static) {
2017 Register ic = as_Register(Matcher::inline_cache_reg_encode());
2018 Register receiver_klass = r_temp_1;
2020 __ cmpdi(CCR0, R3_ARG1, 0);
2021 __ beq(CCR0, ic_miss);
2022 __ verify_oop(R3_ARG1);
2023 __ load_klass(receiver_klass, R3_ARG1);
2025 __ cmpd(CCR0, receiver_klass, ic);
2026 __ bne(CCR0, ic_miss);
2027 }
2030 // Generate the Verified Entry Point (VEP).
2031 // --------------------------------------------------------------------------
2032 vep_start_pc = (intptr_t)__ pc();
2034 __ save_LR_CR(r_temp_1);
2035 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2036 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2037 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2038 frame_done_pc = (intptr_t)__ pc();
2040 // Native nmethod wrappers never take possession of the oop arguments.
2041 // So the caller will GC the arguments.
2042 // The only thing we need an oopMap for is if the call is static.
2043 //
2044 // An OopMap for lock (and class if static), and one for the VM call itself.
2045 OopMapSet *oop_maps = new OopMapSet();
2046 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2048 if (is_critical_native) {
2049 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
2050 }
2052 // Move arguments from register/stack to register/stack.
2053 // --------------------------------------------------------------------------
2054 //
2055 // We immediately shuffle the arguments so that for any vm call we have
2056 // to make from here on out (sync slow path, jvmti, etc.) we will have
2057 // captured the oops from our caller and have a valid oopMap for them.
2058 //
2059 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2060 // (derived from JavaThread* which is in R16_thread) and, if static,
2061 // the class mirror instead of a receiver. This pretty much guarantees that
2062 // register layout will not match. We ignore these extra arguments during
2063 // the shuffle. The shuffle is described by the two calling convention
2064 // vectors we have in our possession. We simply walk the java vector to
2065 // get the source locations and the c vector to get the destinations.
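// Hedged example of the shuffle (register assignments are illustrative):
// for a non-static native `long f(long x)', the Java side might pass the
// receiver in R3 and x in R4, while the C side wants JNIEnv* in R3, the
// receiver handle in R4, and x in R5. Each incoming argument thus shifts
// to the next outgoing slot, which is why walking backward never
// overwrites a source before it is read.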
2067 // Record sp-based slot for receiver on stack for non-static methods.
2068 int receiver_offset = -1;
2070 // We move the arguments backward because a floating point register
2071 // destination will always be a register with a greater or equal
2072 // register number, or a stack slot.
2073 // in is the index of the incoming Java arguments
2074 // out is the index of the outgoing C arguments
2076 #ifdef ASSERT
2077 bool reg_destroyed[RegisterImpl::number_of_registers];
2078 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2079 for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
2080 reg_destroyed[r] = false;
2081 }
2082 for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
2083 freg_destroyed[f] = false;
2084 }
2085 #endif // ASSERT
2087 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
2089 #ifdef ASSERT
2090 if (in_regs[in].first()->is_Register()) {
2091 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
2092 } else if (in_regs[in].first()->is_FloatRegister()) {
2093 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
2094 }
2095 if (out_regs[out].first()->is_Register()) {
2096 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
2097 } else if (out_regs[out].first()->is_FloatRegister()) {
2098 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
2099 }
2100 if (out_regs2[out].first()->is_Register()) {
2101 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
2102 } else if (out_regs2[out].first()->is_FloatRegister()) {
2103 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
2104 }
2105 #endif // ASSERT
2107 switch (in_sig_bt[in]) {
2108 case T_BOOLEAN:
2109 case T_CHAR:
2110 case T_BYTE:
2111 case T_SHORT:
2112 case T_INT:
2113 guarantee(in > 0 && in_sig_bt[in-1] == T_LONG,
2114 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
2115 break;
2116 case T_LONG:
2117 if (in_sig_bt[in+1] == T_VOID) {
2118 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2119 } else {
2120 guarantee(in_sig_bt[in+1] == T_BOOLEAN || in_sig_bt[in+1] == T_CHAR ||
2121 in_sig_bt[in+1] == T_BYTE || in_sig_bt[in+1] == T_SHORT ||
2122 in_sig_bt[in+1] == T_INT,
2123 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
2124 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2125 }
2126 break;
2127 case T_ARRAY:
2128 if (is_critical_native) {
2129 int body_arg = out;
2130 out -= 2; // Point to length arg. PPC64: pass ints as longs.
2131 unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
2132 r_callers_sp, r_temp_1, r_temp_2);
2133 break;
2134 }
2135 case T_OBJECT:
2136 assert(!is_critical_native, "no oop arguments");
2137 object_move(masm, stack_slots,
2138 oop_map, oop_handle_slot_offset,
2139 ((in == 0) && (!method_is_static)), &receiver_offset,
2140 in_regs[in], out_regs[out],
2141 r_callers_sp, r_temp_1, r_temp_2);
2142 break;
2143 case T_VOID:
2144 break;
2145 case T_FLOAT:
2146 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2147 if (out_regs2[out].first()->is_valid()) {
2148 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2149 }
2150 break;
2151 case T_DOUBLE:
2152 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
2153 if (out_regs2[out].first()->is_valid()) {
2154 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
2155 }
2156 break;
2157 case T_ADDRESS:
2158 fatal("found type (T_ADDRESS) in java args");
2159 break;
2160 default:
2161 ShouldNotReachHere();
2162 break;
2163 }
2164 }
2166 // Pre-load a static method's oop into ARG2.
2167 // Used both by locking code and the normal JNI call code.
2168 if (method_is_static && !is_critical_native) {
2169 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
2170 r_carg2_classorobject);
2172 // Now handlize the static class mirror in carg2. It's known not-null.
2173 __ std(r_carg2_classorobject, klass_offset, R1_SP);
2174 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2175 __ addi(r_carg2_classorobject, R1_SP, klass_offset);
2176 }
2178 // Get JNIEnv* which is first argument to native.
2179 if (!is_critical_native) {
2180 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
2181 }
2183 // NOTE:
2184 //
2185 // We have all of the arguments set up at this point.
2186 // We MUST NOT touch any outgoing regs from this point on.
2187 // So if we must call out we must push a new frame.
2189 // Get current pc for oopmap, and load it patchable relative to global toc.
2190 oopmap_pc = (intptr_t) __ pc();
2191 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);
2193 // We use the same pc/oopMap repeatedly when we call out.
2194 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
2196 // r_return_pc now has the pc loaded that we will use when we finally call
2197 // to native.
2199 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
2200 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
2203 # if 0
2204 // DTrace method entry
2205 # endif
2207 // Lock a synchronized method.
2208 // --------------------------------------------------------------------------
2210 if (method->is_synchronized()) {
2211 assert(!is_critical_native, "unhandled");
2212 ConditionRegister r_flag = CCR1;
2213 Register r_oop = r_temp_4;
2214 const Register r_box = r_temp_5;
2215 Label done, locked;
2217 // Load the oop for the object or class. r_carg2_classorobject contains
2218 // either the handlized oop from the incoming arguments or the handlized
2219 // class mirror (if the method is static).
2220 __ ld(r_oop, 0, r_carg2_classorobject);
2222 // Get the lock box slot's address.
2223 __ addi(r_box, R1_SP, lock_offset);
2225 # ifdef ASSERT
2226 if (UseBiasedLocking) {
2227 // Making the box point to itself makes it clear that it went unused
2228 // while also being obviously invalid.
2229 __ std(r_box, 0, r_box);
2230 }
2231 # endif // ASSERT
2233 // Try fastpath for locking.
2234 // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2235 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2236 __ beq(r_flag, locked);
2238 // None of the above fast optimizations worked so we have to get into the
2239 // slow case of monitor enter. Inline a special case of call_VM that
2240 // disallows any pending_exception.
2242 // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
2243 int frame_size = frame::abi_reg_args_size +
2244 round_to(total_c_args * wordSize, frame::alignment_in_bytes);
2245 __ mr(R11_scratch1, R1_SP);
2246 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
2248 // Do the call.
2249 __ set_last_Java_frame(R11_scratch1, r_return_pc);
2250 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
2251 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
2252 __ reset_last_Java_frame();
2254 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
2256 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2257 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);
2259 __ bind(locked);
2260 }
2263 // Publish thread state
2264 // --------------------------------------------------------------------------
2266 // Use that pc we placed in r_return_pc a while back as the current frame anchor.
2267 __ set_last_Java_frame(R1_SP, r_return_pc);
2269 // Transition from _thread_in_Java to _thread_in_native.
2270 __ li(R0, _thread_in_native);
2271 __ release();
2272 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2273 __ stw(R0, thread_(thread_state));
2274 if (UseMembar) {
2275 __ fence();
2276 }
2279 // The JNI call
2280 // --------------------------------------------------------------------------
2281 #if defined(ABI_ELFv2)
2282 __ call_c(native_func, relocInfo::runtime_call_type);
2283 #else
2284 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
2285 __ call_c(fd_native_method, relocInfo::runtime_call_type);
2286 #endif
2289 // Now, we are back from the native code.
2292 // Unpack the native result.
2293 // --------------------------------------------------------------------------
2295 // For int types, we do any sign extension required.
2296 // Care must be taken that the return values (R3_RET and F1_RET)
2297 // will survive any VM calls for blocking or unlocking.
2298 // An OOP result (handle) is done specially in the slow-path code.
2300 switch (ret_type) {
2301 case T_VOID: break; // Nothing to do!
2302 case T_FLOAT: break; // Got it where we want it (unless slow-path).
2303 case T_DOUBLE: break; // Got it where we want it (unless slow-path).
2304 case T_LONG: break; // Got it where we want it (unless slow-path).
2305 case T_OBJECT: break; // Really a handle.
2306 // Cannot de-handlize until after reclaiming jvm_lock.
2307 case T_ARRAY: break;
2309 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1)
2310 Label skip_modify;
2311 __ cmpwi(CCR0, R3_RET, 0);
2312 __ beq(CCR0, skip_modify);
2313 __ li(R3_RET, 1);
2314 __ bind(skip_modify);
2315 break;
2316 }
2317 case T_BYTE: { // sign extension
2318 __ extsb(R3_RET, R3_RET);
2319 break;
2320 }
2321 case T_CHAR: { // unsigned result
2322 __ andi(R3_RET, R3_RET, 0xffff);
2323 break;
2324 }
2325 case T_SHORT: { // sign extension
2326 __ extsh(R3_RET, R3_RET);
2327 break;
2328 }
2329 case T_INT: // nothing to do
2330 break;
2331 default:
2332 ShouldNotReachHere();
2333 break;
2334 }
2337 // Publish thread state
2338 // --------------------------------------------------------------------------
2340 // Switch thread to "native transition" state before reading the
2341 // synchronization state. This additional state is necessary because reading
2342 // and testing the synchronization state is not atomic w.r.t. GC, as this
2343 // scenario demonstrates:
2344 // - Java thread A, in _thread_in_native state, loads _not_synchronized
2345 // and is preempted.
2346 // - VM thread changes sync state to synchronizing and suspends threads
2347 // for GC.
2348 // - Thread A is resumed to finish this native method, but doesn't block
2349 // here since it didn't see any synchronization in progress, and escapes.
2351 // Transition from _thread_in_native to _thread_in_native_trans.
2352 __ li(R0, _thread_in_native_trans);
2353 __ release();
2354 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2355 __ stw(R0, thread_(thread_state));
2358 // Must we block?
2359 // --------------------------------------------------------------------------
2361 // Block, if necessary, before resuming in _thread_in_Java state.
2362 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2363 Label after_transition;
2364 {
2365 Label no_block, sync;
2367 if (os::is_MP()) {
2368 if (UseMembar) {
2369 // Force this write out before the read below.
2370 __ fence();
2371 } else {
2372 // Write serialization page so VM thread can do a pseudo remote membar.
2373 // We use the current thread pointer to calculate a thread specific
2374 // offset to write to within the page. This minimizes bus traffic
2375 // due to cache line collision.
2376 __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
2377 }
2378 }
2380 Register sync_state_addr = r_temp_4;
2381 Register sync_state = r_temp_5;
2382 Register suspend_flags = r_temp_6;
2384 __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);
2386 // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
2387 __ lwz(sync_state, 0, sync_state_addr);
2389 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2390 __ lwz(suspend_flags, thread_(suspend_flags));
2392 __ acquire();
2394 Label do_safepoint;
2395 // No synchronization in progress nor yet synchronized.
2396 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
2397 // Not suspended.
2398 __ cmpwi(CCR1, suspend_flags, 0);
2400 __ bne(CCR0, sync);
2401 __ beq(CCR1, no_block);
2403 // Block. Save any potential method result value before the operation and
2404 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2405 // lets us share the oopMap we used when we went native rather than create
2406 // a distinct one for this pc.
2407 __ bind(sync);
2409 address entry_point = is_critical_native
2410 ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
2411 : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2412 save_native_result(masm, ret_type, workspace_slot_offset);
2413 __ call_VM_leaf(entry_point, R16_thread);
2414 restore_native_result(masm, ret_type, workspace_slot_offset);
2416 if (is_critical_native) {
2417 __ b(after_transition); // No thread state transition here.
2418 }
2419 __ bind(no_block);
2420 }
2422 // Publish thread state.
2423 // --------------------------------------------------------------------------
2425 // Thread state is thread_in_native_trans. Any safepoint blocking has
2426 // already happened so we can now change state to _thread_in_Java.
2428 // Transition from _thread_in_native_trans to _thread_in_Java.
2429 __ li(R0, _thread_in_Java);
2430 __ release();
2431 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2432 __ stw(R0, thread_(thread_state));
2433 if (UseMembar) {
2434 __ fence();
2435 }
2436 __ bind(after_transition);
2438 // Reguard any pages if necessary.
2439 // --------------------------------------------------------------------------
2441 Label no_reguard;
2442 __ lwz(r_temp_1, thread_(stack_guard_state));
2443 __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
2444 __ bne(CCR0, no_reguard);
2446 save_native_result(masm, ret_type, workspace_slot_offset);
2447 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2448 restore_native_result(masm, ret_type, workspace_slot_offset);
2450 __ bind(no_reguard);
2453 // Unlock
2454 // --------------------------------------------------------------------------
2456 if (method->is_synchronized()) {
2458 ConditionRegister r_flag = CCR1;
2459 const Register r_oop = r_temp_4;
2460 const Register r_box = r_temp_5;
2461 const Register r_exception = r_temp_6;
2462 Label done;
2464 // Get oop and address of lock object box.
2465 if (method_is_static) {
2466 assert(klass_offset != -1, "");
2467 __ ld(r_oop, klass_offset, R1_SP);
2468 } else {
2469 assert(receiver_offset != -1, "");
2470 __ ld(r_oop, receiver_offset, R1_SP);
2471 }
2472 __ addi(r_box, R1_SP, lock_offset);
2474 // Try fastpath for unlocking.
2475 __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2476 __ beq(r_flag, done);
2478 // Save and restore any potential method result value around the unlocking operation.
2479 save_native_result(masm, ret_type, workspace_slot_offset);
2481 // Must save pending exception around the slow-path VM call. Since it's a
2482 // leaf call, the pending exception (if any) can be kept in a register.
2483 __ ld(r_exception, thread_(pending_exception));
2484 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2485 __ li(R0, 0);
2486 __ std(R0, thread_(pending_exception));
2488 // Slow case of monitor exit.
2489 // Inline a special case of call_VM that disallows any pending_exception.
2490 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);
2492 __ asm_assert_mem8_is_zero(thread_(pending_exception),
2493 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
2495 restore_native_result(masm, ret_type, workspace_slot_offset);
2497 // Check_forward_pending_exception jumps to forward_exception if any pending
2498 // exception is set. The forward_exception routine expects to see the
2499 // exception in pending_exception and not in a register. Kind of clumsy,
2500 // since all folks who branch to forward_exception must have tested
2501 // pending_exception first and hence have it in a register already.
2502 __ std(r_exception, thread_(pending_exception));
2504 __ bind(done);
2505 }
2507 # if 0
2508 // DTrace method exit
2509 # endif
2511 // Clear "last Java frame" SP and PC.
2512 // --------------------------------------------------------------------------
2514 __ reset_last_Java_frame();
2516 // Unbox oop result, e.g. JNIHandles::resolve value.
2517 // --------------------------------------------------------------------------
2519 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2520 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false); // kills R31
2521 }
2524 // Reset handle block.
2525 // --------------------------------------------------------------------------
2526 if (!is_critical_native) {
2527 __ ld(r_temp_1, thread_(active_handles));
2528 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
2529 __ li(r_temp_2, 0);
2530 __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
2533 // Check for pending exceptions.
2534 // --------------------------------------------------------------------------
2535 __ ld(r_temp_2, thread_(pending_exception));
2536 __ cmpdi(CCR0, r_temp_2, 0);
2537 __ bne(CCR0, handle_pending_exception);
2538 }
2540 // Return
2541 // --------------------------------------------------------------------------
2543 __ pop_frame();
2544 __ restore_LR_CR(R11);
2545 __ blr();
2548 // Handler for pending exceptions (out-of-line).
2549 // --------------------------------------------------------------------------
2551 // Since this is a native call, we know the proper exception handler
2552 // is the empty function. We just pop this frame and then jump to
2553 // forward_exception_entry.
2554 if (!is_critical_native) {
2555 __ align(InteriorEntryAlignment);
2556 __ bind(handle_pending_exception);
2558 __ pop_frame();
2559 __ restore_LR_CR(R11);
2560 __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2561 relocInfo::runtime_call_type);
2562 }
2564 // Handler for a cache miss (out-of-line).
2565 // --------------------------------------------------------------------------
2567 if (!method_is_static) {
2568 __ align(InteriorEntryAlignment);
2569 __ bind(ic_miss);
2571 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
2572 relocInfo::runtime_call_type);
2573 }
2575 // Done.
2576 // --------------------------------------------------------------------------
2578 __ flush();
2580 nmethod *nm = nmethod::new_native_nmethod(method,
2581 compile_id,
2582 masm->code(),
2583 vep_start_pc-start_pc,
2584 frame_done_pc-start_pc,
2585 stack_slots / VMRegImpl::slots_per_word,
2586 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2587 in_ByteSize(lock_offset),
2588 oop_maps);
2590 if (is_critical_native) {
2591 nm->set_lazy_critical_native(true);
2592 }
2594 return nm;
2595 #else
2596 ShouldNotReachHere();
2597 return NULL;
2598 #endif // COMPILER2
2599 }
2601 // This function returns the size adjustment (in number of words) to a c2i
2602 // adapter activation for use during deoptimization.
2603 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2604 return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
2605 }
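// Hedged worked example (illustrative numbers, assuming
// Interpreter::stackElementWords == 1 and frame::alignment_in_bytes == 16):
// callee_locals == 7 and callee_parameters == 2 give (7 - 2) * 1 = 5,
// which round_to(5, 16) pads up to an adjustment of 16 words.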
2607 uint SharedRuntime::out_preserve_stack_slots() {
2608 #ifdef COMPILER2
2609 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2610 #else
2611 return 0;
2612 #endif
2613 }
2615 #ifdef COMPILER2
2616 // Frame generation for deopt and uncommon trap blobs.
2617 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2618 /* Read */
2619 Register unroll_block_reg,
2620 /* Update */
2621 Register frame_sizes_reg,
2622 Register number_of_frames_reg,
2623 Register pcs_reg,
2624 /* Invalidate */
2625 Register frame_size_reg,
2626 Register pc_reg) {
2628 __ ld(pc_reg, 0, pcs_reg);
2629 __ ld(frame_size_reg, 0, frame_sizes_reg);
2630 __ std(pc_reg, _abi(lr), R1_SP);
2631 __ push_frame(frame_size_reg, R0/*tmp*/);
2632 #ifdef CC_INTERP
2633 __ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
2634 #else
2635 #ifdef ASSERT
2636 __ load_const_optimized(pc_reg, 0x5afe);
2637 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
2638 #endif
2639 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
2640 #endif // CC_INTERP
2641 __ addi(number_of_frames_reg, number_of_frames_reg, -1);
2642 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
2643 __ addi(pcs_reg, pcs_reg, wordSize);
2644 }
2646 // Loop through the UnrollBlock info and create new frames.
2647 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
2648 /* read */
2649 Register unroll_block_reg,
2650 /* invalidate */
2651 Register frame_sizes_reg,
2652 Register number_of_frames_reg,
2653 Register pcs_reg,
2654 Register frame_size_reg,
2655 Register pc_reg) {
2656 Label loop;
2658 // _number_of_frames is of type int (deoptimization.hpp)
2659 __ lwa(number_of_frames_reg,
2660 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
2661 unroll_block_reg);
2662 __ ld(pcs_reg,
2663 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
2664 unroll_block_reg);
2665 __ ld(frame_sizes_reg,
2666 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
2667 unroll_block_reg);
2669 // stack: (caller_of_deoptee, ...).
2671 // At this point we either have an interpreter frame or a compiled
2672 // frame on top of the stack. If it is a compiled frame, we push a new
2673 // c2i adapter here.
2675 // Memorize top-frame stack-pointer.
2676 __ mr(frame_size_reg/*old_sp*/, R1_SP);
2678 // Resize interpreter top frame OR C2I adapter.
2680 // At this moment, the top frame (which is the caller of the deoptee) is
2681 // an interpreter frame or a newly pushed C2I adapter or an entry frame.
2682 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
2683 // outgoing arguments.
2684 //
2685 // In order to push the interpreter frame for the deoptee, we need to
2686 // resize the top frame such that we are able to place the deoptee's
2687 // locals in the frame.
2688 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
2689 // into a valid PARENT_IJAVA_FRAME_ABI.
2691 __ lwa(R11_scratch1,
2692 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
2693 unroll_block_reg);
2694 __ neg(R11_scratch1, R11_scratch1);
2696 // R11_scratch1 contains size of locals for frame resizing.
2697 // R12_scratch2 contains top frame's lr.
2699 // Resizing the frame by the complete frame size prevents the TOC from
2700 // being overwritten by locals. A more stack-space-saving way would be
2701 // to copy the TOC to its location in the new ABI.
2702 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
2704 // Now, resize the frame.
2705 __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
2707 // In the case where we have resized a c2i frame above, the optional
2708 // alignment below the locals has size 32 (why?).
2709 __ std(R12_scratch2, _abi(lr), R1_SP);
2711 // Initialize initial_caller_sp.
2712 #ifdef CC_INTERP
2713 __ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
2714 #else
2715 #ifdef ASSERT
2716 __ load_const_optimized(pc_reg, 0x5afe);
2717 __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
2718 #endif
2719 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
2720 #endif // CC_INTERP
2722 #ifdef ASSERT
2723 // Make sure that there is at least one entry in the array.
2724 __ cmpdi(CCR0, number_of_frames_reg, 0);
2725 __ asm_assert_ne("array_size must be > 0", 0x205);
2726 #endif
2728 // Now push the new interpreter frames.
2729 //
2730 __ bind(loop);
2731 // Allocate a new frame, fill in the pc.
2732 push_skeleton_frame(masm, deopt,
2733 unroll_block_reg,
2734 frame_sizes_reg,
2735 number_of_frames_reg,
2736 pcs_reg,
2737 frame_size_reg,
2738 pc_reg);
2739 __ cmpdi(CCR0, number_of_frames_reg, 0);
2740 __ bne(CCR0, loop);
2742 // Get the return address pointing into the frame manager.
2743 __ ld(R0, 0, pcs_reg);
2744 // Store it in the top interpreter frame.
2745 __ std(R0, _abi(lr), R1_SP);
2746 // Initialize frame_manager_lr of interpreter top frame.
2747 #ifdef CC_INTERP
2748 __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2749 #endif
2750 }
2751 #endif
2753 void SharedRuntime::generate_deopt_blob() {
2754 // Allocate space for the code
2755 ResourceMark rm;
2756 // Setup code generation tools
2757 CodeBuffer buffer("deopt_blob", 2048, 1024);
2758 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
2759 Label exec_mode_initialized;
2760 int frame_size_in_words;
2761 OopMap* map = NULL;
2762 OopMapSet *oop_maps = new OopMapSet();
2764 // Size of the ABI_REG_ARGS area plus spill slots for R3_RET and F1_RET.
2765 const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
2766 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
2767 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
2769 const Register exec_mode_reg = R21_tmp1;
2771 const address start = __ pc();
2773 #ifdef COMPILER2
2774 // --------------------------------------------------------------------------
2775 // Prolog for the non-exception case.
2777 // We have been called from the deopt handler of the deoptee.
2778 //
2779 // deoptee:
2780 // ...
2781 // call X
2782 // ...
2783 // deopt_handler: call_deopt_stub
2784 // cur. return pc --> ...
2785 //
2786 // So currently SR_LR points behind the call in the deopt handler.
2787 // We adjust it such that it points to the start of the deopt handler.
2788 // The return_pc has been stored in the frame of the deoptee and
2789 // will replace the address of the deopt_handler in the call
2790 // to Deoptimization::fetch_unroll_info below.
2791 // We can't grab a free register here, because all registers may
2792 // contain live values, so let the RegisterSaver do the adjustment
2793 // of the return pc.
2794 const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
2796 // Push the "unpack frame"
2797 // Save everything in sight.
2798 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2799 &first_frame_size_in_bytes,
2800 /*generate_oop_map=*/ true,
2801 return_pc_adjustment_no_exception,
2802 RegisterSaver::return_pc_is_lr);
2803 assert(map != NULL, "OopMap must have been created");
2805 __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2806 // Save exec mode for unpack_frames.
2807 __ b(exec_mode_initialized);
2809 // --------------------------------------------------------------------------
2810 // Prolog for exception case
2812 // An exception is pending.
2813 // We have been called with a return (interpreter) or a jump (exception blob).
2814 //
2815 // - R3_ARG1: exception oop
2816 // - R4_ARG2: exception pc
2818 int exception_offset = __ pc() - start;
2820 BLOCK_COMMENT("Prolog for exception case");
2822 // The RegisterSaver doesn't need to adjust the return pc for this situation.
2823 const int return_pc_adjustment_exception = 0;
2825 // Push the "unpack frame".
2826 // Save everything in sight.
2827 assert(R4 == R4_ARG2, "exception pc must be in r4");
2828 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2829 &first_frame_size_in_bytes,
2830 /*generate_oop_map=*/ false,
2831 return_pc_adjustment_exception,
2832 RegisterSaver::return_pc_is_r4);
2834 // Deopt during an exception. Save exec mode for unpack_frames.
2835 __ li(exec_mode_reg, Deoptimization::Unpack_exception);
2837 // Store exception oop and pc in thread (location known to GC).
2838 // This is needed since the call to "fetch_unroll_info()" may safepoint.
2839 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2840 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2842 // fall through
2844 // --------------------------------------------------------------------------
2845 __ BIND(exec_mode_initialized);
2847 {
2848 const Register unroll_block_reg = R22_tmp2;
2850 // We need to set `last_Java_frame' because `fetch_unroll_info' will
2851 // call `last_Java_frame()'. The value of the pc in the frame is not
2852 // particularly important. It just needs to identify this blob.
2853 __ set_last_Java_frame(R1_SP, noreg);
2855 // With EscapeAnalysis turned on, this call may safepoint!
2856 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
2857 address calls_return_pc = __ last_calls_return_pc();
2858 // Set an oopmap for the call site that describes all our saved registers.
2859 oop_maps->add_gc_map(calls_return_pc - start, map);
2861 __ reset_last_Java_frame();
2862 // Save the return value.
2863 __ mr(unroll_block_reg, R3_RET);
2865 // Restore only the result registers that have been saved
2866 // by push_frame_reg_args_and_save_live_registers(...).
2867 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
2869 // In excp_deopt_mode, restore and clear exception oop which we
2870 // stored in the thread during exception entry above. The exception
2871 // oop will be the return value of this stub.
2872 Label skip_restore_excp;
2873 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
2874 __ bne(CCR0, skip_restore_excp);
2875 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2876 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2877 __ li(R0, 0);
2878 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2879 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2880 __ BIND(skip_restore_excp);
2882 // Reload narrow_oop_base.
2883 if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
2884 __ load_const_optimized(R30, Universe::narrow_oop_base());
2885 }
2887 __ pop_frame();
2889 // stack: (deoptee, optional i2c, caller of deoptee, ...).
2891 // pop the deoptee's frame
2892 __ pop_frame();
2894 // stack: (caller_of_deoptee, ...).
2896 // Loop through the `UnrollBlock' info and create interpreter frames.
2897 push_skeleton_frames(masm, true/*deopt*/,
2898 unroll_block_reg,
2899 R23_tmp3,
2900 R24_tmp4,
2901 R25_tmp5,
2902 R26_tmp6,
2903 R27_tmp7);
2905 // stack: (skeletal interpreter frame, ..., optional skeletal
2906 // interpreter frame, optional c2i, caller of deoptee, ...).
2907 }
2909 // push an `unpack_frame' taking care of float / int return values.
2910 __ push_frame(frame_size_in_bytes, R0/*tmp*/);
2912 // stack: (unpack frame, skeletal interpreter frame, ..., optional
2913 // skeletal interpreter frame, optional c2i, caller of deoptee,
2914 // ...).
2916 // Spill live volatile registers since we'll do a call.
2917 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
2918 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
2920 // Let the unpacker layout information in the skeletal frames just
2921 // allocated.
2922 __ get_PC_trash_LR(R3_RET);
2923 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
2924 // This is a call to a LEAF method, so no oop map is required.
2925 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
2926 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
2927 __ reset_last_Java_frame();
2929 // Restore the volatiles saved above.
2930 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
2931 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
2933 // Pop the unpack frame.
2934 __ pop_frame();
2935 __ restore_LR_CR(R0);
2937 // stack: (top interpreter frame, ..., optional interpreter frame,
2938 // optional c2i, caller of deoptee, ...).
2940 // Initialize R14_state.
2941 #ifdef CC_INTERP
2942 __ ld(R14_state, 0, R1_SP);
2943 __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
2944 // Also initialize R15_prev_state.
2945 __ restore_prev_state();
2946 #else
2947 __ restore_interpreter_state(R11_scratch1);
2948 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
2949 #endif // CC_INTERP
2952 // Return to the interpreter entry point.
2953 __ blr();
2954 __ flush();
2955 #else // COMPILER2
2956 __ unimplemented("deopt blob needed only with compiler");
2957 int exception_offset = __ pc() - start;
2958 #endif // COMPILER2
2960 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize);
2961 }
2963 #ifdef COMPILER2
2964 void SharedRuntime::generate_uncommon_trap_blob() {
2965 // Allocate space for the code.
2966 ResourceMark rm;
2967 // Setup code generation tools.
2968 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2969 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
2970 address start = __ pc();
2972 Register unroll_block_reg = R21_tmp1;
2973 Register klass_index_reg = R22_tmp2;
2974 Register unc_trap_reg = R23_tmp3;
2976 OopMapSet* oop_maps = new OopMapSet();
2977 int frame_size_in_bytes = frame::abi_reg_args_size;
2978 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
2980 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
2982 // Push a dummy `unpack_frame' and call
2983 // `Deoptimization::uncommon_trap' to pack the compiled frame into a
2984 // vframe array and return the `UnrollBlock' information.
2986 // Save LR to compiled frame.
2987 __ save_LR_CR(R11_scratch1);
2989 // Push an "uncommon_trap" frame.
2990 __ push_frame_reg_args(0, R11_scratch1);
2992 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
2994 // Set the `unpack_frame' as last_Java_frame.
2995 // `Deoptimization::uncommon_trap' expects it and considers its
2996 // sender frame as the deoptee frame.
2997 // Remember the offset of the instruction whose address will be
2998 // moved to R11_scratch1.
2999 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
3001 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3003 __ mr(klass_index_reg, R3);
3004 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
3005 R16_thread, klass_index_reg);
3007 // Set an oopmap for the call site.
3008 oop_maps->add_gc_map(gc_map_pc - start, map);
3010 __ reset_last_Java_frame();
3012 // Pop the `unpack frame'.
3013 __ pop_frame();
3015 // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
3017 // Save the return value.
3018 __ mr(unroll_block_reg, R3_RET);
3020 // Pop the uncommon_trap frame.
3021 __ pop_frame();
3023 // stack: (caller_of_deoptee, ...).
3025 // Allocate new interpreter frame(s) and possibly a c2i adapter
3026 // frame.
3027 push_skeleton_frames(masm, false/*deopt*/,
3028 unroll_block_reg,
3029 R22_tmp2,
3030 R23_tmp3,
3031 R24_tmp4,
3032 R25_tmp5,
3033 R26_tmp6);
3035 // stack: (skeletal interpreter frame, ..., optional skeletal
3036 // interpreter frame, optional c2i, caller of deoptee, ...).
3038 // Push a dummy `unpack_frame' taking care of float return values.
3039 // Call `Deoptimization::unpack_frames' to layout information in the
3040 // interpreter frames just created.
3042 // Push a simple "unpack frame" here.
3043 __ push_frame_reg_args(0, R11_scratch1);
3045 // stack: (unpack frame, skeletal interpreter frame, ..., optional
3046 // skeletal interpreter frame, optional c2i, caller of deoptee,
3047 // ...).
3049 // Set the "unpack_frame" as last_Java_frame.
3050 __ get_PC_trash_LR(R11_scratch1);
3051 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
3053 // Indicate it is the uncommon trap case.
3054 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
3055 // Let the unpacker layout information in the skeletal frames just
3056 // allocated.
3057 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
3058 R16_thread, unc_trap_reg);
3060 __ reset_last_Java_frame();
3061 // Pop the `unpack frame'.
3062 __ pop_frame();
3063 // Restore LR from top interpreter frame.
3064 __ restore_LR_CR(R11_scratch1);
3066 // stack: (top interpreter frame, ..., optional interpreter frame,
3067 // optional c2i, caller of deoptee, ...).
3069 #ifdef CC_INTERP
3070 // Initialize R14_state, ...
3071 __ ld(R11_scratch1, 0, R1_SP);
3072 __ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
3073 // also initialize R15_prev_state.
3074 __ restore_prev_state();
3075 #else
3076 __ restore_interpreter_state(R11_scratch1);
3077 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
3078 #endif // CC_INTERP
3080 // Return to the interpreter entry point.
3081 __ blr();
3083 masm->flush();
3085 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
3086 }
3087 #endif // COMPILER2
3089 // Generate a special Compile2Runtime blob that saves all registers, and sets up the oopmap.
3090 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3091 assert(StubRoutines::forward_exception_entry() != NULL,
3092 "must be generated before");
3094 ResourceMark rm;
3095 OopMapSet *oop_maps = new OopMapSet();
3096 OopMap* map;
3098 // Allocate space for the code. Setup code generation tools.
3099 CodeBuffer buffer("handler_blob", 2048, 1024);
3100 MacroAssembler* masm = new MacroAssembler(&buffer);
3102 address start = __ pc();
3103 int frame_size_in_bytes = 0;
3105 RegisterSaver::ReturnPCLocation return_pc_location;
3106 bool cause_return = (poll_type == POLL_AT_RETURN);
3107 if (cause_return) {
3108 // Nothing to do here. The frame has already been popped in MachEpilogNode.
3109 // Register LR already contains the return pc.
3110 return_pc_location = RegisterSaver::return_pc_is_lr;
3111 } else {
3112 // Use thread()->saved_exception_pc() as return pc.
3113 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3114 }
3116 // Save registers, fpu state, and flags.
3117 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3118 &frame_size_in_bytes,
3119 /*generate_oop_map=*/ true,
3120 /*return_pc_adjustment=*/0,
3121 return_pc_location);
3123 // The following is basically a call_VM. However, we need the precise
3124 // address of the call in order to generate an oopmap. Hence, we do all the
3125 // work ourselves.
3126 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3128 // The return address must always be correct so that the frame constructor
3129 // never sees an invalid pc.
3131 // Do the call
3132 __ call_VM_leaf(call_ptr, R16_thread);
3133 address calls_return_pc = __ last_calls_return_pc();
3135 // Set an oopmap for the call site. This oopmap will map all
3136 // oop-registers and debug-info registers as callee-saved. This
3137 // will allow deoptimization at this safepoint to find all possible
3138 // debug-info recordings, as well as let GC find all oops.
3139 oop_maps->add_gc_map(calls_return_pc - start, map);
3141 Label noException;
3143 // Clear the last Java frame.
3144 __ reset_last_Java_frame();
3146 BLOCK_COMMENT(" Check pending exception.");
3147 const Register pending_exception = R0;
3148 __ ld(pending_exception, thread_(pending_exception));
3149 __ cmpdi(CCR0, pending_exception, 0);
3150 __ beq(CCR0, noException);
3152 // Exception pending
3153 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3154 frame_size_in_bytes,
3155 /*restore_ctr=*/true);
3157 BLOCK_COMMENT(" Jump to forward_exception_entry.");
3158 // Jump to forward_exception_entry, with the issuing PC in LR
3159 // so it looks like the original nmethod called forward_exception_entry.
3160 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3162 // No exception case.
3163 __ BIND(noException);
3166 // Normal exit, restore registers and exit.
3167 RegisterSaver::restore_live_registers_and_pop_frame(masm,
3168 frame_size_in_bytes,
3169 /*restore_ctr=*/true);
3171 __ blr();
3173 // Make sure all code is generated
3174 masm->flush();
3176 // Fill-out other meta info
3177 // CodeBlob frame size is in words.
3178 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3179 }
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point, but
// since this is generic code we don't know what they are; the caller must do
// any GC of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {

  // Allocate space for the code.
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_bytes;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  address start = __ pc();

  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map*/ true,
                                                                   /*return_pc_adjustment*/ 0,
                                                                   RegisterSaver::return_pc_is_lr);

  // Use noreg as last_Java_pc, the return pc will be reconstructed
  // from the physical frame.
  __ set_last_Java_frame(/*sp*/R1_SP, noreg);

  int frame_complete = __ offset();

  // Pass R19_method as 2nd (optional) argument, used by
  // counter_overflow_stub.
  __ call_VM_leaf(destination, R16_thread, R19_method);
  address calls_return_pc = __ last_calls_return_pc();
  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
  // Create the oopmap for the call's return pc.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  // R3_RET contains the address we are going to jump to, assuming no
  // exception was installed.

  // Clear last_Java_sp.
  __ reset_last_Java_frame();

  // Check for pending exceptions.
  BLOCK_COMMENT("Check for pending exceptions.");
  Label pending;
  __ ld(R11_scratch1, thread_(pending_exception));
  __ cmpdi(CCR0, R11_scratch1, 0);
  __ bne(CCR0, pending);

  __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

  // Get the returned method.
  __ get_vm_result_2(R19_method);

  __ bctr();


  // Pending exception after the safepoint.
  __ BIND(pending);

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);

  // Exception pending => remove activation and forward to exception handler.

  __ li(R11_scratch1, 0);
  __ ld(R3_ARG1, thread_(pending_exception));
  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // -------------
  // Make sure all code is generated.
  masm->flush();

  // Return the blob.
  // new_runtime_stub expects the frame size in words, so pass
  // frame_size_in_bytes / wordSize.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
                                       oop_maps, true);
}

//------------------------------Montgomery multiplication------------------------
//

// Subtract 0:b from carry:a; the low limbs are written back into a.
// Return the resulting carry limb.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0;
  unsigned long tmp, tmp2;
  __asm__ __volatile__ (
    "subfc %[tmp], %[tmp], %[tmp]   \n" // pre-set CA
    "mtctr %[len]                   \n"
    "0:                             \n"
    "ldx %[tmp], %[i], %[a]         \n"
    "ldx %[tmp2], %[i], %[b]        \n"
    "subfe %[tmp], %[tmp2], %[tmp]  \n" // subtract extended
    "stdx %[tmp], %[i], %[a]        \n"
    "addi %[i], %[i], 8             \n"
    "bdnz 0b                        \n"
    "addme %[tmp], %[carry]         \n" // carry + CA - 1
    : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
    : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
    : "ctr", "xer", "memory"
  );
  return tmp;
}
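
// For illustration only (not used by the VM): a portable C++ sketch of what
// the inline assembly above computes. The helper name is hypothetical. It
// subtracts the multi-precision value 0:b from carry:a limb by limb, writes
// the low limbs back into a, and returns the carry limb minus the final
// borrow.
static inline unsigned long sub_portable_sketch(unsigned long a[], unsigned long b[],
                                                unsigned long carry, long len) {
  unsigned long borrow = 0;
  for (long i = 0; i < len; i++) {
    unsigned long ai = a[i];
    unsigned long t  = b[i] + borrow;
    // A borrow comes out if b[i] + borrow wrapped, or if a[i] < b[i] + borrow.
    unsigned long next_borrow = ((t < b[i]) || (ai < t)) ? 1 : 0;
    a[i]   = ai - t;
    borrow = next_borrow;
  }
  return carry - borrow;
}
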
// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}
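
// For illustration only (not used by the VM): the same multiply-accumulate
// written portably with GCC's unsigned __int128 for the 128-bit product;
// the helper name is hypothetical. T2:T1:T0 += A * B, with the carries
// rippling up exactly as the addc / adde / addze sequence above does.
static inline void MACC_portable_sketch(unsigned long A, unsigned long B,
                                        unsigned long &T0, unsigned long &T1,
                                        unsigned long &T2) {
  unsigned __int128 p = (unsigned __int128)A * B;
  unsigned long lo = (unsigned long)p;
  unsigned long hi = (unsigned long)(p >> 64);
  unsigned long t0 = T0 + lo;              // addc
  unsigned long c0 = (t0 < T0) ? 1 : 0;    // carry out of the low limb
  unsigned long s  = T1 + hi;              // adde, first half
  unsigned long c1 = (s < T1) ? 1 : 0;
  unsigned long t1 = s + c0;               // adde, second half
  c1 += (t1 < s) ? 1 : 0;                  // carry out of the middle limb
  T0 = t0;
  T1 = t1;
  T2 += c1;                                // addze
}
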
// As above, but add twice the double-length result into the
// accumulator. The add sequence is simply issued twice; doubling hi:lo
// up front would need extra code for the bit shifted out of hi.
inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}

// Fast Montgomery multiplication. The derivation of the algorithm is
// in S. R. Dusse and B. S. Kaliski, "A Cryptographic Library for the
// Motorola DSP56000", Proc. EUROCRYPT '90, pp. 230-237.
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}
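
// Illustration of the contract (a sketch, not VM-specified code): with
// R = 2^(64*len) and inv = -n^-1 mod 2^64, montgomery_multiply computes
//   m = a * b * R^-1 (mod n),
// the trailing while loop performing the final conditional subtractions.
// A caller working in the Montgomery domain would, in sketch form (r2_mod_n
// and one denoting hypothetical precomputed constants R^2 mod n and 1):
//   montgomery_multiply(x, r2_mod_n, n, x_mont, inv, len);    // to Montgomery form
//   montgomery_multiply(x_mont, y_mont, n, z_mont, inv, len); // multiply in-domain
//   montgomery_multiply(z_mont, one, n, z, inv, len);         // back to normal form
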
// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies so it should be up to 25% faster than Montgomery
// multiplication. However, its loop control is more complex and it
// may actually run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz. It doesn't
// seem to be relevant for Power8, so we use the same value. The threshold
// is measured in jints: 64 jints correspond to 2048-bit operands.
#define MONTGOMERY_SQUARING_THRESHOLD 64

// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while(len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}
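
// Worked example (illustration only): on a little-endian machine, the jint
// sequence {1, 2, 3, 4} (most significant jint first, as in a Java
// BigInteger magnitude) occupies two longwords,
//   s[0] = 0x0000000200000001, s[1] = 0x0000000400000003.
// reverse_words(s, d, 2) yields little-endian limbs,
//   d[0] = 0x0000000300000004 (least significant),
//   d[1] = 0x0000000100000002 (most significant),
// i.e. the value 1*2^96 + 2*2^64 + 3*2^32 + 4.
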
void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;
  assert(longwords > 0, "unsupported");

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and uses a
  // total of 8K bytes of stack space here (4 arrays of 256 longwords).
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;
  assert(longwords > 0, "unsupported");

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and uses a
  // total of 6K bytes of stack space here (3 arrays of 256 longwords).
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
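
// Note: these two entry points are intended to back the BigInteger
// montgomeryMultiply/montgomerySquare intrinsics. The platform stub
// generator is expected to publish them via
// StubRoutines::_montgomeryMultiply and StubRoutines::_montgomerySquare
// when UseMontgomeryMultiplyIntrinsic / UseMontgomerySquareIntrinsic are
// enabled (an assumption about the surrounding build, not shown here).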