/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception, a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires that we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // We can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };

  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
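
// Illustrative usage sketch (not from the original file; the blob-building
// names below are hypothetical): runtime stubs in this file bracket a VM
// call with the save/restore pair above and attach the returned OopMap to
// the call site so the GC can find the saved values:
//
//   int frame_size_words;
//   OopMapSet* oop_maps = new OopMapSet();
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set up arguments and call into the runtime ...
//   oop_maps->add_gc_map(call_pc_offset, map);  // call_pc_offset: hypothetical
//   RegisterSaver::restore_live_registers(masm);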

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)), not bytes or words.
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // into Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}
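
// Note on the slot arithmetic above (editor's gloss, not in the original
// source): the enum offsets are byte offsets from SP, while OopMap wants
// 32-bit slot indices, hence the >>2. SPARC is big-endian, so the low
// 32-bit half of a 64-bit save lives 4 bytes above its base; that is what
// the "+4" (and "+debug_offset" in the 32-bit build) selects.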

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than a size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
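
// Illustrative arithmetic (assuming, purely for the example, that
// out_preserve_stack_slots() returns 16 slots for the reserved abi area):
// a VMReg at Java stack slot 2 then maps to byte offset
// (2 + 16) * 4 = 72 from the window top.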

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot past
// the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to RegisterImpl::number_of_registers)
// are the 64-bit integer registers. Values 64-95 are the (32-bit only) float
// registers. Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers. For example, there
// is an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
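
// Illustrative reading of the scheme above (editor's example, not in the
// original source): for a long passed in O0, regs[i].first() and
// regs[i].second() name the two 32-bit halves (O0-high and O0-low, adjacent
// doubled numbers); for an int in O0 only regs[i].first() is set and
// regs[i].second() is VMRegImpl::Bad().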


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC always put longs on the stack to keep the pressure off
      // the integer argument registers. They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();  // Halves of longs & doubles
      break;

    default:
      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
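
// Worked example (illustrative, not in the original source): for an outgoing
// 64-bit call with signature (int, long, double), sig_bt is
// {T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID}; the loop assigns regs[0] = O0,
// regs[1] = O1 (via set2), regs[3] = F0 as an aligned double pair, marks the
// T_VOID halves set_bad(), and returns 0 since no stack slots were needed.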

// Helper class, mostly to avoid passing masm everywhere and to handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};

// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  //   G1: 1st Long arg (32-bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32-bit build)
  //   G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
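
// Editor's note (an assumption about intent, consistent with the helper's
// name): SPARC load/store displacements are 13-bit signed immediates, so an
// offset like 0x2000 cannot be encoded directly; ensure_simm13_or_reg()
// then materializes the offset into Rdisp and returns the register form
// instead of the constant.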

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off));      // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off));
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in the varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset.
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in.
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on sparc");

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                 // Count of actual args, not HALVES
  VMRegPair param_array_reg; // location of the argument in the parameter array
  for (int i = 0; i < total_args_passed; i++, j++) {
    param_array_reg.set_bad();
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      // fall-through
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
      // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
      //
      // "When a callee prototype exists, and does not indicate variable arguments,
      // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
      // will be promoted to floating-point registers"
      //
      // By "promoted" it means that the argument is located in two places, an unused
      // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
      // float register. In most cases, there are 6 or fewer arguments of any type,
      // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
      // serve as shadow slots. Per the spec, floating point registers %d6 to %d16
      // require slots beyond that (up to %sp+BIAS+248).
      //
      {
        // V9ism: floats go in ODD registers and stack slots
        int float_index = 1 + (j << 1);
        param_array_reg.set1(VMRegImpl::stack2reg(float_index));
        if (j < 16) {
          regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
        } else {
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_DOUBLE:
      {
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // V9ism: doubles go in EVEN/ODD regs and stack slots
        int double_index = (j << 1);
        param_array_reg.set2(VMRegImpl::stack2reg(double_index));
        if (j < 16) {
          regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      j--;
      break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    // Keep track of the deepest parameter array slot.
    if (!param_array_reg.first()->is_valid()) {
      param_array_reg = regs[i];
    }
    if (param_array_reg.first()->is_stack()) {
      int off = param_array_reg.first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (param_array_reg.second()->is_stack()) {
      int off = param_array_reg.second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1(int_stk_helper(i));
      break;
    case T_DOUBLE:
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
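
// Worked example (illustrative, 64-bit build, not in the original source):
// for a native call taking (JNIEnv*, jobject, jdouble), sig_bt is
// {T_ADDRESS, T_OBJECT, T_DOUBLE, T_VOID}; int_stk_helper(0) and (1) place
// the first two arguments in O0 and O1, and the double (j == 2,
// double_index == 4) lands in the even/odd float pair F4/F5 with a shadow
// slot reserved in the parameter array.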


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward any pending exception. The thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call,
// there is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (sparc abi) even though java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits. So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack.
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack: the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}
1462 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1463 VMRegPair src_lo(src.first());
1464 VMRegPair src_hi(src.second());
1465 VMRegPair dst_lo(dst.first());
1466 VMRegPair dst_hi(dst.second());
1467 simple_move32(masm, src_lo, dst_lo);
1468 simple_move32(masm, src_hi, dst_hi);
1469 }
1471 // A long move
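// Roughly, the cases below are: single 64-bit reg -> single 64-bit reg
// (plain mov); single reg -> split pair (srax to expose the msw, then
// 32-bit moves); adjacent aligned stack pair -> single reg (one ldx);
// other pair -> single reg (two 32-bit moves, then sllx/or3 to merge);
// and everything else as two independent 32-bit moves.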
1472 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1474 // Do the simple ones here else do two int moves
1475 if (src.is_single_phys_reg() ) {
1476 if (dst.is_single_phys_reg()) {
1477 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1478 } else {
1479 // split src into two separate registers
1480 // Remember hi means hi address or lsw on sparc
1481 // Move msw to lsw
1482 if (dst.second()->is_reg()) {
1483 // MSW -> MSW
1484 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1485 // Now LSW -> LSW
1486 // this will only move lo -> lo and ignore hi
1487 VMRegPair split(dst.second());
1488 simple_move32(masm, src, split);
1489 } else {
1490 VMRegPair split(src.first(), L4->as_VMReg());
1491 // MSW -> MSW (lo ie. first word)
1492 __ srax(src.first()->as_Register(), 32, L4);
1493 split_long_move(masm, split, dst);
1494 }
1495 }
1496 } else if (dst.is_single_phys_reg()) {
1497 if (src.is_adjacent_aligned_on_stack(2)) {
1498 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1499 } else {
1500 // dst is a single reg.
1501 // Remember lo is the low address (not the msb) for stack slots
1502 // and lo is the "real" register for registers.
1503 // src is a split pair: register and/or stack halves.
1505 VMRegPair split;
1507 if (src.first()->is_reg()) {
1508 // src.lo (msw) is a reg, src.hi is stk/reg
1509 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1510 split.set_pair(dst.first(), src.first());
1511 } else {
1512 // msw is stack move to L5
1513 // lsw is stack move to dst.lo (real reg)
1514 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1515 split.set_pair(dst.first(), L5->as_VMReg());
1516 }
1518 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1519 // msw -> src.lo/L5, lsw -> dst.lo
1520 split_long_move(masm, src, split);
1522 // dst now has the low-order word in the correct position;
1523 // shift the msw half into place and merge it in.
1524 __ sllx(split.first()->as_Register(), 32, L5);
1526 const Register d = dst.first()->as_Register();
1527 __ or3(L5, d, d);
1528 }
1529 } else {
1530 // For LP64 we can probably do better.
1531 split_long_move(masm, src, dst);
1532 }
1533 }
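// Illustrative example (assumed operands): for src = one 64-bit register
// and dst = { O0, a stack slot }, the code above emits an srax to copy the
// msw into L4, a mov of the source register into O0 (its low 32 bits are
// the lsw), and a 32-bit store of L4 into the stack half.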
1535 // A double move
1536 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1538 // The painful thing here is that like long_move a VMRegPair might be
1539 // 1: a single physical register
1540 // 2: two physical registers (v8)
1541 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1542 // 4: two stack slots
1544 // Since src always follows the java calling convention we know that the src
1545 // pair is either all registers or all stack (and aligned?); it is never split
1547 // with one half in a register [lo] and one half in a stack slot [hi].
1548 if (src.first()->is_stack()) {
1549 if (dst.first()->is_stack()) {
1550 // stack to stack the easiest of the bunch
1551 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1552 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1553 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1554 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1555 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1556 } else {
1557 // stack to reg
1558 if (dst.second()->is_stack()) {
1559 // stack -> reg, stack -> stack
1560 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1561 if (dst.first()->is_Register()) {
1562 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1563 } else {
1564 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1565 }
1566 // Also store the high half (this store was once missing; very rare case).
1567 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1568 } else {
1569 // stack -> reg
1570 // Eventually optimize for alignment QQQ
1571 if (dst.first()->is_Register()) {
1572 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1573 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1574 } else {
1575 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1576 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1577 }
1578 }
1579 }
1580 } else if (dst.first()->is_stack()) {
1581 // reg to stack
1582 if (src.first()->is_Register()) {
1583 // Eventually optimize for alignment QQQ
1584 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1585 if (src.second()->is_stack()) {
1586 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1587 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1588 } else {
1589 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1590 }
1591 } else {
1592 // fpr to stack
1593 if (src.second()->is_stack()) {
1594 ShouldNotReachHere();
1595 } else {
1596 // Is the stack aligned?
1597 if (reg2offset(dst.first()) & 0x7) {
1598 // No; store as a pair of singles
1599 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1600 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1601 } else {
1602 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1603 }
1604 }
1605 }
1606 } else {
1607 // reg to reg
1608 if (src.first()->is_Register()) {
1609 if (dst.first()->is_Register()) {
1610 // gpr -> gpr
1611 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1612 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1613 } else {
1614 // gpr -> fpr
1615 // ought to be able to do a single store
1616 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1617 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1618 // ought to be able to do a single load
1619 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1620 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1621 }
1622 } else if (dst.first()->is_Register()) {
1623 // fpr -> gpr
1624 // ought to be able to do a single store
1625 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1626 // ought to be able to do a single load
1627 // REMEMBER first() is low address not LSB
1628 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1629 if (dst.second()->is_Register()) {
1630 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1631 } else {
1632 __ ld(FP, -4 + STACK_BIAS, L4);
1633 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1634 }
1635 } else {
1636 // fpr -> fpr
1637 // In theory these overlap but the ordering is such that this is likely a nop
1638 if ( src.first() != dst.first()) {
1639 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1640 }
1641 }
1642 }
1643 }
1645 // Creates an inner frame if one hasn't already been created, and
1646 // saves a copy of the thread in L7_thread_cache
1647 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1648 if (!*already_created) {
1649 __ save_frame(0);
1650 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1651 // Don't use save_thread because it smashes G2 and we merely want to save a
1652 // copy
1653 __ mov(G2_thread, L7_thread_cache);
1654 *already_created = true;
1655 }
1656 }
1659 static void save_or_restore_arguments(MacroAssembler* masm,
1660 const int stack_slots,
1661 const int total_in_args,
1662 const int arg_save_area,
1663 OopMap* map,
1664 VMRegPair* in_regs,
1665 BasicType* in_sig_bt) {
1666 // if map is non-NULL then the code should store the values,
1667 // otherwise it should load them.
1668 if (map != NULL) {
1669 // Fill in the map
1670 for (int i = 0; i < total_in_args; i++) {
1671 if (in_sig_bt[i] == T_ARRAY) {
1672 if (in_regs[i].first()->is_stack()) {
1673 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1674 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1675 } else if (in_regs[i].first()->is_Register()) {
1676 map->set_oop(in_regs[i].first());
1677 } else {
1678 ShouldNotReachHere();
1679 }
1680 }
1681 }
1682 }
1684 // Save or restore double word values
1685 int handle_index = 0;
1686 for (int i = 0; i < total_in_args; i++) {
1687 int slot = handle_index + arg_save_area;
1688 int offset = slot * VMRegImpl::stack_slot_size;
1689 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1690 const Register reg = in_regs[i].first()->as_Register();
1691 if (reg->is_global()) {
1692 handle_index += 2;
1693 assert(handle_index <= stack_slots, "overflow");
1694 if (map != NULL) {
1695 __ stx(reg, SP, offset + STACK_BIAS);
1696 } else {
1697 __ ldx(SP, offset + STACK_BIAS, reg);
1698 }
1699 }
1700 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1701 handle_index += 2;
1702 assert(handle_index <= stack_slots, "overflow");
1703 if (map != NULL) {
1704 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1705 } else {
1706 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1707 }
1708 }
1709 }
1710 // Save floats
1711 for (int i = 0; i < total_in_args; i++) {
1712 int slot = handle_index + arg_save_area;
1713 int offset = slot * VMRegImpl::stack_slot_size;
1714 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1715 handle_index++;
1716 assert(handle_index <= stack_slots, "overflow");
1717 if (map != NULL) {
1718 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1719 } else {
1720 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1721 }
1722 }
1723 }
1725 }
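// save_or_restore_arguments is used in matched pairs: first with a non-NULL
// OopMap to describe and store the live argument registers, then with a
// NULL map to reload them after the VM call (see
// check_needs_gc_for_critical_native below).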
1728 // Check GC_locker::needs_gc and enter the runtime if it's true. This
1729 // keeps a new JNI critical region from starting until a GC has been
1730 // forced. Save down any oops in registers and describe them in an
1731 // OopMap.
1732 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1733 const int stack_slots,
1734 const int total_in_args,
1735 const int arg_save_area,
1736 OopMapSet* oop_maps,
1737 VMRegPair* in_regs,
1738 BasicType* in_sig_bt) {
1739 __ block_comment("check GC_locker::needs_gc");
1740 Label cont;
1741 AddressLiteral sync_state(GC_locker::needs_gc_address());
1742 __ load_bool_contents(sync_state, G3_scratch);
1743 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1744 __ delayed()->nop();
1746 // Save down any values that are live in registers and call into the
1747 // runtime to halt for a GC
1748 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1749 save_or_restore_arguments(masm, stack_slots, total_in_args,
1750 arg_save_area, map, in_regs, in_sig_bt);
1752 __ mov(G2_thread, L7_thread_cache);
1754 __ set_last_Java_frame(SP, noreg);
1756 __ block_comment("block_for_jni_critical");
1757 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1758 __ delayed()->mov(L7_thread_cache, O0);
1759 oop_maps->add_gc_map( __ offset(), map);
1761 __ restore_thread(L7_thread_cache); // restore G2_thread
1762 __ reset_last_Java_frame();
1764 // Reload all the register arguments
1765 save_or_restore_arguments(masm, stack_slots, total_in_args,
1766 arg_save_area, NULL, in_regs, in_sig_bt);
1768 __ bind(cont);
1769 #ifdef ASSERT
1770 if (StressCriticalJNINatives) {
1771 // Stress register saving
1772 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1773 save_or_restore_arguments(masm, stack_slots, total_in_args,
1774 arg_save_area, map, in_regs, in_sig_bt);
1775 // Destroy argument registers
1776 for (int i = 0; i < total_in_args; i++) {
1777 if (in_regs[i].first()->is_Register()) {
1778 const Register reg = in_regs[i].first()->as_Register();
1779 if (reg->is_global()) {
1780 __ mov(G0, reg);
1781 }
1782 } else if (in_regs[i].first()->is_FloatRegister()) {
1783 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1784 }
1785 }
1787 save_or_restore_arguments(masm, stack_slots, total_in_args,
1788 arg_save_area, NULL, in_regs, in_sig_bt);
1789 }
1790 #endif
1791 }
1793 // Unpack an array argument into a pointer to the body and the length
1794 // if the array is non-null, otherwise pass 0 for both.
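// For example (illustrative), a non-null int[] arriving in a register
// becomes two C arguments: a jint length loaded from the arrayOop's length
// field, and an element pointer computed as
// oop + arrayOopDesc::base_offset_in_bytes(T_INT).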
1795 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1796 // Pass the length, ptr pair
1797 Label is_null, done;
1798 if (reg.first()->is_stack()) {
1799 VMRegPair tmp = reg64_to_VMRegPair(L2);
1800 // Load the arg up from the stack
1801 move_ptr(masm, reg, tmp);
1802 reg = tmp;
1803 }
1804 __ cmp(reg.first()->as_Register(), G0);
1805 __ brx(Assembler::equal, false, Assembler::pt, is_null);
1806 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1807 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1808 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1809 move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1810 __ ba_short(done);
1811 __ bind(is_null);
1812 // Pass zeros
1813 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1814 move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1815 __ bind(done);
1816 }
1818 static void verify_oop_args(MacroAssembler* masm,
1819 methodHandle method,
1820 const BasicType* sig_bt,
1821 const VMRegPair* regs) {
1822 Register temp_reg = G5_method; // not part of any compiled calling seq
1823 if (VerifyOops) {
1824 for (int i = 0; i < method->size_of_parameters(); i++) {
1825 if (sig_bt[i] == T_OBJECT ||
1826 sig_bt[i] == T_ARRAY) {
1827 VMReg r = regs[i].first();
1828 assert(r->is_valid(), "bad oop arg");
1829 if (r->is_stack()) {
1830 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1831 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1832 __ ld_ptr(SP, ld_off, temp_reg);
1833 __ verify_oop(temp_reg);
1834 } else {
1835 __ verify_oop(r->as_Register());
1836 }
1837 }
1838 }
1839 }
1840 }
1842 static void gen_special_dispatch(MacroAssembler* masm,
1843 methodHandle method,
1844 const BasicType* sig_bt,
1845 const VMRegPair* regs) {
1846 verify_oop_args(masm, method, sig_bt, regs);
1847 vmIntrinsics::ID iid = method->intrinsic_id();
1849 // Now write the args into the outgoing interpreter space
1850 bool has_receiver = false;
1851 Register receiver_reg = noreg;
1852 int member_arg_pos = -1;
1853 Register member_reg = noreg;
1854 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1855 if (ref_kind != 0) {
1856 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1857 member_reg = G5_method; // known to be free at this point
1858 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1859 } else if (iid == vmIntrinsics::_invokeBasic) {
1860 has_receiver = true;
1861 } else {
1862 fatal(err_msg_res("unexpected intrinsic id %d", iid));
1863 }
1865 if (member_reg != noreg) {
1866 // Load the member_arg into register, if necessary.
1867 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1868 VMReg r = regs[member_arg_pos].first();
1869 if (r->is_stack()) {
1870 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1871 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1872 __ ld_ptr(SP, ld_off, member_reg);
1873 } else {
1874 // no data motion is needed
1875 member_reg = r->as_Register();
1876 }
1877 }
1879 if (has_receiver) {
1880 // Make sure the receiver is loaded into a register.
1881 assert(method->size_of_parameters() > 0, "oob");
1882 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1883 VMReg r = regs[0].first();
1884 assert(r->is_valid(), "bad receiver arg");
1885 if (r->is_stack()) {
1886 // Porting note: This assumes that compiled calling conventions always
1887 // pass the receiver oop in a register. If this is not true on some
1888 // platform, pick a temp and load the receiver from stack.
1889 fatal("receiver always in a register");
1890 receiver_reg = G3_scratch; // known to be free at this point
1891 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1892 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1893 __ ld_ptr(SP, ld_off, receiver_reg);
1894 } else {
1895 // no data motion is needed
1896 receiver_reg = r->as_Register();
1897 }
1898 }
1900 // Figure out which address we are really jumping to:
1901 MethodHandles::generate_method_handle_dispatch(masm, iid,
1902 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1903 }
1905 // ---------------------------------------------------------------------------
1906 // Generate a native wrapper for a given method. The method takes arguments
1907 // in the Java compiled code convention, marshals them to the native
1908 // convention (handlizes oops, etc), transitions to native, makes the call,
1909 // returns to java state (possibly blocking), unhandlizes any result and
1910 // returns.
1911 //
1912 // Critical native functions are a shorthand for the use of
1913 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1914 // functions. The wrapper is expected to unpack the arguments before
1915 // passing them to the callee and perform checks before and after the
1916 // native call to ensure that the GC_locker
1917 // lock_critical/unlock_critical semantics are followed. Some other
1918 // parts of JNI setup are skipped, like the tear down of the JNI handle
1919 // block and the check for pending exceptions, since it's impossible for
1920 // them to be thrown.
1921 //
1922 // They are roughly structured like this:
1923 // if (GC_locker::needs_gc())
1924 // SharedRuntime::block_for_jni_critical();
1925 // transition to thread_in_native
1926 // unpack array arguments and call native entry point
1927 // check for safepoint in progress
1928 // check if any thread suspend flags are set
1929 // call into JVM and possibly unlock the JNI critical
1930 // if a GC was suppressed while in the critical native.
1931 // transition back to thread_in_Java
1932 // return to caller
1933 //
1934 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1935 methodHandle method,
1936 int compile_id,
1937 BasicType* in_sig_bt,
1938 VMRegPair* in_regs,
1939 BasicType ret_type) {
1940 if (method->is_method_handle_intrinsic()) {
1941 vmIntrinsics::ID iid = method->intrinsic_id();
1942 intptr_t start = (intptr_t)__ pc();
1943 int vep_offset = ((intptr_t)__ pc()) - start;
1944 gen_special_dispatch(masm,
1945 method,
1946 in_sig_bt,
1947 in_regs);
1948 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1949 __ flush();
1950 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1951 return nmethod::new_native_nmethod(method,
1952 compile_id,
1953 masm->code(),
1954 vep_offset,
1955 frame_complete,
1956 stack_slots / VMRegImpl::slots_per_word,
1957 in_ByteSize(-1),
1958 in_ByteSize(-1),
1959 (OopMapSet*)NULL);
1960 }
1961 bool is_critical_native = true;
1962 address native_func = method->critical_native_function();
1963 if (native_func == NULL) {
1964 native_func = method->native_function();
1965 is_critical_native = false;
1966 }
1967 assert(native_func != NULL, "must have function");
1969 // Native nmethod wrappers never take possession of the oop arguments.
1970 // So the caller will gc the arguments. The only thing we need an
1971 // oopMap for is if the call is static
1972 //
1973 // An OopMap for lock (and class if static), and one for the VM call itself
1974 OopMapSet *oop_maps = new OopMapSet();
1975 intptr_t start = (intptr_t)__ pc();
1977 // First thing make an ic check to see if we should even be here
1978 {
1979 Label L;
1980 const Register temp_reg = G3_scratch;
1981 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1982 __ verify_oop(O0);
1983 __ load_klass(O0, temp_reg);
1984 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
1986 __ jump_to(ic_miss, temp_reg);
1987 __ delayed()->nop();
1988 __ align(CodeEntryAlignment);
1989 __ bind(L);
1990 }
1992 int vep_offset = ((intptr_t)__ pc()) - start;
1994 #ifdef COMPILER1
1995 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1996 // Object.hashCode can pull the hashCode from the header word
1997 // instead of doing a full VM transition once it's been computed.
1998 // Since hashCode is usually polymorphic at call sites we can't do
1999 // this optimization at the call site without a lot of work.
2000 Label slowCase;
2001 Register receiver = O0;
2002 Register result = O0;
2003 Register header = G3_scratch;
2004 Register hash = G3_scratch; // overwrite header value with hash value
2005 Register mask = G1; // to get hash field from header
2007 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
2008 // We depend on hash_mask being at most 32 bits and avoid the use of
2009 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2010 // vm: see markOop.hpp.
2011 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
2012 __ sethi(markOopDesc::hash_mask, mask);
2013 __ btst(markOopDesc::unlocked_value, header);
2014 __ br(Assembler::zero, false, Assembler::pn, slowCase);
2015 if (UseBiasedLocking) {
2016 // Check if biased and fall through to runtime if so
2017 __ delayed()->nop();
2018 __ btst(markOopDesc::biased_lock_bit_in_place, header);
2019 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2020 }
2021 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2023 // Check for a valid (non-zero) hash code and get its value.
2024 #ifdef _LP64
2025 __ srlx(header, markOopDesc::hash_shift, hash);
2026 #else
2027 __ srl(header, markOopDesc::hash_shift, hash);
2028 #endif
2029 __ andcc(hash, mask, hash);
2030 __ br(Assembler::equal, false, Assembler::pn, slowCase);
2031 __ delayed()->nop();
2033 // leaf return.
2034 __ retl();
2035 __ delayed()->mov(hash, result);
2036 __ bind(slowCase);
2037 }
2038 #endif // COMPILER1
2041 // We have received a description of where all the java args are located
2042 // on entry to the wrapper. We need to convert these args to where
2043 // the jni function will expect them. To figure out where they go
2044 // we convert the java signature to a C signature by inserting
2045 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2047 const int total_in_args = method->size_of_parameters();
2048 int total_c_args = total_in_args;
2049 int total_save_slots = 6 * VMRegImpl::slots_per_word;
2050 if (!is_critical_native) {
2051 total_c_args += 1;
2052 if (method->is_static()) {
2053 total_c_args++;
2054 }
2055 } else {
2056 for (int i = 0; i < total_in_args; i++) {
2057 if (in_sig_bt[i] == T_ARRAY) {
2058 // These have to be saved and restored across the safepoint
2059 total_c_args++;
2060 }
2061 }
2062 }
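// For example (illustrative), a critical native with java signature
// (int, byte[]) has total_in_args == 2 but total_c_args == 3, since each
// array argument expands into a (length, body pointer) pair below.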
2064 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2065 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2066 BasicType* in_elem_bt = NULL;
2068 int argc = 0;
2069 if (!is_critical_native) {
2070 out_sig_bt[argc++] = T_ADDRESS;
2071 if (method->is_static()) {
2072 out_sig_bt[argc++] = T_OBJECT;
2073 }
2075 for (int i = 0; i < total_in_args ; i++ ) {
2076 out_sig_bt[argc++] = in_sig_bt[i];
2077 }
2078 } else {
2079 Thread* THREAD = Thread::current();
2080 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2081 SignatureStream ss(method->signature());
2082 for (int i = 0; i < total_in_args ; i++ ) {
2083 if (in_sig_bt[i] == T_ARRAY) {
2084 // Arrays are passed as int, elem* pair
2085 out_sig_bt[argc++] = T_INT;
2086 out_sig_bt[argc++] = T_ADDRESS;
2087 Symbol* atype = ss.as_symbol(CHECK_NULL);
2088 const char* at = atype->as_C_string();
2089 if (strlen(at) == 2) {
2090 assert(at[0] == '[', "must be");
2091 switch (at[1]) {
2092 case 'B': in_elem_bt[i] = T_BYTE; break;
2093 case 'C': in_elem_bt[i] = T_CHAR; break;
2094 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2095 case 'F': in_elem_bt[i] = T_FLOAT; break;
2096 case 'I': in_elem_bt[i] = T_INT; break;
2097 case 'J': in_elem_bt[i] = T_LONG; break;
2098 case 'S': in_elem_bt[i] = T_SHORT; break;
2099 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2100 default: ShouldNotReachHere();
2101 }
2102 }
2103 } else {
2104 out_sig_bt[argc++] = in_sig_bt[i];
2105 in_elem_bt[i] = T_VOID;
2106 }
2107 if (in_sig_bt[i] != T_VOID) {
2108 assert(in_sig_bt[i] == ss.type(), "must match");
2109 ss.next();
2110 }
2111 }
2112 }
2114 // Now figure out where the args must be stored and how much stack space
2115 // they require (neglecting out_preserve_stack_slots, but including space
2116 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
2117 //
2118 int out_arg_slots;
2119 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2121 if (is_critical_native) {
2122 // Critical natives may have to call out so they need a save area
2123 // for register arguments.
2124 int double_slots = 0;
2125 int single_slots = 0;
2126 for ( int i = 0; i < total_in_args; i++) {
2127 if (in_regs[i].first()->is_Register()) {
2128 const Register reg = in_regs[i].first()->as_Register();
2129 switch (in_sig_bt[i]) {
2130 case T_ARRAY:
2131 case T_BOOLEAN:
2132 case T_BYTE:
2133 case T_SHORT:
2134 case T_CHAR:
2135 case T_INT: assert(reg->is_in(), "don't need to save these"); break;
2136 case T_LONG: if (reg->is_global()) double_slots++; break;
2137 default: ShouldNotReachHere();
2138 }
2139 } else if (in_regs[i].first()->is_FloatRegister()) {
2140 switch (in_sig_bt[i]) {
2141 case T_FLOAT: single_slots++; break;
2142 case T_DOUBLE: double_slots++; break;
2143 default: ShouldNotReachHere();
2144 }
2145 }
2146 }
2147 total_save_slots = double_slots * 2 + single_slots;
2148 }
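// For example (illustrative): a long in a G register, a double in an fpr
// and a float in an fpr give double_slots == 2 and single_slots == 1, so
// total_save_slots == 5.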
2150 // Compute framesize for the wrapper. We need to handlize all oops in
2151 // registers. We must create space for them here that is disjoint from
2152 // the windowed save area because we have no control over when we might
2153 // flush the window again and overwrite values that gc has since modified.
2154 // (The live window race)
2155 //
2156 // We always just allocate 6 words for storing down these objects. This allows
2157 // us to simply record the base and use the Ireg number to decide which
2158 // slot to use. (Note that the reg number is the inbound number not the
2159 // outbound number).
2160 // We must shuffle args to match the native convention, and include var-args space.
2162 // Calculate the total number of stack slots we will need.
2164 // First count the abi requirement plus all of the outgoing args
2165 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2167 // Now the space for the inbound oop handle area
2169 int oop_handle_offset = round_to(stack_slots, 2);
2170 stack_slots += total_save_slots;
2172 // Now any space we need for handlizing a klass if static method
2174 int klass_slot_offset = 0;
2175 int klass_offset = -1;
2176 int lock_slot_offset = 0;
2177 bool is_static = false;
2179 if (method->is_static()) {
2180 klass_slot_offset = stack_slots;
2181 stack_slots += VMRegImpl::slots_per_word;
2182 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2183 is_static = true;
2184 }
2186 // Plus a lock if needed
2188 if (method->is_synchronized()) {
2189 lock_slot_offset = stack_slots;
2190 stack_slots += VMRegImpl::slots_per_word;
2191 }
2193 // Now a place to save return value or as a temporary for any gpr -> fpr moves
2194 stack_slots += 2;
2196 // Ok The space we have allocated will look like:
2197 //
2198 //
2199 // FP-> | |
2200 // |---------------------|
2201 // | 2 slots for moves |
2202 // |---------------------|
2203 // | lock box (if sync) |
2204 // |---------------------| <- lock_slot_offset
2205 // | klass (if static) |
2206 // |---------------------| <- klass_slot_offset
2207 // | oopHandle area |
2208 // |---------------------| <- oop_handle_offset
2209 // | outbound memory |
2210 // | based arguments |
2211 // | |
2212 // |---------------------|
2213 // | vararg area |
2214 // |---------------------|
2215 // | |
2216 // SP-> | out_preserved_slots |
2217 //
2218 //
2221 // Now compute the actual number of stack words we need, rounding to make
2222 // the stack properly aligned.
2223 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2225 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
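// For example (illustrative, LP64): slots_per_word == 2 and
// stack_slot_size == 4, so 70 raw slots round up to 72 slots,
// i.e. a 288-byte frame.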
2227 // Generate stack overflow check before creating frame
2228 __ generate_stack_overflow_check(stack_size);
2230 // Generate a new frame for the wrapper.
2231 __ save(SP, -stack_size, SP);
2233 int frame_complete = ((intptr_t)__ pc()) - start;
2235 __ verify_thread();
2237 if (is_critical_native) {
2238 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
2239 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2240 }
2242 //
2243 // We immediately shuffle the arguments so that for any vm call we have to
2244 // make from here on out (sync slow path, jvmti, etc.) we will have
2245 // captured the oops from our caller and have a valid oopMap for
2246 // them.
2248 // -----------------
2249 // The Grand Shuffle
2250 //
2251 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2252 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2253 // the class mirror instead of a receiver. This pretty much guarantees that
2254 // register layout will not match. We ignore these extra arguments during
2255 // the shuffle. The shuffle is described by the two calling convention
2256 // vectors we have in our possession. We simply walk the java vector to
2257 // get the source locations and the c vector to get the destinations.
2258 // Because we have a new window and the argument registers are completely
2259 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2260 // here.
2262 // This is a trick. We double the stack slots so we can claim
2263 // the oops in the caller's frame. Since we are sure to have
2264 // more args than the caller, doubling is enough to make
2265 // sure we can capture all the incoming oop args from the
2266 // caller.
2267 //
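// Concretely, oops still sitting in the caller's outgoing-arg area are
// recorded at offset_in_older_frame + framesize_in_slots (see object_move),
// which can exceed this frame's own slot count; doubling keeps those
// indices within the map.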
2268 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2269 // Record sp-based slot for receiver on stack for non-static methods
2270 int receiver_offset = -1;
2272 // We move the arguments backward because a floating point argument's
2273 // destination will always be a register with a greater or equal register
2274 // number, or the stack.
2276 #ifdef ASSERT
2277 bool reg_destroyed[RegisterImpl::number_of_registers];
2278 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2279 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2280 reg_destroyed[r] = false;
2281 }
2282 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2283 freg_destroyed[f] = false;
2284 }
2286 #endif /* ASSERT */
2288 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2290 #ifdef ASSERT
2291 if (in_regs[i].first()->is_Register()) {
2292 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2293 } else if (in_regs[i].first()->is_FloatRegister()) {
2294 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2295 }
2296 if (out_regs[c_arg].first()->is_Register()) {
2297 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2298 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2299 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2300 }
2301 #endif /* ASSERT */
2303 switch (in_sig_bt[i]) {
2304 case T_ARRAY:
2305 if (is_critical_native) {
2306 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2307 c_arg--;
2308 break;
2309 }
2310 case T_OBJECT:
2311 assert(!is_critical_native, "no oop arguments");
2312 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2313 ((i == 0) && (!is_static)),
2314 &receiver_offset);
2315 break;
2316 case T_VOID:
2317 break;
2319 case T_FLOAT:
2320 float_move(masm, in_regs[i], out_regs[c_arg]);
2321 break;
2323 case T_DOUBLE:
2324 assert( i + 1 < total_in_args &&
2325 in_sig_bt[i + 1] == T_VOID &&
2326 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2327 double_move(masm, in_regs[i], out_regs[c_arg]);
2328 break;
2330 case T_LONG :
2331 long_move(masm, in_regs[i], out_regs[c_arg]);
2332 break;
2334 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2336 default:
2337 move32_64(masm, in_regs[i], out_regs[c_arg]);
2338 }
2339 }
2341 // Pre-load a static method's oop into O1. Used both by locking code and
2342 // the normal JNI call code.
2343 if (method->is_static() && !is_critical_native) {
2344 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2346 // Now handlize the static class mirror in O1. It's known not-null.
2347 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2348 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2349 __ add(SP, klass_offset + STACK_BIAS, O1);
2350 }
2353 const Register L6_handle = L6;
2355 if (method->is_synchronized()) {
2356 assert(!is_critical_native, "unhandled");
2357 __ mov(O1, L6_handle);
2358 }
2360 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
2361 // except O6/O7. So if we must call out we must push a new frame. We immediately
2362 // push a new frame and flush the windows.
2363 #ifdef _LP64
2364 intptr_t thepc = (intptr_t) __ pc();
2365 {
2366 address here = __ pc();
2367 // Call the next instruction
2368 __ call(here + 8, relocInfo::none);
2369 __ delayed()->nop();
2370 }
2371 #else
2372 intptr_t thepc = __ load_pc_address(O7, 0);
2373 #endif /* _LP64 */
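// In both cases above the current pc ends up in O7: on LP64 the call to
// the literally-next instruction just falls through while SPARC's call
// deposits its own address in %o7; on 32-bit, load_pc_address does the
// same job directly.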
2375 // We use the same pc/oopMap repeatedly when we call out
2376 oop_maps->add_gc_map(thepc - start, map);
2378 // O7 now has the pc loaded that we will use when we finally call to native.
2380 // Save thread in L7; it crosses a bunch of VM calls below
2381 // Don't use save_thread because it smashes G2 and we merely
2382 // want to save a copy
2383 __ mov(G2_thread, L7_thread_cache);
2386 // If we create an inner frame, once is plenty;
2387 // when we create it we must also save G2_thread
2388 bool inner_frame_created = false;
2390 // dtrace method entry support
2391 {
2392 SkipIfEqual skip_if(
2393 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2394 // create inner frame
2395 __ save_frame(0);
2396 __ mov(G2_thread, L7_thread_cache);
2397 __ set_metadata_constant(method(), O1);
2398 __ call_VM_leaf(L7_thread_cache,
2399 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2400 G2_thread, O1);
2401 __ restore();
2402 }
2404 // RedefineClasses() tracing support for obsolete method entry
2405 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2406 // create inner frame
2407 __ save_frame(0);
2408 __ mov(G2_thread, L7_thread_cache);
2409 __ set_metadata_constant(method(), O1);
2410 __ call_VM_leaf(L7_thread_cache,
2411 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2412 G2_thread, O1);
2413 __ restore();
2414 }
2416 // We are in the jni frame unless inner_frame_created is true, in which
2417 // case we are one frame deeper (the "inner" frame). If we are in the
2418 // "inner" frame the args are in the Iregs; in the jni frame they are
2419 // in the Oregs.
2420 // If we ever need to go to the VM (for locking, jvmti) then
2421 // we will always be in the "inner" frame.
2423 // Lock a synchronized method
2424 int lock_offset = -1; // Set if locked
2425 if (method->is_synchronized()) {
2426 Register Roop = O1;
2427 const Register L3_box = L3;
2429 create_inner_frame(masm, &inner_frame_created);
2431 __ ld_ptr(I1, 0, O1);
2432 Label done;
2434 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2435 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2436 #ifdef ASSERT
2437 if (UseBiasedLocking) {
2438 // making the box point to itself will make it clear it went unused
2439 // but also be obviously invalid
2440 __ st_ptr(L3_box, L3_box, 0);
2441 }
2442 #endif // ASSERT
2443 //
2444 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2445 //
2446 __ compiler_lock_object(Roop, L1, L3_box, L2);
2447 __ br(Assembler::equal, false, Assembler::pt, done);
2448 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2451 // None of the above fast optimizations worked so we have to get into the
2452 // slow case of monitor enter. Inline a special case of call_VM that
2453 // disallows any pending_exception.
2454 __ mov(Roop, O0); // Need oop in O0
2455 __ mov(L3_box, O1);
2457 // Record last_Java_sp, in case the VM code releases the JVM lock.
2459 __ set_last_Java_frame(FP, I7);
2461 // do the call
2462 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2463 __ delayed()->mov(L7_thread_cache, O2);
2465 __ restore_thread(L7_thread_cache); // restore G2_thread
2466 __ reset_last_Java_frame();
2468 #ifdef ASSERT
2469 { Label L;
2470 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2471 __ br_null_short(O0, Assembler::pt, L);
2472 __ stop("no pending exception allowed on exit from IR::monitorenter");
2473 __ bind(L);
2474 }
2475 #endif
2476 __ bind(done);
2477 }
2480 // Finally just about ready to make the JNI call
2482 __ flushw();
2483 if (inner_frame_created) {
2484 __ restore();
2485 } else {
2486 // Store only what we need from this frame
2487 // QQQ I think that on non-v9 (like we care) we don't need these saves
2488 // either as the flush traps and the current window goes too.
2489 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2490 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2491 }
2493 // get JNIEnv* which is first argument to native
2494 if (!is_critical_native) {
2495 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2496 }
2498 // Use that pc we placed in O7 a while back as the current frame anchor
2499 __ set_last_Java_frame(SP, O7);
2501 // We flushed the windows ages ago; now mark them as flushed before transitioning.
2502 __ set(JavaFrameAnchor::flushed, G3_scratch);
2503 __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2505 // Transition from _thread_in_Java to _thread_in_native.
2506 __ set(_thread_in_native, G3_scratch);
2508 #ifdef _LP64
2509 AddressLiteral dest(native_func);
2510 __ relocate(relocInfo::runtime_call_type);
2511 __ jumpl_to(dest, O7, O7);
2512 #else
2513 __ call(native_func, relocInfo::runtime_call_type);
2514 #endif
2515 __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2517 __ restore_thread(L7_thread_cache); // restore G2_thread
2519 // Unpack native results. For int-types, we do any needed sign-extension
2520 // and move things into I0. The return value there will survive any VM
2521 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2522 // specially in the slow-path code.
2523 switch (ret_type) {
2524 case T_VOID: break; // Nothing to do!
2525 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2526 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2527 // In the 64-bit build the result is in O0; in O0,O1 in the 32-bit build
2528 case T_LONG:
2529 #ifndef _LP64
2530 __ mov(O1, I1);
2531 #endif
2532 // Fall thru
2533 case T_OBJECT: // Really a handle
2534 case T_ARRAY:
2535 case T_INT:
2536 __ mov(O0, I0);
2537 break;
2538 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2539 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2540 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2541 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2542 // Cannot de-handlize until after reclaiming jvm_lock
2543 default:
2544 ShouldNotReachHere();
2545 }
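// Note on the T_BOOLEAN case above: subcc computes 0 - O0 and sets the
// carry flag exactly when O0 != 0; addc then adds that carry into a zeroed
// I0, normalizing any non-zero result to 1.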
2547 Label after_transition;
2548 // must we block?
2550 // Block, if necessary, before resuming in _thread_in_Java state.
2551 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2552 { Label no_block;
2553 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2555 // Switch thread to "native transition" state before reading the synchronization state.
2556 // This additional state is necessary because reading and testing the synchronization
2557 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2558 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2559 // VM thread changes sync state to synchronizing and suspends threads for GC.
2560 // Thread A is resumed to finish this native method, but doesn't block here since it
2561 // didn't see any synchronization in progress, and escapes.
2562 __ set(_thread_in_native_trans, G3_scratch);
2563 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2564 if(os::is_MP()) {
2565 if (UseMembar) {
2566 // Force this write out before the read below
2567 __ membar(Assembler::StoreLoad);
2568 } else {
2569 // Write serialization page so VM thread can do a pseudo remote membar.
2570 // We use the current thread pointer to calculate a thread specific
2571 // offset to write to within the page. This minimizes bus traffic
2572 // due to cache line collision.
2573 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2574 }
2575 }
2576 __ load_contents(sync_state, G3_scratch);
2577 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2579 Label L;
2580 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2581 __ br(Assembler::notEqual, false, Assembler::pn, L);
2582 __ delayed()->ld(suspend_state, G3_scratch);
2583 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2584 __ bind(L);
2586 // Block. Save any potential method result value before the operation and
2587 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2588 // lets us share the oopMap we used when we went native rather than create
2589 // a distinct one for this pc.
2590 //
2591 save_native_result(masm, ret_type, stack_slots);
2592 if (!is_critical_native) {
2593 __ call_VM_leaf(L7_thread_cache,
2594 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2595 G2_thread);
2596 } else {
2597 __ call_VM_leaf(L7_thread_cache,
2598 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2599 G2_thread);
2600 }
2602 // Restore any method result value
2603 restore_native_result(masm, ret_type, stack_slots);
2605 if (is_critical_native) {
2606 // The call above performed the transition to thread_in_Java so
2607 // skip the transition logic below.
2608 __ ba(after_transition);
2609 __ delayed()->nop();
2610 }
2612 __ bind(no_block);
2613 }
2615 // thread state is thread_in_native_trans. Any safepoint blocking has already
2616 // happened so we can now change state to _thread_in_Java.
2617 __ set(_thread_in_Java, G3_scratch);
2618 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2619 __ bind(after_transition);
2621 Label no_reguard;
2622 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2623 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2625 save_native_result(masm, ret_type, stack_slots);
2626 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2627 __ delayed()->nop();
2629 __ restore_thread(L7_thread_cache); // restore G2_thread
2630 restore_native_result(masm, ret_type, stack_slots);
2632 __ bind(no_reguard);
2634 // Handle possible exception (will unlock if necessary)
2636 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2638 // Unlock
2639 if (method->is_synchronized()) {
2640 Label done;
2641 Register I2_ex_oop = I2;
2642 const Register L3_box = L3;
2643 // Get locked oop from the handle we passed to jni
2644 __ ld_ptr(L6_handle, 0, L4);
2645 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2646 // Must save pending exception around the slow-path VM call. Since it's a
2647 // leaf call, the pending exception (if any) can be kept in a register.
2648 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2649 // Now unlock
2650 // (Roop, Rmark, Rbox, Rscratch)
2651 __ compiler_unlock_object(L4, L1, L3_box, L2);
2652 __ br(Assembler::equal, false, Assembler::pt, done);
2653 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2655 // save and restore any potential method result value around the unlocking
2656 // operation. Will save in I0 (or stack for FP returns).
2657 save_native_result(masm, ret_type, stack_slots);
2659 // Must clear pending-exception before re-entering the VM. Since this is
2660 // a leaf call, pending-exception-oop can be safely kept in a register.
2661 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2663 // slow case of monitor exit. Inline a special case of call_VM that
2664 // disallows any pending_exception.
2665 __ mov(L3_box, O1);
2667 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2668 __ delayed()->mov(L4, O0); // Need oop in O0
2670 __ restore_thread(L7_thread_cache); // restore G2_thread
2672 #ifdef ASSERT
2673 { Label L;
2674 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2675 __ br_null_short(O0, Assembler::pt, L);
2676 __ stop("no pending exception allowed on exit from IR::monitorexit");
2677 __ bind(L);
2678 }
2679 #endif
2680 restore_native_result(masm, ret_type, stack_slots);
2681 // check_forward_pending_exception jumps to forward_exception if any pending
2682 // exception is set. The forward_exception routine expects to see the
2683 // exception in pending_exception and not in a register. Kind of clumsy,
2684 // since all folks who branch to forward_exception must have tested
2685 // pending_exception first and hence have it in a register already.
2686 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2687 __ bind(done);
2688 }
2690 // Tell dtrace about this method exit
2691 {
2692 SkipIfEqual skip_if(
2693 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2694 save_native_result(masm, ret_type, stack_slots);
2695 __ set_metadata_constant(method(), O1);
2696 __ call_VM_leaf(L7_thread_cache,
2697 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2698 G2_thread, O1);
2699 restore_native_result(masm, ret_type, stack_slots);
2700 }
2702 // Clear "last Java frame" SP and PC.
2703 __ verify_thread(); // G2_thread must be correct
2704 __ reset_last_Java_frame();
2706 // Unpack oop result
2707 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2708 Label L;
2709 __ addcc(G0, I0, G0);
2710 __ brx(Assembler::notZero, true, Assembler::pt, L);
2711 __ delayed()->ld_ptr(I0, 0, I0);
2712 __ mov(G0, I0);
2713 __ bind(L);
2714 __ verify_oop(I0);
2715 }
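// The branch above annuls its delay slot when not taken: a non-NULL handle
// is dereferenced by the delayed ld_ptr, while a NULL handle skips the load
// and falls through to "mov G0, I0", producing a NULL oop.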
2717 if (!is_critical_native) {
2718 // reset handle block
2719 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2720 __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2722 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2723 check_forward_pending_exception(masm, G3_scratch);
2724 }
2727 // Return
2729 #ifndef _LP64
2730 if (ret_type == T_LONG) {
2732 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2733 __ sllx(I0, 32, G1); // Shift bits into high G1
2734 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2735 __ or3 (I1, G1, G1); // OR 64 bits into G1
2736 }
2737 #endif
2739 __ ret();
2740 __ delayed()->restore();
2742 __ flush();
2744 nmethod *nm = nmethod::new_native_nmethod(method,
2745 compile_id,
2746 masm->code(),
2747 vep_offset,
2748 frame_complete,
2749 stack_slots / VMRegImpl::slots_per_word,
2750 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2751 in_ByteSize(lock_offset),
2752 oop_maps);
2754 if (is_critical_native) {
2755 nm->set_lazy_critical_native(true);
2756 }
2757 return nm;
2759 }
2761 #ifdef HAVE_DTRACE_H
2762 // ---------------------------------------------------------------------------
2763 // Generate a dtrace nmethod for a given signature. The method takes arguments
2764 // in the Java compiled code convention, marshals them to the native
2765 // abi and then leaves nops at the position you would expect to call a native
2766 // function. When the probe is enabled the nops are replaced with a trap
2767 // instruction that dtrace inserts, and the trap will cause a notification
2768 // to dtrace.
2769 //
2770 // The probes are only able to take primitive types and java/lang/String as
2771 // arguments. No other java types are allowed. Strings are converted to utf8
2772 // strings so that from dtrace's point of view java strings appear as C
2773 // strings. There is an arbitrary fixed limit on the total space that a method
2774 // can use for converting the strings. (256 chars per string in the signature).
2775 // So any java string larger than this is truncated.
2777 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2778 static bool offsets_initialized = false;
2780 nmethod *SharedRuntime::generate_dtrace_nmethod(
2781 MacroAssembler *masm, methodHandle method) {
2784 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2785 // be single threaded in this method.
2786 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2788 // Fill in the signature array, for the calling-convention call.
2789 int total_args_passed = method->size_of_parameters();
2791 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2792 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2794 // The signature we are going to use for the trap that dtrace will see:
2795 // java/lang/String is converted, "this" is dropped, and any other object
2796 // is converted to NULL. (A one-slot java/lang/Long object reference
2797 // is converted to a two-slot long, which is why we double the allocation).
2798 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2799 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2801 int i=0;
2802 int total_strings = 0;
2803 int first_arg_to_pass = 0;
2804 int total_c_args = 0;
2806 // Skip the receiver as dtrace doesn't want to see it
2807 if( !method->is_static() ) {
2808 in_sig_bt[i++] = T_OBJECT;
2809 first_arg_to_pass = 1;
2810 }
2812 SignatureStream ss(method->signature());
2813 for ( ; !ss.at_return_type(); ss.next()) {
2814 BasicType bt = ss.type();
2815 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2816 out_sig_bt[total_c_args++] = bt;
2817 if( bt == T_OBJECT) {
2818 Symbol* s = ss.as_symbol_or_null();
2819 if (s == vmSymbols::java_lang_String()) {
2820 total_strings++;
2821 out_sig_bt[total_c_args-1] = T_ADDRESS;
2822 } else if (s == vmSymbols::java_lang_Boolean() ||
2823 s == vmSymbols::java_lang_Byte()) {
2824 out_sig_bt[total_c_args-1] = T_BYTE;
2825 } else if (s == vmSymbols::java_lang_Character() ||
2826 s == vmSymbols::java_lang_Short()) {
2827 out_sig_bt[total_c_args-1] = T_SHORT;
2828 } else if (s == vmSymbols::java_lang_Integer() ||
2829 s == vmSymbols::java_lang_Float()) {
2830 out_sig_bt[total_c_args-1] = T_INT;
2831 } else if (s == vmSymbols::java_lang_Long() ||
2832 s == vmSymbols::java_lang_Double()) {
2833 out_sig_bt[total_c_args-1] = T_LONG;
2834 out_sig_bt[total_c_args++] = T_VOID;
2835 }
2836 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2837 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2838 // We convert double to long
2839 out_sig_bt[total_c_args-1] = T_LONG;
2840 out_sig_bt[total_c_args++] = T_VOID;
2841 } else if ( bt == T_FLOAT) {
2842 // We convert float to int
2843 out_sig_bt[total_c_args-1] = T_INT;
2844 }
2845 }
2847 assert(i==total_args_passed, "validly parsed signature");
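// For example (illustrative), a static method with java signature
// (int, String, long) yields in_sig_bt = { T_INT, T_OBJECT, T_LONG, T_VOID }
// and out_sig_bt = { T_INT, T_ADDRESS, T_LONG, T_VOID }.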
2849 // Now get the compiled-Java layout as input arguments
2850 int comp_args_on_stack;
2851 comp_args_on_stack = SharedRuntime::java_calling_convention(
2852 in_sig_bt, in_regs, total_args_passed, false);
2854 // We have received a description of where all the java args are located
2855 // on entry to the wrapper. We need to convert these args to where
2856 // a native (non-jni) function would expect them. To figure out
2857 // where they go we convert the java signature to a C signature and remove
2858 // T_VOID for any long/double we might have received.
2861 // Now figure out where the args must be stored and how much stack space
2862 // they require (neglecting out_preserve_stack_slots, but including space
2863 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
2864 //
2865 int out_arg_slots;
2866 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2868 // Calculate the total number of stack slots we will need.
2870 // First count the abi requirement plus all of the outgoing args
2871 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2873 // Plus a temp for possible conversion of float/double/long register args
2875 int conversion_temp = stack_slots;
2876 stack_slots += 2;
2879 // Now space for the string(s) we must convert
2881 int string_locs = stack_slots;
2882 stack_slots += total_strings *
2883 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2885 // Ok The space we have allocated will look like:
2886 //
2887 //
2888 // FP-> | |
2889 // |---------------------|
2890 // | string[n] |
2891 // |---------------------| <- string_locs[n]
2892 // | string[n-1] |
2893 // |---------------------| <- string_locs[n-1]
2894 // | ... |
2895 // | ... |
2896 // |---------------------| <- string_locs[1]
2897 // | string[0] |
2898 // |---------------------| <- string_locs[0]
2899 // | temp |
2900 // |---------------------| <- conversion_temp
2901 // | outbound memory |
2902 // | based arguments |
2903 // | |
2904 // |---------------------|
2905 // | |
2906 // SP-> | out_preserved_slots |
2907 //
2908 //
2910 // Now compute the actual number of stack words we need, rounding to make
2911 // the stack properly aligned.
2912 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2914 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2916 intptr_t start = (intptr_t)__ pc();
2918 // First thing make an ic check to see if we should even be here
2920 {
2921 Label L;
2922 const Register temp_reg = G3_scratch;
2923 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2924 __ verify_oop(O0);
2925 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2926 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2928 __ jump_to(ic_miss, temp_reg);
2929 __ delayed()->nop();
2930 __ align(CodeEntryAlignment);
2931 __ bind(L);
2932 }
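// The block above is the inline-cache dispatch check: the caller passes
// the expected klass in G5_inline_cache_reg, we compare it against the
// receiver's klass loaded from O0, and on a mismatch we tail-jump to the
// shared ic-miss stub to re-resolve the call. Only on a match do we fall
// through to the verified entry point below.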
2934 int vep_offset = ((intptr_t)__ pc()) - start;
2937 // The instruction at the verified entry point must be 5 bytes or longer
2938 // because it can be patched on the fly by make_non_entrant. The stack bang
2939 // instruction fits that requirement.
2941 // Generate stack overflow check before creating frame
2942 __ generate_stack_overflow_check(stack_size);
2944 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2945 "valid size for make_non_entrant");
2947 // Generate a new frame for the wrapper.
2948 __ save(SP, -stack_size, SP);
2950 // Frame is now completed as far as size and linkage.
2952 int frame_complete = ((intptr_t)__ pc()) - start;
2954 #ifdef ASSERT
2955 bool reg_destroyed[RegisterImpl::number_of_registers];
2956 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2957 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2958 reg_destroyed[r] = false;
2959 }
2960 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2961 freg_destroyed[f] = false;
2962 }
2964 #endif /* ASSERT */
2966 VMRegPair zero;
2967 const Register g0 = G0; // without this we get a compiler warning (why??)
2968 zero.set2(g0->as_VMReg());
2970 int c_arg, j_arg;
2972 Register conversion_off = noreg;
2974 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2975 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2977 VMRegPair src = in_regs[j_arg];
2978 VMRegPair dst = out_regs[c_arg];
2980 #ifdef ASSERT
2981 if (src.first()->is_Register()) {
2982 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2983 } else if (src.first()->is_FloatRegister()) {
2984 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2985 FloatRegisterImpl::S)], "ack!");
2986 }
2987 if (dst.first()->is_Register()) {
2988 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2989 } else if (dst.first()->is_FloatRegister()) {
2990 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2991 FloatRegisterImpl::S)] = true;
2992 }
2993 #endif /* ASSERT */
2995 switch (in_sig_bt[j_arg]) {
2996 case T_ARRAY:
2997 case T_OBJECT:
2998 {
2999 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
3000 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3001 // need to unbox a one-slot value
3002 Register in_reg = L0;
3003 Register tmp = L2;
3004 if ( src.first()->is_reg() ) {
3005 in_reg = src.first()->as_Register();
3006 } else {
3007 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
3008 "must be");
3009 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
3010 }
3011 // If the final destination is an acceptable register
3012 if ( dst.first()->is_reg() ) {
3013 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
3014 tmp = dst.first()->as_Register();
3015 }
3016 }
3018 Label skipUnbox;
3019 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
3020 __ mov(G0, tmp->successor());
3021 }
3022 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
3023 __ delayed()->mov(G0, tmp);
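// With the annul bit set on the br_null above, the delay-slot mov
// executes only when the branch is taken, so tmp is zeroed only on
// the null-box path and left intact when we fall through to load
// the boxed value.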
3025 BasicType bt = out_sig_bt[c_arg];
3026 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3027 switch (bt) {
3028 case T_BYTE:
3029 __ ldub(in_reg, box_offset, tmp); break;
3030 case T_SHORT:
3031 __ lduh(in_reg, box_offset, tmp); break;
3032 case T_INT:
3033 __ ld(in_reg, box_offset, tmp); break;
3034 case T_LONG:
3035 __ ld_long(in_reg, box_offset, tmp); break;
3036 default: ShouldNotReachHere();
3037 }
3039 __ bind(skipUnbox);
3040 // If tmp wasn't the final destination, copy to the final destination
3041 if (tmp == L2) {
3042 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
3043 if (out_sig_bt[c_arg] == T_LONG) {
3044 long_move(masm, tmp_as_VM, dst);
3045 } else {
3046 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
3047 }
3048 }
3049 if (out_sig_bt[c_arg] == T_LONG) {
3050 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3051 ++c_arg; // move over the T_VOID to keep the loop indices in sync
3052 }
3053 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
3054 Register s =
3055 src.first()->is_reg() ? src.first()->as_Register() : L2;
3056 Register d =
3057 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3059 // We store the oop now so that the conversion pass can reach it
3060 // while in the inner frame. This will be the only store if
3061 // the oop is NULL.
3062 if (s != L2) {
3063 // src is register
3064 if (d != L2) {
3065 // dst is register
3066 __ mov(s, d);
3067 } else {
3068 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3069 STACK_BIAS), "must be");
3070 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
3071 }
3072 } else {
3073 // src not a register
3074 assert(Assembler::is_simm13(reg2offset(src.first()) +
3075 STACK_BIAS), "must be");
3076 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
3077 if (d == L2) {
3078 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3079 STACK_BIAS), "must be");
3080 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
3081 }
3082 }
3083 } else if (out_sig_bt[c_arg] != T_VOID) {
3084 // Convert the arg to NULL
3085 if (dst.first()->is_reg()) {
3086 __ mov(G0, dst.first()->as_Register());
3087 } else {
3088 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3089 STACK_BIAS), "must be");
3090 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
3091 }
3092 }
3093 }
3094 break;
3095 case T_VOID:
3096 break;
3098 case T_FLOAT:
3099 if (src.first()->is_stack()) {
3100 // Stack to stack/reg is simple
3101 move32_64(masm, src, dst);
3102 } else {
3103 if (dst.first()->is_reg()) {
3104 // freg -> reg
3105 int off =
3106 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3107 Register d = dst.first()->as_Register();
3108 if (Assembler::is_simm13(off)) {
3109 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3110 SP, off);
3111 __ ld(SP, off, d);
3112 } else {
3113 if (conversion_off == noreg) {
3114 __ set(off, L6);
3115 conversion_off = L6;
3116 }
3117 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3118 SP, conversion_off);
3119 __ ld(SP, conversion_off , d);
3120 }
3121 } else {
3122 // freg -> mem
3123 int off = STACK_BIAS + reg2offset(dst.first());
3124 if (Assembler::is_simm13(off)) {
3125 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3126 SP, off);
3127 } else {
3128 if (conversion_off == noreg) {
3129 __ set(off, L6);
3130 conversion_off = L6;
3131 }
3132 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3133 SP, conversion_off);
3134 }
3135 }
3136 }
3137 break;
3139 case T_DOUBLE:
3140 assert( j_arg + 1 < total_args_passed &&
3141 in_sig_bt[j_arg + 1] == T_VOID &&
3142 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
3143 if (src.first()->is_stack()) {
3144 // Stack to stack/reg is simple
3145 long_move(masm, src, dst);
3146 } else {
3147 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3149 // Destination could be an odd reg on 32bit in which case
3150 // we can't load directly to the destination.
3152 if (!d->is_even() && wordSize == 4) {
3153 d = L2;
3154 }
3155 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3156 if (Assembler::is_simm13(off)) {
3157 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3158 SP, off);
3159 __ ld_long(SP, off, d);
3160 } else {
3161 if (conversion_off == noreg) {
3162 __ set(off, L6);
3163 conversion_off = L6;
3164 }
3165 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3166 SP, conversion_off);
3167 __ ld_long(SP, conversion_off, d);
3168 }
3169 if (d == L2) {
3170 long_move(masm, reg64_to_VMRegPair(L2), dst);
3171 }
3172 }
3173 break;
3175 case T_LONG :
3176 // 32bit can't do a split move of something like g1 -> O0, O1
3177 // so use a memory temp
3178 if (src.is_single_phys_reg() && wordSize == 4) {
3179 Register tmp = L2;
3180 if (dst.first()->is_reg() &&
3181 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
3182 tmp = dst.first()->as_Register();
3183 }
3185 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3186 if (Assembler::is_simm13(off)) {
3187 __ stx(src.first()->as_Register(), SP, off);
3188 __ ld_long(SP, off, tmp);
3189 } else {
3190 if (conversion_off == noreg) {
3191 __ set(off, L6);
3192 conversion_off = L6;
3193 }
3194 __ stx(src.first()->as_Register(), SP, conversion_off);
3195 __ ld_long(SP, conversion_off, tmp);
3196 }
3198 if (tmp == L2) {
3199 long_move(masm, reg64_to_VMRegPair(L2), dst);
3200 }
3201 } else {
3202 long_move(masm, src, dst);
3203 }
3204 break;
3206 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
3208 default:
3209 move32_64(masm, src, dst);
3210 }
3211 }
3214 // If we have any strings we must store any register based args to the stack.
3215 // This includes any still-live float registers too.
3217 if (total_strings > 0 ) {
3219 // protect all the arg registers
3220 __ save_frame(0);
3221 __ mov(G2_thread, L7_thread_cache);
3222 const Register L2_string_off = L2;
3224 // Get first string offset
3225 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
3227 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
3228 if (out_sig_bt[c_arg] == T_ADDRESS) {
3230 VMRegPair dst = out_regs[c_arg];
3231 const Register d = dst.first()->is_reg() ?
3232 dst.first()->as_Register()->after_save() : noreg;
3234 // It's a string oop and it was already copied to the out arg
3235 // position
3236 if (d != noreg) {
3237 __ mov(d, O0);
3238 } else {
3239 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3240 "must be");
3241 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
3242 }
3243 Label skip;
3245 __ br_null(O0, false, Assembler::pn, skip);
3246 __ delayed()->add(FP, L2_string_off, O1);
3248 if (d != noreg) {
3249 __ mov(O1, d);
3250 } else {
3251 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3252 "must be");
3253 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
3254 }
3256 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3257 relocInfo::runtime_call_type);
3258 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3260 __ bind(skip);
3262 }
3264 }
3265 __ mov(L7_thread_cache, G2_thread);
3266 __ restore();
3268 }
3271 // OK, now we are done. We need to place the nop that dtrace wants in order to
3272 // patch in the trap.
3274 int patch_offset = ((intptr_t)__ pc()) - start;
3276 __ nop();
3279 // Return
3281 __ ret();
3282 __ delayed()->restore();
3284 __ flush();
3286 nmethod *nm = nmethod::new_dtrace_nmethod(
3287 method, masm->code(), vep_offset, patch_offset, frame_complete,
3288 stack_slots / VMRegImpl::slots_per_word);
3289 return nm;
3291 }
3293 #endif // HAVE_DTRACE_H
3295 // this function returns the adjustment size (in number of words) to a c2i adapter
3296 // activation for use during deoptimization
3297 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3298 assert(callee_locals >= callee_parameters,
3299 "test and remove; got more parms than locals");
3300 if (callee_locals < callee_parameters)
3301 return 0; // No adjustment for negative locals
3302 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3303 return round_to(diff, WordsPerLong);
3304 }
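// For example (a sketch): with callee_parameters == 2 and callee_locals == 5,
// diff is 3 * Interpreter::stackElementWords words for the non-parameter
// locals, then rounded up to a WordsPerLong boundary (e.g. 3 rounds to 4 when
// WordsPerLong is 2) so longs in the adjusted frame stay 8-byte aligned.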
3306 // "Top of Stack" slots that may be unused by the calling convention but must
3307 // otherwise be preserved.
3308 // On Intel these are not necessary and the value can be zero.
3309 // On Sparc this describes the words reserved for storing a register window
3310 // when an interrupt occurs.
3311 uint SharedRuntime::out_preserve_stack_slots() {
3312 return frame::register_save_words * VMRegImpl::slots_per_word;
3313 }
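// A sketch of the arithmetic: frame::register_save_words is the 16-word
// %i/%l window save area on SPARC, so this reserves 16 slots on a 32-bit
// build (slots_per_word == 1) and 32 slots on a 64-bit build
// (slots_per_word == 2).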
3315 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3316 //
3317 // Common out the new frame generation for deopt and uncommon trap
3318 //
3319 Register G3pcs = G3_scratch; // Array of new pcs (input)
3320 Register Oreturn0 = O0;
3321 Register Oreturn1 = O1;
3322 Register O2UnrollBlock = O2;
3323 Register O3array = O3; // Array of frame sizes (input)
3324 Register O4array_size = O4; // number of frames (input)
3325 Register O7frame_size = O7; // size of current frame (input)
3327 __ ld_ptr(O3array, 0, O7frame_size);
3328 __ sub(G0, O7frame_size, O7frame_size);
3329 __ save(SP, O7frame_size, SP);
3330 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3332 #ifdef ASSERT
3333 // make sure that the frames are aligned properly
3334 #ifndef _LP64
3335 __ btst(wordSize*2-1, SP);
3336 __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
3337 #endif
3338 #endif
3340 // Deopt needs to pass some extra live values from frame to frame
3342 if (deopt) {
3343 __ mov(Oreturn0->after_save(), Oreturn0);
3344 __ mov(Oreturn1->after_save(), Oreturn1);
3345 }
3347 __ mov(O4array_size->after_save(), O4array_size);
3348 __ sub(O4array_size, 1, O4array_size);
3349 __ mov(O3array->after_save(), O3array);
3350 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3351 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3353 #ifdef ASSERT
3354 // trash registers to show a clear pattern in backtraces
3355 __ set(0xDEAD0000, I0);
3356 __ add(I0, 2, I1);
3357 __ add(I0, 4, I2);
3358 __ add(I0, 6, I3);
3359 __ add(I0, 8, I4);
3360 // Don't touch I5; it could have a valuable savedSP
3361 __ set(0xDEADBEEF, L0);
3362 __ mov(L0, L1);
3363 __ mov(L0, L2);
3364 __ mov(L0, L3);
3365 __ mov(L0, L4);
3366 __ mov(L0, L5);
3368 // trash the return value as there is nothing to return yet
3369 __ set(0xDEAD0001, O7);
3370 #endif
3372 __ mov(SP, O5_savedSP);
3373 }
3376 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3377 //
3378 // loop through the UnrollBlock info and create new frames
3379 //
3380 Register G3pcs = G3_scratch;
3381 Register Oreturn0 = O0;
3382 Register Oreturn1 = O1;
3383 Register O2UnrollBlock = O2;
3384 Register O3array = O3;
3385 Register O4array_size = O4;
3386 Label loop;
3388 #ifdef ASSERT
3389 // Compilers generate code that bangs the stack by as much as the
3390 // interpreter would need. So this stack banging should never
3391 // trigger a fault. Verify that it does not on non-product builds.
3392 if (UseStackBanging) {
3393 // Get total frame size for interpreted frames
3394 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3395 __ bang_stack_size(O4, O3, G3_scratch);
3396 }
3397 #endif
3399 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3400 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3401 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3403 // Adjust old interpreter frame to make space for new frame's extra java locals
3404 //
3405 // We capture the original sp for the transition frame only because it is needed in
3406 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3407 // every interpreter frame captures a savedSP it is only needed at the transition
3408 // (fortunately). If we had to have it correct everywhere then we would need to
3409 // be told the sp_adjustment for each frame we create. If the frame size array
3410 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3411 // for each frame we create and keep up the illusion everywhere.
3412 //
3414 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3415 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3416 __ sub(SP, O7, SP);
3418 #ifdef ASSERT
3419 // make sure that there is at least one entry in the array
3420 __ tst(O4array_size);
3421 __ breakpoint_trap(Assembler::zero, Assembler::icc);
3422 #endif
3424 // Now push the new interpreter frames
3425 __ bind(loop);
3427 // allocate a new frame, filling the registers
3429 gen_new_frame(masm, deopt); // allocate an interpreter frame
3431 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
3432 __ delayed()->add(O3array, wordSize, O3array);
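// The delay slot of the loop branch advances O3array to the next frame
// size entry, so each gen_new_frame call reads the next size while
// O4array_size (decremented inside gen_new_frame) counts down to zero.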
3433 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
3435 }
3437 //------------------------------generate_deopt_blob----------------------------
3438 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3439 // instead.
3440 void SharedRuntime::generate_deopt_blob() {
3441 // allocate space for the code
3442 ResourceMark rm;
3443 // setup code generation tools
3444 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
3445 #ifdef ASSERT
3446 if (UseStackBanging) {
3447 pad += StackShadowPages*16 + 32;
3448 }
3449 #endif
3450 #ifdef _LP64
3451 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3452 #else
3453 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3454 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3455 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3456 #endif /* _LP64 */
3457 MacroAssembler* masm = new MacroAssembler(&buffer);
3458 FloatRegister Freturn0 = F0;
3459 Register Greturn1 = G1;
3460 Register Oreturn0 = O0;
3461 Register Oreturn1 = O1;
3462 Register O2UnrollBlock = O2;
3463 Register L0deopt_mode = L0;
3464 Register G4deopt_mode = G4_scratch;
3465 int frame_size_words;
3466 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3467 #if !defined(_LP64) && defined(COMPILER2)
3468 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3469 #endif
3470 Label cont;
3472 OopMapSet *oop_maps = new OopMapSet();
3474 //
3475 // This is the entry point for code which is returning to a de-optimized
3476 // frame.
3477 // The steps taken by this frame are as follows:
3478 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3479 // and all potentially live registers (at a pollpoint many registers can be live).
3480 //
3481 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3482 // returns information about the number and size of interpreter frames
3483 // which are equivalent to the frame which is being deoptimized)
3484 // - deallocate the unpack frame, restoring only result values. Other
3485 // volatile registers will now be captured in the vframeArray as needed.
3486 // - deallocate the deoptimization frame
3487 // - in a loop using the information returned in the previous step
3488 // push new interpreter frames (take care to propagate the return
3489 // values through each new frame pushed)
3490 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3491 // - call the C routine: Deoptimization::unpack_frames (this function
3492 // lays out values on the interpreter frame which was just created)
3493 // - deallocate the dummy unpack_frame
3494 // - ensure that all the return values are correctly set and then do
3495 // a return to the interpreter entry point
3496 //
3497 // Refer to the following methods for more information:
3498 // - Deoptimization::fetch_unroll_info
3499 // - Deoptimization::unpack_frames
3501 OopMap* map = NULL;
3503 int start = __ offset();
3505 // restore G2, the trampoline destroyed it
3506 __ get_thread();
3508 // On entry we have been called by the deoptimized nmethod with a call that
3509 // replaced the original call (or safepoint polling location) so the deoptimizing
3510 // pc is now in O7. Return values are still in the expected places.
3512 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3513 __ ba(cont);
3514 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
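// The mov in the delay slot executes before control reaches 'cont', so
// this entry path (like the ones below) arrives at the common code with
// its Unpack_* mode already in L0deopt_mode.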
3516 int exception_offset = __ offset() - start;
3518 // restore G2, the trampoline destroyed it
3519 __ get_thread();
3521 // On entry we have been jumped to by the exception handler (or exception_blob
3522 // for server). O0 contains the exception oop and O7 contains the original
3523 // exception pc. So if we push a frame here it will look to the
3524 // stack walking code (fetch_unroll_info) just like a normal call so
3525 // state will be extracted normally.
3527 // save exception oop in JavaThread and fall through into the
3528 // exception_in_tls case since they are handled in the same way except
3529 // for where the pending exception is kept.
3530 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3532 //
3533 // Vanilla deoptimization with an exception pending in exception_oop
3534 //
3535 int exception_in_tls_offset = __ offset() - start;
3537 // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3538 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3540 // Restore G2_thread
3541 __ get_thread();
3543 #ifdef ASSERT
3544 {
3545 // verify that there is really an exception oop in exception_oop
3546 Label has_exception;
3547 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3548 __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3549 __ stop("no exception in thread");
3550 __ bind(has_exception);
3552 // verify that there is no pending exception
3553 Label no_pending_exception;
3554 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3555 __ ld_ptr(exception_addr, Oexception);
3556 __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3557 __ stop("must not have pending exception here");
3558 __ bind(no_pending_exception);
3559 }
3560 #endif
3562 __ ba(cont);
3563 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3565 //
3566 // Reexecute entry, similar to c2 uncommon trap
3567 //
3568 int reexecute_offset = __ offset() - start;
3570 // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3571 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3573 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3575 __ bind(cont);
3577 __ set_last_Java_frame(SP, noreg);
3579 // do the call by hand so we can get the oopmap
3581 __ mov(G2_thread, L7_thread_cache);
3582 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3583 __ delayed()->mov(G2_thread, O0);
3585 // Set an oopmap for the call site; this describes all our saved volatile registers
3587 oop_maps->add_gc_map( __ offset()-start, map);
3589 __ mov(L7_thread_cache, G2_thread);
3591 __ reset_last_Java_frame();
3593 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3594 // so this move will survive
3596 __ mov(L0deopt_mode, G4deopt_mode);
3598 __ mov(O0, O2UnrollBlock->after_save());
3600 RegisterSaver::restore_result_registers(masm);
3602 Label noException;
3603 __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3605 // Move the pending exception from exception_oop to Oexception so
3606 // the pending exception will be picked up by the interpreter.
3607 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3608 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3609 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
3610 __ bind(noException);
3612 // deallocate the deoptimization frame taking care to preserve the return values
3613 __ mov(Oreturn0, Oreturn0->after_save());
3614 __ mov(Oreturn1, Oreturn1->after_save());
3615 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3616 __ restore();
3618 // Allocate new interpreter frame(s) and possible c2i adapter frame
3620 make_new_frames(masm, true);
3622 // push a dummy "unpack_frame" taking care of float return values and
3623 // call Deoptimization::unpack_frames to have the unpacker lay out
3624 // information in the interpreter frames just created and then return
3625 // to the interpreter entry point
3626 __ save(SP, -frame_size_words*wordSize, SP);
3627 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3628 #if !defined(_LP64)
3629 #if defined(COMPILER2)
3630 // In 32-bit builds C2 returns longs in the single register G1
3631 __ stx(Greturn1, saved_Greturn1_addr);
3632 #endif
3633 __ set_last_Java_frame(SP, noreg);
3634 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3635 #else
3636 // LP64 uses g4 in set_last_Java_frame
3637 __ mov(G4deopt_mode, O1);
3638 __ set_last_Java_frame(SP, G0);
3639 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3640 #endif
3641 __ reset_last_Java_frame();
3642 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3644 #if !defined(_LP64) && defined(COMPILER2)
3645 // In 32-bit, C2 returns longs in G1 so restore the saved G1 into
3646 // I0/I1 if the return value is long.
3647 Label not_long;
3648 __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3649 __ ldd(saved_Greturn1_addr, I0);
3650 __ bind(not_long);
3651 #endif
3652 __ ret();
3653 __ delayed()->restore();
3655 masm->flush();
3656 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3657 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3658 }
3660 #ifdef COMPILER2
3662 //------------------------------generate_uncommon_trap_blob--------------------
3663 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3664 // instead.
3665 void SharedRuntime::generate_uncommon_trap_blob() {
3666 // allocate space for the code
3667 ResourceMark rm;
3668 // setup code generation tools
3669 int pad = VerifyThread ? 512 : 0;
3670 #ifdef ASSERT
3671 if (UseStackBanging) {
3672 pad += StackShadowPages*16 + 32;
3673 }
3674 #endif
3675 #ifdef _LP64
3676 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3677 #else
3678 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3679 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3680 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3681 #endif
3682 MacroAssembler* masm = new MacroAssembler(&buffer);
3683 Register O2UnrollBlock = O2;
3684 Register O2klass_index = O2;
3686 //
3687 // This is the entry point for all traps the compiler takes when it thinks
3688 // it cannot handle further execution of compiled code. The frame is
3689 // deoptimized in these cases and converted into interpreter frames for
3690 // execution
3691 // The steps taken by this frame are as follows:
3692 // - push a fake "unpack_frame"
3693 // - call the C routine Deoptimization::uncommon_trap (this function
3694 // packs the current compiled frame into vframe arrays and returns
3695 // information about the number and size of interpreter frames which
3696 // are equivalent to the frame which is being deoptimized)
3697 // - deallocate the "unpack_frame"
3698 // - deallocate the deoptimization frame
3699 // - in a loop using the information returned in the previous step
3700 // push interpreter frames;
3701 // - create a dummy "unpack_frame"
3702 // - call the C routine: Deoptimization::unpack_frames (this function
3703 // lays out values on the interpreter frame which was just created)
3704 // - deallocate the dummy unpack_frame
3705 // - return to the interpreter entry point
3706 //
3707 // Refer to the following methods for more information:
3708 // - Deoptimization::uncommon_trap
3708 // - Deoptimization::unpack_frames
3711 // the unloaded class index is in O0 (first parameter to this blob)
3713 // push a dummy "unpack_frame"
3714 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3715 // vframe array and return the UnrollBlock information
3716 __ save_frame(0);
3717 __ set_last_Java_frame(SP, noreg);
3718 __ mov(I0, O2klass_index);
3719 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3720 __ reset_last_Java_frame();
3721 __ mov(O0, O2UnrollBlock->after_save());
3722 __ restore();
3724 // deallocate the deoptimized frame taking care to preserve the return values
3725 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3726 __ restore();
3728 // Allocate new interpreter frame(s) and possible c2i adapter frame
3730 make_new_frames(masm, false);
3732 // push a dummy "unpack_frame" taking care of float return values and
3733 // call Deoptimization::unpack_frames to have the unpacker lay out
3734 // information in the interpreter frames just created and then return
3735 // to the interpreter entry point
3736 __ save_frame(0);
3737 __ set_last_Java_frame(SP, noreg);
3738 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3739 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3740 __ reset_last_Java_frame();
3741 __ ret();
3742 __ delayed()->restore();
3744 masm->flush();
3745 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3746 }
3748 #endif // COMPILER2
3750 //------------------------------generate_handler_blob-------------------
3751 //
3752 // Generate a special Compile2Runtime blob that saves all registers, and sets
3753 // up an OopMap.
3754 //
3755 // This blob is jumped to (via a breakpoint and the signal handler) from a
3756 // safepoint in compiled code. On entry to this blob, O7 contains the
3757 // address in the original nmethod at which we should resume normal execution.
3758 // Thus, this blob looks like a subroutine which must preserve lots of
3759 // registers and return normally. Note that O7 is never register-allocated,
3760 // so it is guaranteed to be free here.
3761 //
3763 // The hardest part of what this blob must do is to save the 64-bit %o
3764 // registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
3765 // an interrupt will chop off their heads. Making space in the caller's frame
3766 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3767 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3768 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
3769 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3770 // Tricky, tricky, tricky...
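// A rough sketch of that order of operations (pseudo-assembly, not the
// literal code below):
//   sub   SP, extra, SP          ! adjust the caller's SP to make room
//   stx   %o0..%o5, [SP + ...]   ! 64-bit stores while they are still %o's
//   save  SP, -frame, SP         ! now the window save cannot clip them
//   add   FP, extra, <reported>  ! undo the adjustment before handing the
//                                ! caller's SP (our FP) to the stack crawler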
3772 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3773 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3775 // allocate space for the code
3776 ResourceMark rm;
3777 // setup code generation tools
3778 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3779 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3780 // even larger with TraceJumps
3781 int pad = TraceJumps ? 512 : 0;
3782 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3783 MacroAssembler* masm = new MacroAssembler(&buffer);
3784 int frame_size_words;
3785 OopMapSet *oop_maps = new OopMapSet();
3786 OopMap* map = NULL;
3788 int start = __ offset();
3790 bool cause_return = (poll_type == POLL_AT_RETURN);
3791 // If this causes a return before the processing, then do a "restore"
3792 if (cause_return) {
3793 __ restore();
3794 } else {
3795 // Make it look like we were called via the poll
3796 // so that the frame constructor always sees a valid return address
3797 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3798 __ sub(O7, frame::pc_return_offset, O7);
3799 }
3801 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3803 // setup last_Java_sp (blows G4)
3804 __ set_last_Java_frame(SP, noreg);
3806 // call into the runtime to handle the safepoint poll
3807 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3808 __ mov(G2_thread, O0);
3809 __ save_thread(L7_thread_cache);
3810 __ call(call_ptr);
3811 __ delayed()->nop();
3813 // Set an oopmap for the call site.
3814 // We need this not only for callee-saved registers, but also for volatile
3815 // registers that the compiler might be keeping live across a safepoint.
3817 oop_maps->add_gc_map( __ offset() - start, map);
3819 __ restore_thread(L7_thread_cache);
3820 // clear last_Java_sp
3821 __ reset_last_Java_frame();
3823 // Check for exceptions
3824 Label pending;
3826 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3827 __ br_notnull_short(O1, Assembler::pn, pending);
3829 RegisterSaver::restore_live_registers(masm);
3831 // We are back to the original state on entry and ready to go.
3833 __ retl();
3834 __ delayed()->nop();
3836 // Pending exception after the safepoint
3838 __ bind(pending);
3840 RegisterSaver::restore_live_registers(masm);
3842 // We are back to the original state on entry.
3844 // Tail-call forward_exception_entry, with the issuing PC in O7,
3845 // so it looks like the original nmethod called forward_exception_entry.
3846 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3847 __ JMP(O0, 0);
3848 __ delayed()->nop();
3850 // -------------
3851 // make sure all code is generated
3852 masm->flush();
3854 // return safepoint blob
3855 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3856 }
3858 //
3859 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3860 //
3861 // Generate a stub that calls into the VM to find out the proper destination
3862 // of a java call. All the argument registers are live at this point
3863 // but since this is generic code we don't know what they are and the caller
3864 // must do any GC of the args.
3865 //
3866 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3867 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3869 // allocate space for the code
3870 ResourceMark rm;
3871 // setup code generation tools
3872 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3873 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3874 // even larger with TraceJumps
3875 int pad = TraceJumps ? 512 : 0;
3876 CodeBuffer buffer(name, 1600 + pad, 512);
3877 MacroAssembler* masm = new MacroAssembler(&buffer);
3878 int frame_size_words;
3879 OopMapSet *oop_maps = new OopMapSet();
3880 OopMap* map = NULL;
3882 int start = __ offset();
3884 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3886 int frame_complete = __ offset();
3888 // setup last_Java_sp (blows G4)
3889 __ set_last_Java_frame(SP, noreg);
3891 // call into the runtime to resolve the call site
3892 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3893 __ mov(G2_thread, O0);
3894 __ save_thread(L7_thread_cache);
3895 __ call(destination, relocInfo::runtime_call_type);
3896 __ delayed()->nop();
3898 // O0 contains the address we are going to jump to assuming no exception got installed
3900 // Set an oopmap for the call site.
3901 // We need this not only for callee-saved registers, but also for volatile
3902 // registers that the compiler might be keeping live across a safepoint.
3904 oop_maps->add_gc_map( __ offset() - start, map);
3906 __ restore_thread(L7_thread_cache);
3907 // clear last_Java_sp
3908 __ reset_last_Java_frame();
3910 // Check for exceptions
3911 Label pending;
3913 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3914 __ br_notnull_short(O1, Assembler::pn, pending);
3916 // get the returned Method*
3918 __ get_vm_result_2(G5_method);
3919 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3921 // O0 is where we want to jump; overwrite G3, which is saved and scratch
3923 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3925 RegisterSaver::restore_live_registers(masm);
3927 // We are back to the original state on entry and ready to go.
3929 __ JMP(G3, 0);
3930 __ delayed()->nop();
3932 // Pending exception after the safepoint
3934 __ bind(pending);
3936 RegisterSaver::restore_live_registers(masm);
3938 // We are back to the original state on entry.
3940 // Tail-call forward_exception_entry, with the issuing PC in O7,
3941 // so it looks like the original nmethod called forward_exception_entry.
3942 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3943 __ JMP(O0, 0);
3944 __ delayed()->nop();
3946 // -------------
3947 // make sure all code is generated
3948 masm->flush();
3950 // return the blob
3951 // frame_size_words or bytes??
3952 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3953 }