Tue, 06 Nov 2012 15:09:37 -0500
8000725: NPG: method_holder() and pool_holder() and pool_holder field should be InstanceKlass
Summary: Change types of above methods and field to InstanceKlass and remove unneeded casts from the source files.
Reviewed-by: dholmes, coleenp, zgu
Contributed-by: harold.seigel@oracle.com
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->
class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.
  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a call,
  // or we deoptimize following one of these kinds of safepoints.
  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile-time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };
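
  // A hedged illustration (added, not original source): the round-up above
  // uses the standard power-of-two masking identity
  //   rounded = (x + align - 1) & ~(align - 1)   // align must be a power of 2
  // so, assuming for example call_args_area == 0x6a, the enum would yield
  //   (0x6a + 7) & ~7 == 0x70,
  // the next multiple of 8. Unlike round_to(), this expression folds to a
  // compile-time constant, which is what an enum initializer requires.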
  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}
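
// Illustrative note (added, not original source): with wordSize == 8 the
// whole 64-bit value lives in one physical register, so set2() marks both
// halves of the pair with that register. With wordSize == 4 the value spans
// an even/odd register pair; e.g. for r == O0 the call is effectively
//   ret.set_pair(O1->as_VMReg() /* hi */, O0->as_VMReg() /* lo */);
// giving first() == O0 and second() == O1 == first()+1, the pair layout the
// calling-convention comment below requires.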

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the window
// top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers. Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers. For example, there is
// an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
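
// Worked example (added for clarity, not in the original): for a signature
// (int, long) on a 64-bit build, java_calling_convention() below produces
//   sig_bt  = { T_INT, T_LONG, T_VOID }
//   regs[0].set1(...)  -> O0/I0, one 32-bit half used
//   regs[1].set2(...)  -> O1/I1, a full 64-bit register
//   regs[2]            -> Bad(),Bad(): the T_VOID placeholder for the long's
//                         second half consumes no location.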

// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC put longs always on the stack to keep the pressure off
      // integer argument registers. They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
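
// Hedged usage sketch (added; the signature chosen here is illustrative):
// a caller builds the BasicType array from a method signature and asks for
// the stack space in slots, e.g.
//   BasicType sig_bt[] = { T_OBJECT, T_LONG, T_VOID };  // receiver + long
//   VMRegPair regs[3];
//   int slots = SharedRuntime::java_calling_convention(sig_bt, regs, 3,
//                                                      1 /* outgoing */);
// and then rounds 'slots' into the frame size that the adapters below
// actually reserve.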

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};

// Patch the callers callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  //   G1: 1st Long arg (32bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32bit build)
  //   G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
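
// Added note (hedged): SPARC load/store displacements are 13-bit signed
// immediates, so the direct form only covers offsets in [-4096, 4095].
// ensure_simm13_or_reg() returns the constant unchanged when it fits, e.g.
//   __ ld(base, arg_slot(8), r);   // 8 fits in simm13 -> immediate form
// and otherwise materializes the offset into Rdisp and returns the register,
// so the same load/store call sites assemble either way.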

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     );  // lo bits
    __ stw(r             , base, next_arg_slot(st_off));  // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}
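
// Added sketch (not original): the branch sequence above is an unsigned
// range test equivalent to
//   if (code_start < pc && pc < code_start + length) goto L_ok;
//   /* else fall through to L_fail */
// with the upper bound (code_start + length) computed in the delay slot of
// the first branch, so it costs no extra instruction.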

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  //   G2_thread      - TLS
  //   G5_method      - Method oop
  //   G4 (Gargs)     - Pointer to interpreter's args
  //   O0..O4         - free for scratch
  //   O5_savedSP     - Caller's saved SP, to be restored if needed
  //   O6             - Current SP!
  //   O7             - Valid return address
  //   L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  //   G2_thread      - TLS
  //   O0-O5          - Outgoing args in compiled layout
  //   O6             - Adjusted or restored SP
  //   O7             - Valid return address
  //   L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  //   F0-F7          - more outgoing args

  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null_short(L0, Assembler::pt, loop);
    __ restore();
  }

  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
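
// Worked example (added; assumes SPARC_ARGS_IN_REGS_NUM == 6 on this port):
//   int_stk_helper(0) .. int_stk_helper(5)  ->  O0 .. O5 (register args)
//   int_stk_helper(6)  ->  the first memory-parameter stack slot, already
//                          biased down by out_preserve_stack_slots() so that
//                          adding the bias back in later yields the real
//                          SP-relative location.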

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
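
// Added example (hedged): under the V9 rules above, a native signature
// (float, double, int) maps as
//   i == 0, j == 0, T_FLOAT  -> F1     (floats use the ODD half: 1 + (0<<1))
//   i == 1, j == 1, T_DOUBLE -> F2:F3  (doubles use the EVEN/ODD pair: 1<<1)
//   i == 3, j == 2, T_INT    -> int_stk_helper(2) == O2
// (i == 2 is the double's T_VOID half and consumes no slot position).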

// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward a pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
// is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of an integer-like type.
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (sparc abi) even though java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits. So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}
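
// Added note (hedged): the movr/movcc above implements the JNI rule that a
// NULL oop is passed as a NULL handle rather than as a pointer to a slot
// holding NULL; in effect
//   rHandle = &saved_oop_slot;
//   if (oop == NULL) rHandle = NULL;   // conditional move, no branch
// so native code can test the jobject itself against NULL.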

// A float arg may have to do a float-reg to int-reg conversion.
1395 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1396 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1398 if (src.first()->is_stack()) {
1399 if (dst.first()->is_stack()) {
1400 // stack to stack the easiest of the bunch
1401 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1402 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1403 } else {
1404 // stack to reg
1405 if (dst.first()->is_Register()) {
1406 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1407 } else {
1408 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1409 }
1410 }
1411 } else if (dst.first()->is_stack()) {
1412 // reg to stack
1413 if (src.first()->is_Register()) {
1414 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1415 } else {
1416 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1417 }
1418 } else {
1419 // reg to reg
1420 if (src.first()->is_Register()) {
1421 if (dst.first()->is_Register()) {
1422 // gpr -> gpr
1423 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1424 } else {
1425 // gpr -> fpr
1426 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1427 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1428 }
1429 } else if (dst.first()->is_Register()) {
1430 // fpr -> gpr
1431 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1432 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1433 } else {
1434 // fpr -> fpr
1435 // In theory these overlap but the ordering is such that this is likely a nop
1436 if ( src.first() != dst.first()) {
1437 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1438 }
1439 }
1440 }
1441 }
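// Editor's note: the gpr <-> fpr cases above bounce the value through a
// scratch stack slot at FP-4 because the SPARC variants this code targets
// have no direct move between the integer and floating point register
// files; illustrative shape of the emitted code (register names invented):
//
//   st   %g_src, [%fp + STACK_BIAS - 4]   ! spill from the integer side
//   ldf  [%fp + STACK_BIAS - 4], %f_dst   ! reload on the float side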
1443 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1444 VMRegPair src_lo(src.first());
1445 VMRegPair src_hi(src.second());
1446 VMRegPair dst_lo(dst.first());
1447 VMRegPair dst_hi(dst.second());
1448 simple_move32(masm, src_lo, dst_lo);
1449 simple_move32(masm, src_hi, dst_hi);
1450 }
1452 // A long move
1453 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1455 // Do the simple ones here; otherwise do two int moves
1456 if (src.is_single_phys_reg() ) {
1457 if (dst.is_single_phys_reg()) {
1458 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1459 } else {
1460 // split src into two separate registers
1461 // Remember hi means hi address or lsw on sparc
1462 // Move msw to lsw
1463 if (dst.second()->is_reg()) {
1464 // MSW -> MSW
1465 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1466 // Now LSW -> LSW
1467 // this will only move lo -> lo and ignore hi
1468 VMRegPair split(dst.second());
1469 simple_move32(masm, src, split);
1470 } else {
1471 VMRegPair split(src.first(), L4->as_VMReg());
1472 // MSW -> MSW (lo ie. first word)
1473 __ srax(src.first()->as_Register(), 32, L4);
1474 split_long_move(masm, split, dst);
1475 }
1476 }
1477 } else if (dst.is_single_phys_reg()) {
1478 if (src.is_adjacent_aligned_on_stack(2)) {
1479 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1480 } else {
1481 // dst is a single reg.
1482 // Remember lo is low address not msb for stack slots
1483 // and lo is the "real" register for registers
1484 // src is a split pair; each half may be in a register or on the stack
1486 VMRegPair split;
1488 if (src.first()->is_reg()) {
1489 // src.lo (msw) is a reg, src.hi is stk/reg
1490 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1491 split.set_pair(dst.first(), src.first());
1492 } else {
1493 // msw is stack move to L5
1494 // lsw is stack move to dst.lo (real reg)
1495 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1496 split.set_pair(dst.first(), L5->as_VMReg());
1497 }
1499 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1500 // msw -> src.lo/L5, lsw -> dst.lo
1501 split_long_move(masm, src, split);
1503 // So dst now has the low order correct; now position the
1504 // msw half
1505 __ sllx(split.first()->as_Register(), 32, L5);
1507 const Register d = dst.first()->as_Register();
1508 __ or3(L5, d, d);
1509 }
1510 } else {
1511 // For LP64 we can probably do better.
1512 split_long_move(masm, src, dst);
1513 }
1514 }
1516 // A double move
1517 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1519 // The painful thing here is that like long_move a VMRegPair might be
1520 // 1: a single physical register
1521 // 2: two physical registers (v8)
1522 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1523 // 4: two stack slots
1525 // Since src always comes from the java calling convention we know that the
1526 // src pair is always either all registers or all stack (and aligned?)
1529 if (src.first()->is_stack()) {
1530 if (dst.first()->is_stack()) {
1531 // stack to stack, the easiest of the bunch
1532 // ought to be a way to do this where, if alignment is ok, we use ldd/std when possible
1533 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1534 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1535 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1536 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1537 } else {
1538 // stack to reg
1539 if (dst.second()->is_stack()) {
1540 // stack -> reg, stack -> stack
1541 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1542 if (dst.first()->is_Register()) {
1543 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1544 } else {
1545 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1546 }
1547 // This was missing. (very rare case)
1548 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1549 } else {
1550 // stack -> reg
1551 // Eventually optimize for alignment QQQ
1552 if (dst.first()->is_Register()) {
1553 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1554 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1555 } else {
1556 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1557 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1558 }
1559 }
1560 }
1561 } else if (dst.first()->is_stack()) {
1562 // reg to stack
1563 if (src.first()->is_Register()) {
1564 // Eventually optimize for alignment QQQ
1565 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1566 if (src.second()->is_stack()) {
1567 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1568 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1569 } else {
1570 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1571 }
1572 } else {
1573 // fpr to stack
1574 if (src.second()->is_stack()) {
1575 ShouldNotReachHere();
1576 } else {
1577 // Is the stack aligned?
1578 if (reg2offset(dst.first()) & 0x7) {
1579 // No, do as pairs
1580 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1581 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1582 } else {
1583 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1584 }
1585 }
1586 }
1587 } else {
1588 // reg to reg
1589 if (src.first()->is_Register()) {
1590 if (dst.first()->is_Register()) {
1591 // gpr -> gpr
1592 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1593 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1594 } else {
1595 // gpr -> fpr
1596 // ought to be able to do a single store
1597 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1598 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1599 // ought to be able to do a single load
1600 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1601 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1602 }
1603 } else if (dst.first()->is_Register()) {
1604 // fpr -> gpr
1605 // ought to be able to do a single store
1606 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1607 // ought to be able to do a single load
1608 // REMEMBER first() is low address not LSB
1609 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1610 if (dst.second()->is_Register()) {
1611 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1612 } else {
1613 __ ld(FP, -4 + STACK_BIAS, L4);
1614 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1615 }
1616 } else {
1617 // fpr -> fpr
1618 // In theory these overlap but the ordering is such that this is likely a nop
1619 if ( src.first() != dst.first()) {
1620 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1621 }
1622 }
1623 }
1624 }
1626 // Creates an inner frame if one hasn't already been created, and
1627 // saves a copy of the thread in L7_thread_cache
1628 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1629 if (!*already_created) {
1630 __ save_frame(0);
1631 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1632 // Don't use save_thread because it smashes G2 and we merely want to save a
1633 // copy
1634 __ mov(G2_thread, L7_thread_cache);
1635 *already_created = true;
1636 }
1637 }
1640 static void save_or_restore_arguments(MacroAssembler* masm,
1641 const int stack_slots,
1642 const int total_in_args,
1643 const int arg_save_area,
1644 OopMap* map,
1645 VMRegPair* in_regs,
1646 BasicType* in_sig_bt) {
1647 // if map is non-NULL then the code should store the values,
1648 // otherwise it should load them.
1649 if (map != NULL) {
1650 // Fill in the map
1651 for (int i = 0; i < total_in_args; i++) {
1652 if (in_sig_bt[i] == T_ARRAY) {
1653 if (in_regs[i].first()->is_stack()) {
1654 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1655 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1656 } else if (in_regs[i].first()->is_Register()) {
1657 map->set_oop(in_regs[i].first());
1658 } else {
1659 ShouldNotReachHere();
1660 }
1661 }
1662 }
1663 }
1665 // Save or restore double word values
1666 int handle_index = 0;
1667 for (int i = 0; i < total_in_args; i++) {
1668 int slot = handle_index + arg_save_area;
1669 int offset = slot * VMRegImpl::stack_slot_size;
1670 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1671 const Register reg = in_regs[i].first()->as_Register();
1672 if (reg->is_global()) {
1673 handle_index += 2;
1674 assert(handle_index <= stack_slots, "overflow");
1675 if (map != NULL) {
1676 __ stx(reg, SP, offset + STACK_BIAS);
1677 } else {
1678 __ ldx(SP, offset + STACK_BIAS, reg);
1679 }
1680 }
1681 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1682 handle_index += 2;
1683 assert(handle_index <= stack_slots, "overflow");
1684 if (map != NULL) {
1685 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1686 } else {
1687 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1688 }
1689 }
1690 }
1691 // Save floats
1692 for (int i = 0; i < total_in_args; i++) {
1693 int slot = handle_index + arg_save_area;
1694 int offset = slot * VMRegImpl::stack_slot_size;
1695 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1696 handle_index++;
1697 assert(handle_index <= stack_slots, "overflow");
1698 if (map != NULL) {
1699 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1700 } else {
1701 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1702 }
1703 }
1704 }
1706 }
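// Editor's usage note: this one routine serves both directions. Called with
// a non-NULL OopMap it stores the live long/double/float arguments into the
// save area (recording array oops in the map); called with map == NULL it
// reloads them. As used by check_needs_gc_for_critical_native below:
//
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, map,  in_regs, in_sig_bt); // save
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, NULL, in_regs, in_sig_bt); // restore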
1709 // Check GC_locker::needs_gc and enter the runtime if it's true. This
1710 // keeps a new JNI critical region from starting until a GC has been
1711 // forced. Save down any oops in registers and describe them in an
1712 // OopMap.
1713 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1714 const int stack_slots,
1715 const int total_in_args,
1716 const int arg_save_area,
1717 OopMapSet* oop_maps,
1718 VMRegPair* in_regs,
1719 BasicType* in_sig_bt) {
1720 __ block_comment("check GC_locker::needs_gc");
1721 Label cont;
1722 AddressLiteral sync_state(GC_locker::needs_gc_address());
1723 __ load_bool_contents(sync_state, G3_scratch);
1724 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1725 __ delayed()->nop();
1727 // Save down any values that are live in registers and call into the
1728 // runtime to halt for a GC
1729 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1730 save_or_restore_arguments(masm, stack_slots, total_in_args,
1731 arg_save_area, map, in_regs, in_sig_bt);
1733 __ mov(G2_thread, L7_thread_cache);
1735 __ set_last_Java_frame(SP, noreg);
1737 __ block_comment("block_for_jni_critical");
1738 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1739 __ delayed()->mov(L7_thread_cache, O0);
1740 oop_maps->add_gc_map( __ offset(), map);
1742 __ restore_thread(L7_thread_cache); // restore G2_thread
1743 __ reset_last_Java_frame();
1745 // Reload all the register arguments
1746 save_or_restore_arguments(masm, stack_slots, total_in_args,
1747 arg_save_area, NULL, in_regs, in_sig_bt);
1749 __ bind(cont);
1750 #ifdef ASSERT
1751 if (StressCriticalJNINatives) {
1752 // Stress register saving
1753 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1754 save_or_restore_arguments(masm, stack_slots, total_in_args,
1755 arg_save_area, map, in_regs, in_sig_bt);
1756 // Destroy argument registers
1757 for (int i = 0; i < total_in_args; i++) {
1758 if (in_regs[i].first()->is_Register()) {
1759 const Register reg = in_regs[i].first()->as_Register();
1760 if (reg->is_global()) {
1761 __ mov(G0, reg);
1762 }
1763 } else if (in_regs[i].first()->is_FloatRegister()) {
1764 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1765 }
1766 }
1768 save_or_restore_arguments(masm, stack_slots, total_in_args,
1769 arg_save_area, NULL, in_regs, in_sig_bt);
1770 }
1771 #endif
1772 }
1774 // Unpack an array argument into a pointer to the body and the length
1775 // if the array is non-null, otherwise pass 0 for both.
1776 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1777 // Pass the length, ptr pair
1778 Label is_null, done;
1779 if (reg.first()->is_stack()) {
1780 VMRegPair tmp = reg64_to_VMRegPair(L2);
1781 // Load the arg up from the stack
1782 move_ptr(masm, reg, tmp);
1783 reg = tmp;
1784 }
1785 __ cmp(reg.first()->as_Register(), G0);
1786 __ brx(Assembler::equal, false, Assembler::pt, is_null);
1787 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1788 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1789 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1790 move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1791 __ ba_short(done);
1792 __ bind(is_null);
1793 // Pass zeros
1794 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1795 move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1796 __ bind(done);
1797 }
1799 static void verify_oop_args(MacroAssembler* masm,
1800 methodHandle method,
1801 const BasicType* sig_bt,
1802 const VMRegPair* regs) {
1803 Register temp_reg = G5_method; // not part of any compiled calling seq
1804 if (VerifyOops) {
1805 for (int i = 0; i < method->size_of_parameters(); i++) {
1806 if (sig_bt[i] == T_OBJECT ||
1807 sig_bt[i] == T_ARRAY) {
1808 VMReg r = regs[i].first();
1809 assert(r->is_valid(), "bad oop arg");
1810 if (r->is_stack()) {
1811 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1812 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1813 __ ld_ptr(SP, ld_off, temp_reg);
1814 __ verify_oop(temp_reg);
1815 } else {
1816 __ verify_oop(r->as_Register());
1817 }
1818 }
1819 }
1820 }
1821 }
1823 static void gen_special_dispatch(MacroAssembler* masm,
1824 methodHandle method,
1825 const BasicType* sig_bt,
1826 const VMRegPair* regs) {
1827 verify_oop_args(masm, method, sig_bt, regs);
1828 vmIntrinsics::ID iid = method->intrinsic_id();
1830 // Now write the args into the outgoing interpreter space
1831 bool has_receiver = false;
1832 Register receiver_reg = noreg;
1833 int member_arg_pos = -1;
1834 Register member_reg = noreg;
1835 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1836 if (ref_kind != 0) {
1837 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1838 member_reg = G5_method; // known to be free at this point
1839 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1840 } else if (iid == vmIntrinsics::_invokeBasic) {
1841 has_receiver = true;
1842 } else {
1843 fatal(err_msg_res("unexpected intrinsic id %d", iid));
1844 }
1846 if (member_reg != noreg) {
1847 // Load the member_arg into register, if necessary.
1848 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1849 VMReg r = regs[member_arg_pos].first();
1850 if (r->is_stack()) {
1851 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1852 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1853 __ ld_ptr(SP, ld_off, member_reg);
1854 } else {
1855 // no data motion is needed
1856 member_reg = r->as_Register();
1857 }
1858 }
1860 if (has_receiver) {
1861 // Make sure the receiver is loaded into a register.
1862 assert(method->size_of_parameters() > 0, "oob");
1863 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1864 VMReg r = regs[0].first();
1865 assert(r->is_valid(), "bad receiver arg");
1866 if (r->is_stack()) {
1867 // Porting note: This assumes that compiled calling conventions always
1868 // pass the receiver oop in a register. If this is not true on some
1869 // platform, pick a temp and load the receiver from stack.
1870 fatal("receiver always in a register");
1871 receiver_reg = G3_scratch; // known to be free at this point
1872 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1873 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1874 __ ld_ptr(SP, ld_off, receiver_reg);
1875 } else {
1876 // no data motion is needed
1877 receiver_reg = r->as_Register();
1878 }
1879 }
1881 // Figure out which address we are really jumping to:
1882 MethodHandles::generate_method_handle_dispatch(masm, iid,
1883 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1884 }
1886 // ---------------------------------------------------------------------------
1887 // Generate a native wrapper for a given method. The method takes arguments
1888 // in the Java compiled code convention, marshals them to the native
1889 // convention (handlizes oops, etc), transitions to native, makes the call,
1890 // returns to java state (possibly blocking), unhandlizes any result and
1891 // returns.
1892 //
1893 // Critical native functions are a shorthand for the use of
1894 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1895 // functions. The wrapper is expected to unpack the arguments before
1896 // passing them to the callee and perform checks before and after the
1897 // native call to ensure that the GC_locker
1898 // lock_critical/unlock_critical semantics are followed. Some other
1899 // parts of JNI setup are skipped, like the tear down of the JNI handle
1900 // block and the check for pending exceptions, since it's impossible for
1901 // them to be thrown.
1902 //
1903 // They are roughly structured like this:
1904 // if (GC_locker::needs_gc())
1905 // SharedRuntime::block_for_jni_critical();
1906 // transition to thread_in_native
1907 // unpack array arguments and call native entry point
1908 // check for safepoint in progress
1909 // check if any thread suspend flags are set
1910 // call into JVM and possibly unlock the JNI critical
1911 // if a GC was suppressed while in the critical native.
1912 // transition back to thread_in_Java
1913 // return to caller
1914 //
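// For concreteness, an editor's sketch (names are hypothetical, not taken
// from this source): a method declared as
//
//   static native int sum(byte[] data);
//
// would ordinarily be implemented against JNI as
//
//   jint JNICALL Java_pkg_Cls_sum(JNIEnv* env, jclass k, jbyteArray data);
//
// while its critical counterpart is called as
//
//   jint JavaCritical_pkg_Cls_sum(jint length, jbyte* data);
//
// with no JNIEnv, no handles, and array arguments already unpacked into
// (length, body) pairs by unpack_array_argument above.
//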
1915 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1916 methodHandle method,
1917 int compile_id,
1918 BasicType* in_sig_bt,
1919 VMRegPair* in_regs,
1920 BasicType ret_type) {
1921 if (method->is_method_handle_intrinsic()) {
1922 vmIntrinsics::ID iid = method->intrinsic_id();
1923 intptr_t start = (intptr_t)__ pc();
1924 int vep_offset = ((intptr_t)__ pc()) - start;
1925 gen_special_dispatch(masm,
1926 method,
1927 in_sig_bt,
1928 in_regs);
1929 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1930 __ flush();
1931 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1932 return nmethod::new_native_nmethod(method,
1933 compile_id,
1934 masm->code(),
1935 vep_offset,
1936 frame_complete,
1937 stack_slots / VMRegImpl::slots_per_word,
1938 in_ByteSize(-1),
1939 in_ByteSize(-1),
1940 (OopMapSet*)NULL);
1941 }
1942 bool is_critical_native = true;
1943 address native_func = method->critical_native_function();
1944 if (native_func == NULL) {
1945 native_func = method->native_function();
1946 is_critical_native = false;
1947 }
1948 assert(native_func != NULL, "must have function");
1950 // Native nmethod wrappers never take possession of the oop arguments.
1951 // So the caller will gc the arguments. The only thing we need an
1952 // oopMap for is if the call is static.
1953 //
1954 // An OopMap for lock (and class if static), and one for the VM call itself
1955 OopMapSet *oop_maps = new OopMapSet();
1956 intptr_t start = (intptr_t)__ pc();
1958 // First thing, make an ic check to see if we should even be here
1959 {
1960 Label L;
1961 const Register temp_reg = G3_scratch;
1962 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1963 __ verify_oop(O0);
1964 __ load_klass(O0, temp_reg);
1965 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
1967 __ jump_to(ic_miss, temp_reg);
1968 __ delayed()->nop();
1969 __ align(CodeEntryAlignment);
1970 __ bind(L);
1971 }
1973 int vep_offset = ((intptr_t)__ pc()) - start;
1975 #ifdef COMPILER1
1976 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1977 // Object.hashCode can pull the hashCode from the header word
1978 // instead of doing a full VM transition once it's been computed.
1979 // Since hashCode is usually polymorphic at call sites we can't do
1980 // this optimization at the call site without a lot of work.
1981 Label slowCase;
1982 Register receiver = O0;
1983 Register result = O0;
1984 Register header = G3_scratch;
1985 Register hash = G3_scratch; // overwrite header value with hash value
1986 Register mask = G1; // to get hash field from header
1988 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1989 // We depend on hash_mask being at most 32 bits and avoid the use of
1990 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1991 // vm: see markOop.hpp.
1992 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1993 __ sethi(markOopDesc::hash_mask, mask);
1994 __ btst(markOopDesc::unlocked_value, header);
1995 __ br(Assembler::zero, false, Assembler::pn, slowCase);
1996 if (UseBiasedLocking) {
1997 // Check if biased and fall through to runtime if so
1998 __ delayed()->nop();
1999 __ btst(markOopDesc::biased_lock_bit_in_place, header);
2000 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2001 }
2002 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2004 // Check for a valid (non-zero) hash code and get its value.
2005 #ifdef _LP64
2006 __ srlx(header, markOopDesc::hash_shift, hash);
2007 #else
2008 __ srl(header, markOopDesc::hash_shift, hash);
2009 #endif
2010 __ andcc(hash, mask, hash);
2011 __ br(Assembler::equal, false, Assembler::pn, slowCase);
2012 __ delayed()->nop();
2014 // leaf return.
2015 __ retl();
2016 __ delayed()->mov(hash, result);
2017 __ bind(slowCase);
2018 }
2019 #endif // COMPILER1
2022 // We have received a description of where all the java args are located
2023 // on entry to the wrapper. We need to convert these args to where
2024 // the jni function will expect them. To figure out where they go
2025 // we convert the java signature to a C signature by inserting
2026 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2028 const int total_in_args = method->size_of_parameters();
2029 int total_c_args = total_in_args;
2030 int total_save_slots = 6 * VMRegImpl::slots_per_word;
2031 if (!is_critical_native) {
2032 total_c_args += 1;
2033 if (method->is_static()) {
2034 total_c_args++;
2035 }
2036 } else {
2037 for (int i = 0; i < total_in_args; i++) {
2038 if (in_sig_bt[i] == T_ARRAY) {
2039 // These have to be saved and restored across the safepoint
2040 total_c_args++;
2041 }
2042 }
2043 }
2045 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2046 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2047 BasicType* in_elem_bt = NULL;
2049 int argc = 0;
2050 if (!is_critical_native) {
2051 out_sig_bt[argc++] = T_ADDRESS;
2052 if (method->is_static()) {
2053 out_sig_bt[argc++] = T_OBJECT;
2054 }
2056 for (int i = 0; i < total_in_args ; i++ ) {
2057 out_sig_bt[argc++] = in_sig_bt[i];
2058 }
2059 } else {
2060 Thread* THREAD = Thread::current();
2061 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2062 SignatureStream ss(method->signature());
2063 for (int i = 0; i < total_in_args ; i++ ) {
2064 if (in_sig_bt[i] == T_ARRAY) {
2065 // Arrays are passed as int, elem* pair
2066 out_sig_bt[argc++] = T_INT;
2067 out_sig_bt[argc++] = T_ADDRESS;
2068 Symbol* atype = ss.as_symbol(CHECK_NULL);
2069 const char* at = atype->as_C_string();
2070 if (strlen(at) == 2) {
2071 assert(at[0] == '[', "must be");
2072 switch (at[1]) {
2073 case 'B': in_elem_bt[i] = T_BYTE; break;
2074 case 'C': in_elem_bt[i] = T_CHAR; break;
2075 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2076 case 'F': in_elem_bt[i] = T_FLOAT; break;
2077 case 'I': in_elem_bt[i] = T_INT; break;
2078 case 'J': in_elem_bt[i] = T_LONG; break;
2079 case 'S': in_elem_bt[i] = T_SHORT; break;
2080 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2081 default: ShouldNotReachHere();
2082 }
2083 }
2084 } else {
2085 out_sig_bt[argc++] = in_sig_bt[i];
2086 in_elem_bt[i] = T_VOID;
2087 }
2088 if (in_sig_bt[i] != T_VOID) {
2089 assert(in_sig_bt[i] == ss.type(), "must match");
2090 ss.next();
2091 }
2092 }
2093 }
2095 // Now figure out where the args must be stored and how much stack space
2096 // they require (neglecting out_preserve_stack_slots but including space for
2097 // storing the 1st six register arguments). It's weird; see int_stk_helper.
2098 //
2099 int out_arg_slots;
2100 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2102 if (is_critical_native) {
2103 // Critical natives may have to call out so they need a save area
2104 // for register arguments.
2105 int double_slots = 0;
2106 int single_slots = 0;
2107 for ( int i = 0; i < total_in_args; i++) {
2108 if (in_regs[i].first()->is_Register()) {
2109 const Register reg = in_regs[i].first()->as_Register();
2110 switch (in_sig_bt[i]) {
2111 case T_ARRAY:
2112 case T_BOOLEAN:
2113 case T_BYTE:
2114 case T_SHORT:
2115 case T_CHAR:
2116 case T_INT: assert(reg->is_in(), "don't need to save these"); break;
2117 case T_LONG: if (reg->is_global()) double_slots++; break;
2118 default: ShouldNotReachHere();
2119 }
2120 } else if (in_regs[i].first()->is_FloatRegister()) {
2121 switch (in_sig_bt[i]) {
2122 case T_FLOAT: single_slots++; break;
2123 case T_DOUBLE: double_slots++; break;
2124 default: ShouldNotReachHere();
2125 }
2126 }
2127 }
2128 total_save_slots = double_slots * 2 + single_slots;
2129 }
2131 // Compute framesize for the wrapper. We need to handlize all oops in
2132 // registers. We must create space for them here that is disjoint from
2133 // the windowed save area because we have no control over when we might
2134 // flush the window again and overwrite values that gc has since modified.
2135 // (The live window race)
2136 //
2137 // We always just allocate 6 words for storing down these objects. This allows
2138 // us to simply record the base and use the Ireg number to decide which
2139 // slot to use. (Note that the reg number is the inbound number not the
2140 // outbound number).
2141 // We must shuffle args to match the native convention, and include var-args space.
2143 // Calculate the total number of stack slots we will need.
2145 // First count the abi requirement plus all of the outgoing args
2146 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2148 // Now the space for the inbound oop handle area
2150 int oop_handle_offset = round_to(stack_slots, 2);
2151 stack_slots += total_save_slots;
2153 // Now any space we need for handlizing a klass if static method
2155 int klass_slot_offset = 0;
2156 int klass_offset = -1;
2157 int lock_slot_offset = 0;
2158 bool is_static = false;
2160 if (method->is_static()) {
2161 klass_slot_offset = stack_slots;
2162 stack_slots += VMRegImpl::slots_per_word;
2163 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2164 is_static = true;
2165 }
2167 // Plus a lock if needed
2169 if (method->is_synchronized()) {
2170 lock_slot_offset = stack_slots;
2171 stack_slots += VMRegImpl::slots_per_word;
2172 }
2174 // Now a place to save return value or as a temporary for any gpr -> fpr moves
2175 stack_slots += 2;
2177 // Ok, the space we have allocated will look like:
2178 //
2179 //
2180 // FP-> | |
2181 // |---------------------|
2182 // | 2 slots for moves |
2183 // |---------------------|
2184 // | lock box (if sync) |
2185 // |---------------------| <- lock_slot_offset
2186 // | klass (if static) |
2187 // |---------------------| <- klass_slot_offset
2188 // | oopHandle area |
2189 // |---------------------| <- oop_handle_offset
2190 // | outbound memory |
2191 // | based arguments |
2192 // | |
2193 // |---------------------|
2194 // | vararg area |
2195 // |---------------------|
2196 // | |
2197 // SP-> | out_preserved_slots |
2198 //
2199 //
2202 // Now compute the actual number of stack words we need, rounding to make the
2203 // stack properly aligned.
2204 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2206 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2208 // Generate stack overflow check before creating frame
2209 __ generate_stack_overflow_check(stack_size);
2211 // Generate a new frame for the wrapper.
2212 __ save(SP, -stack_size, SP);
2214 int frame_complete = ((intptr_t)__ pc()) - start;
2216 __ verify_thread();
2218 if (is_critical_native) {
2219 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
2220 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2221 }
2223 //
2224 // We immediately shuffle the arguments so that for any vm call we have to
2225 // make from here on out (sync slow path, jvmti, etc.) we will have
2226 // captured the oops from our caller and have a valid oopMap for
2227 // them.
2229 // -----------------
2230 // The Grand Shuffle
2231 //
2232 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2233 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2234 // the class mirror instead of a receiver. This pretty much guarantees that
2235 // register layout will not match. We ignore these extra arguments during
2236 // the shuffle. The shuffle is described by the two calling convention
2237 // vectors we have in our possession. We simply walk the java vector to
2238 // get the source locations and the c vector to get the destinations.
2239 // Because we have a new window and the argument registers are completely
2240 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2241 // here.
2243 // This is a trick. We double the stack slots so we can claim
2244 // the oops in the caller's frame. Since we are sure to have
2245 // more args than the caller, doubling is enough to make
2246 // sure we can capture all the incoming oop args from the
2247 // caller.
2248 //
2249 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2250 // Record sp-based slot for receiver on stack for non-static methods
2251 int receiver_offset = -1;
2253 // We move the arguments backward because a floating point register
2254 // destination will always be a register with a greater or equal register
2255 // number, or the stack.
2257 #ifdef ASSERT
2258 bool reg_destroyed[RegisterImpl::number_of_registers];
2259 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2260 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2261 reg_destroyed[r] = false;
2262 }
2263 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2264 freg_destroyed[f] = false;
2265 }
2267 #endif /* ASSERT */
2269 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2271 #ifdef ASSERT
2272 if (in_regs[i].first()->is_Register()) {
2273 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2274 } else if (in_regs[i].first()->is_FloatRegister()) {
2275 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2276 }
2277 if (out_regs[c_arg].first()->is_Register()) {
2278 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2279 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2280 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2281 }
2282 #endif /* ASSERT */
2284 switch (in_sig_bt[i]) {
2285 case T_ARRAY:
2286 if (is_critical_native) {
2287 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2288 c_arg--;
2289 break;
2290 }
2291 case T_OBJECT:
2292 assert(!is_critical_native, "no oop arguments");
2293 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2294 ((i == 0) && (!is_static)),
2295 &receiver_offset);
2296 break;
2297 case T_VOID:
2298 break;
2300 case T_FLOAT:
2301 float_move(masm, in_regs[i], out_regs[c_arg]);
2302 break;
2304 case T_DOUBLE:
2305 assert( i + 1 < total_in_args &&
2306 in_sig_bt[i + 1] == T_VOID &&
2307 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2308 double_move(masm, in_regs[i], out_regs[c_arg]);
2309 break;
2311 case T_LONG :
2312 long_move(masm, in_regs[i], out_regs[c_arg]);
2313 break;
2315 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2317 default:
2318 move32_64(masm, in_regs[i], out_regs[c_arg]);
2319 }
2320 }
2322 // Pre-load a static method's oop into O1. Used both by locking code and
2323 // the normal JNI call code.
2324 if (method->is_static() && !is_critical_native) {
2325 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2327 // Now handlize the static class mirror in O1. It's known not-null.
2328 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2329 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2330 __ add(SP, klass_offset + STACK_BIAS, O1);
2331 }
2334 const Register L6_handle = L6;
2336 if (method->is_synchronized()) {
2337 assert(!is_critical_native, "unhandled");
2338 __ mov(O1, L6_handle);
2339 }
2341 // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2342 // except O6/O7. So if we must call out we must push a new frame. We immediately
2343 // push a new frame and flush the windows.
2344 #ifdef _LP64
2345 intptr_t thepc = (intptr_t) __ pc();
2346 {
2347 address here = __ pc();
2348 // Call the next instruction
2349 __ call(here + 8, relocInfo::none);
2350 __ delayed()->nop();
2351 }
2352 #else
2353 intptr_t thepc = __ load_pc_address(O7, 0);
2354 #endif /* _LP64 */
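// Editor's note: on SPARC a CALL deposits its own address in O7, so calling
// the very next instruction is an idiomatic way to materialize the current
// pc (the 32-bit path uses load_pc_address for the same purpose); that pc
// is what add_gc_map records just below.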
2356 // We use the same pc/oopMap repeatedly when we call out
2357 oop_maps->add_gc_map(thepc - start, map);
2359 // O7 now has the pc loaded that we will use when we finally call to native.
2361 // Save thread in L7; it crosses a bunch of VM calls below
2362 // Don't use save_thread because it smashes G2 and we merely
2363 // want to save a copy
2364 __ mov(G2_thread, L7_thread_cache);
2367 // We create an inner frame at most once;
2368 // when we create it we must also save G2_thread.
2369 bool inner_frame_created = false;
2371 // dtrace method entry support
2372 {
2373 SkipIfEqual skip_if(
2374 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2375 // create inner frame
2376 __ save_frame(0);
2377 __ mov(G2_thread, L7_thread_cache);
2378 __ set_metadata_constant(method(), O1);
2379 __ call_VM_leaf(L7_thread_cache,
2380 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2381 G2_thread, O1);
2382 __ restore();
2383 }
2385 // RedefineClasses() tracing support for obsolete method entry
2386 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2387 // create inner frame
2388 __ save_frame(0);
2389 __ mov(G2_thread, L7_thread_cache);
2390 __ set_metadata_constant(method(), O1);
2391 __ call_VM_leaf(L7_thread_cache,
2392 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2393 G2_thread, O1);
2394 __ restore();
2395 }
2397 // We are in the jni frame unless inner_frame_created is true, in which case
2398 // we are one frame deeper (the "inner" frame). If we are in the
2399 // "inner" frame the args are in the Iregs; if in the jni frame then
2400 // they are in the Oregs.
2401 // If we ever need to go to the VM (for locking, jvmti) then
2402 // we will always be in the "inner" frame.
2404 // Lock a synchronized method
2405 int lock_offset = -1; // Set if locked
2406 if (method->is_synchronized()) {
2407 Register Roop = O1;
2408 const Register L3_box = L3;
2410 create_inner_frame(masm, &inner_frame_created);
2412 __ ld_ptr(I1, 0, O1);
2413 Label done;
2415 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2416 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2417 #ifdef ASSERT
2418 if (UseBiasedLocking) {
2419 // making the box point to itself will make it clear it went unused
2420 // but also be obviously invalid
2421 __ st_ptr(L3_box, L3_box, 0);
2422 }
2423 #endif // ASSERT
2424 //
2425 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2426 //
2427 __ compiler_lock_object(Roop, L1, L3_box, L2);
2428 __ br(Assembler::equal, false, Assembler::pt, done);
2429 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2432 // None of the above fast optimizations worked so we have to get into the
2433 // slow case of monitor enter. Inline a special case of call_VM that
2434 // disallows any pending_exception.
2435 __ mov(Roop, O0); // Need oop in O0
2436 __ mov(L3_box, O1);
2438 // Record last_Java_sp, in case the VM code releases the JVM lock.
2440 __ set_last_Java_frame(FP, I7);
2442 // do the call
2443 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2444 __ delayed()->mov(L7_thread_cache, O2);
2446 __ restore_thread(L7_thread_cache); // restore G2_thread
2447 __ reset_last_Java_frame();
2449 #ifdef ASSERT
2450 { Label L;
2451 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2452 __ br_null_short(O0, Assembler::pt, L);
2453 __ stop("no pending exception allowed on exit from IR::monitorenter");
2454 __ bind(L);
2455 }
2456 #endif
2457 __ bind(done);
2458 }
2461 // Finally just about ready to make the JNI call
2463 __ flush_windows();
2464 if (inner_frame_created) {
2465 __ restore();
2466 } else {
2467 // Store only what we need from this frame
2468 // QQQ I think that on non-v9 (like we care) we don't need these saves
2469 // either, as the flush traps and the current window goes too.
2470 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2471 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2472 }
2474 // get JNIEnv* which is first argument to native
2475 if (!is_critical_native) {
2476 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2477 }
2479 // Use that pc we placed in O7 a while back as the current frame anchor
2480 __ set_last_Java_frame(SP, O7);
2482 // We flushed the windows ages ago; now mark them as flushed before transitioning.
2483 __ set(JavaFrameAnchor::flushed, G3_scratch);
2484 __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2486 // Transition from _thread_in_Java to _thread_in_native.
2487 __ set(_thread_in_native, G3_scratch);
2489 #ifdef _LP64
2490 AddressLiteral dest(native_func);
2491 __ relocate(relocInfo::runtime_call_type);
2492 __ jumpl_to(dest, O7, O7);
2493 #else
2494 __ call(native_func, relocInfo::runtime_call_type);
2495 #endif
2496 __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2498 __ restore_thread(L7_thread_cache); // restore G2_thread
2500 // Unpack native results. For int-types, we do any needed sign-extension
2501 // and move things into I0. The return value there will survive any VM
2502 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2503 // specially in the slow-path code.
2504 switch (ret_type) {
2505 case T_VOID: break; // Nothing to do!
2506 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2507 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2508 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
2509 case T_LONG:
2510 #ifndef _LP64
2511 __ mov(O1, I1);
2512 #endif
2513 // Fall thru
2514 case T_OBJECT: // Really a handle
2515 case T_ARRAY:
2516 case T_INT:
2517 __ mov(O0, I0);
2518 break;
2519 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2520 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2521 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2522 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2523 // Cannot de-handlize until after reclaiming jvm_lock
2524 default:
2525 ShouldNotReachHere();
2526 }
2528 Label after_transition;
2529 // must we block?
2531 // Block, if necessary, before resuming in _thread_in_Java state.
2532 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2533 { Label no_block;
2534 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2536 // Switch thread to "native transition" state before reading the synchronization state.
2537 // This additional state is necessary because reading and testing the synchronization
2538 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2539 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2540 // VM thread changes sync state to synchronizing and suspends threads for GC.
2541 // Thread A is resumed to finish this native method, but doesn't block here since it
2542 // didn't see any synchronization in progress, and escapes.
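// Editor's summary of the transition protocol implemented below, in
// pseudocode (block_in_vm stands for the leaf call made at the "Block"
// comment further down):
//
//   state = _thread_in_native_trans;
//   membar();                                    // or serialization page write
//   if (safepoint_in_progress || suspend_flags)  block_in_vm();
//   state = _thread_in_Java;                     // after the no_block label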
2543 __ set(_thread_in_native_trans, G3_scratch);
2544 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2545 if(os::is_MP()) {
2546 if (UseMembar) {
2547 // Force this write out before the read below
2548 __ membar(Assembler::StoreLoad);
2549 } else {
2550 // Write serialization page so VM thread can do a pseudo remote membar.
2551 // We use the current thread pointer to calculate a thread specific
2552 // offset to write to within the page. This minimizes bus traffic
2553 // due to cache line collision.
2554 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2555 }
2556 }
2557 __ load_contents(sync_state, G3_scratch);
2558 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2560 Label L;
2561 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2562 __ br(Assembler::notEqual, false, Assembler::pn, L);
2563 __ delayed()->ld(suspend_state, G3_scratch);
2564 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2565 __ bind(L);
2567 // Block. Save any potential method result value before the operation and
2568 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2569 // lets us share the oopMap we used when we went native rather than create
2570 // a distinct one for this pc.
2571 //
2572 save_native_result(masm, ret_type, stack_slots);
2573 if (!is_critical_native) {
2574 __ call_VM_leaf(L7_thread_cache,
2575 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2576 G2_thread);
2577 } else {
2578 __ call_VM_leaf(L7_thread_cache,
2579 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2580 G2_thread);
2581 }
2583 // Restore any method result value
2584 restore_native_result(masm, ret_type, stack_slots);
2586 if (is_critical_native) {
2587 // The call above performed the transition to thread_in_Java so
2588 // skip the transition logic below.
2589 __ ba(after_transition);
2590 __ delayed()->nop();
2591 }
2593 __ bind(no_block);
2594 }
2596 // thread state is thread_in_native_trans. Any safepoint blocking has already
2597 // happened so we can now change state to _thread_in_Java.
2598 __ set(_thread_in_Java, G3_scratch);
2599 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2600 __ bind(after_transition);
2602 Label no_reguard;
2603 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2604 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2606 save_native_result(masm, ret_type, stack_slots);
2607 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2608 __ delayed()->nop();
2610 __ restore_thread(L7_thread_cache); // restore G2_thread
2611 restore_native_result(masm, ret_type, stack_slots);
2613 __ bind(no_reguard);
2615 // Handle possible exception (will unlock if necessary)
2617 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2619 // Unlock
2620 if (method->is_synchronized()) {
2621 Label done;
2622 Register I2_ex_oop = I2;
2623 const Register L3_box = L3;
2624 // Get locked oop from the handle we passed to jni
2625 __ ld_ptr(L6_handle, 0, L4);
2626 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2627 // Must save pending exception around the slow-path VM call. Since it's a
2628 // leaf call, the pending exception (if any) can be kept in a register.
2629 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2630 // Now unlock
2631 // (Roop, Rmark, Rbox, Rscratch)
2632 __ compiler_unlock_object(L4, L1, L3_box, L2);
2633 __ br(Assembler::equal, false, Assembler::pt, done);
2634 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2636 // save and restore any potential method result value around the unlocking
2637 // operation. Will save in I0 (or stack for FP returns).
2638 save_native_result(masm, ret_type, stack_slots);
2640 // Must clear pending-exception before re-entering the VM. Since this is
2641 // a leaf call, pending-exception-oop can be safely kept in a register.
2642 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2644 // slow case of monitor exit. Inline a special case of call_VM that
2645 // disallows any pending_exception.
2646 __ mov(L3_box, O1);
2648 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2649 __ delayed()->mov(L4, O0); // Need oop in O0
2651 __ restore_thread(L7_thread_cache); // restore G2_thread
2653 #ifdef ASSERT
2654 { Label L;
2655 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2656 __ br_null_short(O0, Assembler::pt, L);
2657 __ stop("no pending exception allowed on exit from IR::monitorexit");
2658 __ bind(L);
2659 }
2660 #endif
2661 restore_native_result(masm, ret_type, stack_slots);
2662 // check_forward_pending_exception jump to forward_exception if any pending
2663 // exception is set. The forward_exception routine expects to see the
2664 // exception in pending_exception and not in a register. Kind of clumsy,
2665 // since all folks who branch to forward_exception must have tested
2666 // pending_exception first and hence have it in a register already.
2667 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2668 __ bind(done);
2669 }
2671 // Tell dtrace about this method exit
2672 {
2673 SkipIfEqual skip_if(
2674 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2675 save_native_result(masm, ret_type, stack_slots);
2676 __ set_metadata_constant(method(), O1);
2677 __ call_VM_leaf(L7_thread_cache,
2678 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2679 G2_thread, O1);
2680 restore_native_result(masm, ret_type, stack_slots);
2681 }
2683 // Clear "last Java frame" SP and PC.
2684 __ verify_thread(); // G2_thread must be correct
2685 __ reset_last_Java_frame();
2687 // Unpack oop result
2688 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2689 Label L;
2690 __ addcc(G0, I0, G0);
2691 __ brx(Assembler::notZero, true, Assembler::pt, L);
2692 __ delayed()->ld_ptr(I0, 0, I0);
2693 __ mov(G0, I0);
2694 __ bind(L);
2695 __ verify_oop(I0);
2696 }
2698 if (!is_critical_native) {
2699 // reset handle block
2700 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2701 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2703 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2704 check_forward_pending_exception(masm, G3_scratch);
2705 }
2708 // Return
2710 #ifndef _LP64
2711 if (ret_type == T_LONG) {
2713 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2714 __ sllx(I0, 32, G1); // Shift bits into high G1
2715 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2716 __ or3 (I1, G1, G1); // OR 64 bits into G1
2717 }
2718 #endif
2720 __ ret();
2721 __ delayed()->restore();
2723 __ flush();
2725 nmethod *nm = nmethod::new_native_nmethod(method,
2726 compile_id,
2727 masm->code(),
2728 vep_offset,
2729 frame_complete,
2730 stack_slots / VMRegImpl::slots_per_word,
2731 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2732 in_ByteSize(lock_offset),
2733 oop_maps);
2735 if (is_critical_native) {
2736 nm->set_lazy_critical_native(true);
2737 }
2738 return nm;
2740 }
2742 #ifdef HAVE_DTRACE_H
2743 // ---------------------------------------------------------------------------
2744 // Generate a dtrace nmethod for a given signature. The method takes arguments
2745 // in the Java compiled code convention, marshals them to the native
2746 // abi and then leaves nops at the position you would expect to call a native
2747 // function. When the probe is enabled the nops are replaced with a trap
2748 // instruction that dtrace inserts, and the trap will cause a notification
2749 // to dtrace.
2750 //
2751 // The probes are only able to take primitive types and java/lang/String as
2752 // arguments. No other java types are allowed. Strings are converted to utf8
2753 // strings so that from dtrace's point of view java strings are converted to C
2754 // strings. There is an arbitrary fixed limit on the total space that a method
2755 // can use for converting the strings (256 chars per string in the signature).
2756 // So any java string larger than this is truncated.
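// Illustrative example (editor's addition; the method is hypothetical):
// a Java probe signature such as
//
//   void probe(long id, String msg, float ratio)
//
// is presented to dtrace as roughly
//
//   probe(long id, const char* msg /* utf8, truncated */, int ratio)
//
// since long/double stay two-slot longs, float is passed as an int, and
// String becomes a pointer to a utf8 copy built in this frame.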
2758 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2759 static bool offsets_initialized = false;
2761 nmethod *SharedRuntime::generate_dtrace_nmethod(
2762 MacroAssembler *masm, methodHandle method) {
2765 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2766 // be single threaded in this method.
2767 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2769 // Fill in the signature array, for the calling-convention call.
2770 int total_args_passed = method->size_of_parameters();
2772 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2773 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2775 // The signature we are going to use for the trap that dtrace will see:
2776 // java/lang/String is converted, we drop "this", and any other object
2777 // is converted to NULL. (A one-slot java/lang/Long object reference
2778 // is converted to a two-slot long, which is why we double the allocation).
2779 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2780 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2782 int i=0;
2783 int total_strings = 0;
2784 int first_arg_to_pass = 0;
2785 int total_c_args = 0;
2787 // Skip the receiver as dtrace doesn't want to see it
2788 if( !method->is_static() ) {
2789 in_sig_bt[i++] = T_OBJECT;
2790 first_arg_to_pass = 1;
2791 }
2793 SignatureStream ss(method->signature());
2794 for ( ; !ss.at_return_type(); ss.next()) {
2795 BasicType bt = ss.type();
2796 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2797 out_sig_bt[total_c_args++] = bt;
2798 if( bt == T_OBJECT) {
2799 Symbol* s = ss.as_symbol_or_null();
2800 if (s == vmSymbols::java_lang_String()) {
2801 total_strings++;
2802 out_sig_bt[total_c_args-1] = T_ADDRESS;
2803 } else if (s == vmSymbols::java_lang_Boolean() ||
2804 s == vmSymbols::java_lang_Byte()) {
2805 out_sig_bt[total_c_args-1] = T_BYTE;
2806 } else if (s == vmSymbols::java_lang_Character() ||
2807 s == vmSymbols::java_lang_Short()) {
2808 out_sig_bt[total_c_args-1] = T_SHORT;
2809 } else if (s == vmSymbols::java_lang_Integer() ||
2810 s == vmSymbols::java_lang_Float()) {
2811 out_sig_bt[total_c_args-1] = T_INT;
2812 } else if (s == vmSymbols::java_lang_Long() ||
2813 s == vmSymbols::java_lang_Double()) {
2814 out_sig_bt[total_c_args-1] = T_LONG;
2815 out_sig_bt[total_c_args++] = T_VOID;
2816 }
2817 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2818 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2819 // We convert double to long
2820 out_sig_bt[total_c_args-1] = T_LONG;
2821 out_sig_bt[total_c_args++] = T_VOID;
2822 } else if ( bt == T_FLOAT) {
2823 // We convert float to int
2824 out_sig_bt[total_c_args-1] = T_INT;
2825 }
2826 }
2828 assert(i==total_args_passed, "validly parsed signature");
2830 // Now get the compiled-Java layout as input arguments
2831 int comp_args_on_stack;
2832 comp_args_on_stack = SharedRuntime::java_calling_convention(
2833 in_sig_bt, in_regs, total_args_passed, false);
2835 // We have received a description of where all the java args are located
2836 // on entry to the wrapper. We need to convert these args to where
2837 // a native (non-jni) function would expect them. To figure out
2838 // where they go we convert the java signature to a C signature and remove
2839 // T_VOID for any long/double we might have received.
2842 // Now figure out where the args must be stored and how much stack space
2843 // they require (neglecting out_preserve_stack_slots but including space for
2844 // storing the 1st six register arguments). It's weird; see int_stk_helper.
2845 //
2846 int out_arg_slots;
2847 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2849 // Calculate the total number of stack slots we will need.
2851 // First count the abi requirement plus all of the outgoing args
2852 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2854 // Plus a temp for possible conversion of float/double/long register args
2856 int conversion_temp = stack_slots;
2857 stack_slots += 2;
2860 // Now space for the string(s) we must convert
2862 int string_locs = stack_slots;
2863 stack_slots += total_strings *
2864 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2866 // Ok, the space we have allocated will look like:
2867 //
2868 //
2869 // FP-> | |
2870 // |---------------------|
2871 // | string[n] |
2872 // |---------------------| <- string_locs[n]
2873 // | string[n-1] |
2874 // |---------------------| <- string_locs[n-1]
2875 // | ... |
2876 // | ... |
2877 // |---------------------| <- string_locs[1]
2878 // | string[0] |
2879 // |---------------------| <- string_locs[0]
2880 // | temp |
2881 // |---------------------| <- conversion_temp
2882 // | outbound memory |
2883 // | based arguments |
2884 // | |
2885 // |---------------------|
2886 // | |
2887 // SP-> | out_preserved_slots |
2888 //
2889 //
2891 // Now compute the actual number of stack words we need, rounding to make the
2892 // stack properly aligned.
2893 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2895 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2897 intptr_t start = (intptr_t)__ pc();
2899 // First thing, make an ic check to see if we should even be here
2901 {
2902 Label L;
2903 const Register temp_reg = G3_scratch;
2904 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2905 __ verify_oop(O0);
2906 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2907 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2909 __ jump_to(ic_miss, temp_reg);
2910 __ delayed()->nop();
2911 __ align(CodeEntryAlignment);
2912 __ bind(L);
2913 }
2915 int vep_offset = ((intptr_t)__ pc()) - start;
2918 // The instruction at the verified entry point must be 5 bytes or longer
2919 // because it can be patched on the fly by make_non_entrant. The stack bang
2920 // instruction fits that requirement.
2922 // Generate stack overflow check before creating frame
2923 __ generate_stack_overflow_check(stack_size);
2925 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2926 "valid size for make_non_entrant");
2928 // Generate a new frame for the wrapper.
2929 __ save(SP, -stack_size, SP);
2931 // Frame is now completed as far as size and linkage.
2933 int frame_complete = ((intptr_t)__ pc()) - start;
2935 #ifdef ASSERT
2936 bool reg_destroyed[RegisterImpl::number_of_registers];
2937 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2938 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2939 reg_destroyed[r] = false;
2940 }
2941 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2942 freg_destroyed[f] = false;
2943 }
2945 #endif /* ASSERT */
2947 VMRegPair zero;
2948 const Register g0 = G0; // without this we get a compiler warning (why??)
2949 zero.set2(g0->as_VMReg());
2951 int c_arg, j_arg;
2953 Register conversion_off = noreg;
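// conversion_off lazily materializes a stack offset in L6 the first time
// the conversion code below meets an offset too large for a 13-bit signed
// immediate (simm13); later uses then address the stack via that register.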
2955 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2956 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2958 VMRegPair src = in_regs[j_arg];
2959 VMRegPair dst = out_regs[c_arg];
2961 #ifdef ASSERT
2962 if (src.first()->is_Register()) {
2963 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2964 } else if (src.first()->is_FloatRegister()) {
2965 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2966 FloatRegisterImpl::S)], "ack!");
2967 }
2968 if (dst.first()->is_Register()) {
2969 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2970 } else if (dst.first()->is_FloatRegister()) {
2971 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2972 FloatRegisterImpl::S)] = true;
2973 }
2974 #endif /* ASSERT */
2976 switch (in_sig_bt[j_arg]) {
2977 case T_ARRAY:
2978 case T_OBJECT:
2979 {
2980 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
2981 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2982 // need to unbox a one-slot value
2983 Register in_reg = L0;
2984 Register tmp = L2;
2985 if ( src.first()->is_reg() ) {
2986 in_reg = src.first()->as_Register();
2987 } else {
2988 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2989 "must be");
2990 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2991 }
2992 // If the final destination is an acceptable register
2993 if ( dst.first()->is_reg() ) {
2994 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2995 tmp = dst.first()->as_Register();
2996 }
2997 }
2999 Label skipUnbox;
3000 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
3001 __ mov(G0, tmp->successor());
3002 }
3003 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
3004 __ delayed()->mov(G0, tmp);
3006 BasicType bt = out_sig_bt[c_arg];
3007 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3008 switch (bt) {
3009 case T_BYTE:
3010 __ ldub(in_reg, box_offset, tmp); break;
3011 case T_SHORT:
3012 __ lduh(in_reg, box_offset, tmp); break;
3013 case T_INT:
3014 __ ld(in_reg, box_offset, tmp); break;
3015 case T_LONG:
3016 __ ld_long(in_reg, box_offset, tmp); break;
3017 default: ShouldNotReachHere();
3018 }
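// In C terms the unboxing sequence above is roughly (a sketch, not the
// actual runtime code; "box" stands for the incoming oop):
//   tmp = (box == NULL) ? 0 : *(T*)((char*)box + box_offset);
// where T is jbyte/jshort/jint/jlong according to out_sig_bt[c_arg].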
3020 __ bind(skipUnbox);
3021 // If tmp wasn't the final destination, copy to the final destination
3022 if (tmp == L2) {
3023 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
3024 if (out_sig_bt[c_arg] == T_LONG) {
3025 long_move(masm, tmp_as_VM, dst);
3026 } else {
3027 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
3028 }
3029 }
3030 if (out_sig_bt[c_arg] == T_LONG) {
3031 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3032 ++c_arg; // move over the T_VOID to keep the loop indices in sync
3033 }
3034 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
3035 Register s =
3036 src.first()->is_reg() ? src.first()->as_Register() : L2;
3037 Register d =
3038 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3040 // We store the oop now so that the conversion pass can reach it
3041 // while in the inner frame. This will be the only store if
3042 // the oop is NULL.
3043 if (s != L2) {
3044 // src is register
3045 if (d != L2) {
3046 // dst is register
3047 __ mov(s, d);
3048 } else {
3049 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3050 STACK_BIAS), "must be");
3051 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
3052 }
3053 } else {
3054 // src not a register
3055 assert(Assembler::is_simm13(reg2offset(src.first()) +
3056 STACK_BIAS), "must be");
3057 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
3058 if (d == L2) {
3059 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3060 STACK_BIAS), "must be");
3061 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
3062 }
3063 }
3064 } else if (out_sig_bt[c_arg] != T_VOID) {
3065 // Convert the arg to NULL
3066 if (dst.first()->is_reg()) {
3067 __ mov(G0, dst.first()->as_Register());
3068 } else {
3069 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3070 STACK_BIAS), "must be");
3071 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
3072 }
3073 }
3074 }
3075 break;
3076 case T_VOID:
3077 break;
3079 case T_FLOAT:
3080 if (src.first()->is_stack()) {
3081 // Stack to stack/reg is simple
3082 move32_64(masm, src, dst);
3083 } else {
3084 if (dst.first()->is_reg()) {
3085 // freg -> reg
3086 int off =
3087 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3088 Register d = dst.first()->as_Register();
3089 if (Assembler::is_simm13(off)) {
3090 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3091 SP, off);
3092 __ ld(SP, off, d);
3093 } else {
3094 if (conversion_off == noreg) {
3095 __ set(off, L6);
3096 conversion_off = L6;
3097 }
3098 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3099 SP, conversion_off);
3100 __ ld(SP, conversion_off , d);
3101 }
3102 } else {
3103 // freg -> mem
3104 int off = STACK_BIAS + reg2offset(dst.first());
3105 if (Assembler::is_simm13(off)) {
3106 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3107 SP, off);
3108 } else {
3109 if (conversion_off == noreg) {
3110 __ set(off, L6);
3111 conversion_off = L6;
3112 }
3113 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3114 SP, conversion_off);
3115 }
3116 }
3117 }
3118 break;
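// (The store/load pairs in this float case and the double/long cases
// below exist because SPARC, prior to the VIS3 move instructions, has no
// direct copy between the floating-point and integer register files;
// values must bounce through memory.)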
3120 case T_DOUBLE:
3121 assert( j_arg + 1 < total_args_passed &&
3122 in_sig_bt[j_arg + 1] == T_VOID &&
3123 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
3124 if (src.first()->is_stack()) {
3125 // Stack to stack/reg is simple
3126 long_move(masm, src, dst);
3127 } else {
3128 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3130 // Destination could be an odd reg on 32bit in which case
3131 // we can't load directly to the destination.
3133 if (!d->is_even() && wordSize == 4) {
3134 d = L2;
3135 }
3136 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3137 if (Assembler::is_simm13(off)) {
3138 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3139 SP, off);
3140 __ ld_long(SP, off, d);
3141 } else {
3142 if (conversion_off == noreg) {
3143 __ set(off, L6);
3144 conversion_off = L6;
3145 }
3146 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3147 SP, conversion_off);
3148 __ ld_long(SP, conversion_off, d);
3149 }
3150 if (d == L2) {
3151 long_move(masm, reg64_to_VMRegPair(L2), dst);
3152 }
3153 }
3154 break;
3156 case T_LONG :
3157 // 32bit can't do a split move of something like g1 -> O0, O1
3158 // so use a memory temp
3159 if (src.is_single_phys_reg() && wordSize == 4) {
3160 Register tmp = L2;
3161 if (dst.first()->is_reg() &&
3162 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
3163 tmp = dst.first()->as_Register();
3164 }
3166 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3167 if (Assembler::is_simm13(off)) {
3168 __ stx(src.first()->as_Register(), SP, off);
3169 __ ld_long(SP, off, tmp);
3170 } else {
3171 if (conversion_off == noreg) {
3172 __ set(off, L6);
3173 conversion_off = L6;
3174 }
3175 __ stx(src.first()->as_Register(), SP, conversion_off);
3176 __ ld_long(SP, conversion_off, tmp);
3177 }
3179 if (tmp == L2) {
3180 long_move(masm, reg64_to_VMRegPair(L2), dst);
3181 }
3182 } else {
3183 long_move(masm, src, dst);
3184 }
3185 break;
3187 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
3189 default:
3190 move32_64(masm, src, dst);
3191 }
3192 }
3195 // If we have any strings we must store any register-based arg to the stack.
3196 // This includes any still-live float registers too.
3198 if (total_strings > 0 ) {
3200 // protect all the arg registers
3201 __ save_frame(0);
3202 __ mov(G2_thread, L7_thread_cache);
3203 const Register L2_string_off = L2;
3205 // Get first string offset
3206 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
3208 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
3209 if (out_sig_bt[c_arg] == T_ADDRESS) {
3211 VMRegPair dst = out_regs[c_arg];
3212 const Register d = dst.first()->is_reg() ?
3213 dst.first()->as_Register()->after_save() : noreg;
3215 // It's a string; the oop was already copied to the out arg
3216 // position
3217 if (d != noreg) {
3218 __ mov(d, O0);
3219 } else {
3220 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3221 "must be");
3222 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
3223 }
3224 Label skip;
3226 __ br_null(O0, false, Assembler::pn, skip);
3227 __ delayed()->add(FP, L2_string_off, O1);
3229 if (d != noreg) {
3230 __ mov(O1, d);
3231 } else {
3232 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3233 "must be");
3234 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
3235 }
3237 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3238 relocInfo::runtime_call_type);
3239 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3241 __ bind(skip);
3243 }
3245 }
3246 __ mov(L7_thread_cache, G2_thread);
3247 __ restore();
3249 }
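// To recap the loop above: the first converted string is written at
// FP + string_locs * VMRegImpl::stack_slot_size, and L2_string_off then
// advances by max_dtrace_string_size per converted string, so string n
// lands n * max_dtrace_string_size bytes above the first.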
3252 // OK, now we are done. Need to place the nop that dtrace wants in order to
3253 // patch in the trap
3255 int patch_offset = ((intptr_t)__ pc()) - start;
3257 __ nop();
3260 // Return
3262 __ ret();
3263 __ delayed()->restore();
3265 __ flush();
3267 nmethod *nm = nmethod::new_dtrace_nmethod(
3268 method, masm->code(), vep_offset, patch_offset, frame_complete,
3269 stack_slots / VMRegImpl::slots_per_word);
3270 return nm;
3272 }
3274 #endif // HAVE_DTRACE_H
3276 // This function returns the adjusted size (in number of words) of a c2i adapter
3277 // activation for use during deoptimization
3278 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3279 assert(callee_locals >= callee_parameters,
3280 "test and remove; got more parms than locals");
3281 if (callee_locals < callee_parameters)
3282 return 0; // No adjustment for negative locals
3283 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3284 return round_to(diff, WordsPerLong);
3285 }
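// A quick worked example (illustrative; assumes Interpreter::stackElementWords
// == 1 and WordsPerLong == 2): with 2 callee parameters and 5 callee locals,
// diff = (5 - 2) * 1 = 3 words, which round_to pads up to 4.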
3287 // "Top of Stack" slots that may be unused by the calling convention but must
3288 // otherwise be preserved.
3289 // On Intel these are not necessary and the value can be zero.
3290 // On Sparc this describes the words reserved for storing a register window
3291 // when an interrupt occurs.
3292 uint SharedRuntime::out_preserve_stack_slots() {
3293 return frame::register_save_words * VMRegImpl::slots_per_word;
3294 }
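// For example, assuming frame::register_save_words == 16 (the 8 %l plus
// 8 %i registers of a window) and slots_per_word == 2 on LP64, this
// returns 16 * 2 = 32 slots.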
3296 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3297 //
3298 // Common out the new frame generation for deopt and uncommon trap
3299 //
3300 Register G3pcs = G3_scratch; // Array of new pcs (input)
3301 Register Oreturn0 = O0;
3302 Register Oreturn1 = O1;
3303 Register O2UnrollBlock = O2;
3304 Register O3array = O3; // Array of frame sizes (input)
3305 Register O4array_size = O4; // number of frames (input)
3306 Register O7frame_size = O7; // new frame's size (loaded below from O3array)
3308 __ ld_ptr(O3array, 0, O7frame_size);
3309 __ sub(G0, O7frame_size, O7frame_size);
3310 __ save(SP, O7frame_size, SP);
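// (sub(G0, x, x) computes -x, so the save above is effectively
// "save SP, -frame_size, SP": it pushes a new register window and grows
// the stack down by the frame size just read from the array.)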
3311 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3313 #ifdef ASSERT
3314 // make sure that the frames are aligned properly
3315 #ifndef _LP64
3316 __ btst(wordSize*2-1, SP);
3317 __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
3318 #endif
3319 #endif
3321 // Deopt needs to pass some extra live values from frame to frame
3323 if (deopt) {
3324 __ mov(Oreturn0->after_save(), Oreturn0);
3325 __ mov(Oreturn1->after_save(), Oreturn1);
3326 }
3328 __ mov(O4array_size->after_save(), O4array_size);
3329 __ sub(O4array_size, 1, O4array_size);
3330 __ mov(O3array->after_save(), O3array);
3331 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3332 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3334 #ifdef ASSERT
3335 // trash registers to show a clear pattern in backtraces
3336 __ set(0xDEAD0000, I0);
3337 __ add(I0, 2, I1);
3338 __ add(I0, 4, I2);
3339 __ add(I0, 6, I3);
3340 __ add(I0, 8, I4);
3341 // Don't touch I5; it could hold a valuable savedSP
3342 __ set(0xDEADBEEF, L0);
3343 __ mov(L0, L1);
3344 __ mov(L0, L2);
3345 __ mov(L0, L3);
3346 __ mov(L0, L4);
3347 __ mov(L0, L5);
3349 // trash the return address as there is nothing to return to yet
3350 __ set(0xDEAD0001, O7);
3351 #endif
3353 __ mov(SP, O5_savedSP);
3354 }
3357 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3358 //
3359 // loop through the UnrollBlock info and create new frames
3360 //
3361 Register G3pcs = G3_scratch;
3362 Register Oreturn0 = O0;
3363 Register Oreturn1 = O1;
3364 Register O2UnrollBlock = O2;
3365 Register O3array = O3;
3366 Register O4array_size = O4;
3367 Label loop;
3369 // Before we make new frames, check to see if stack is available.
3370 // Do this after the caller's return address is on top of the stack
3371 if (UseStackBanging) {
3372 // Get total frame size for interpreted frames
3373 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3374 __ bang_stack_size(O4, O3, G3_scratch);
3375 }
3377 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3378 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3379 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3381 // Adjust old interpreter frame to make space for new frame's extra java locals
3382 //
3383 // We capture the original sp for the transition frame only because it is needed in
3384 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3385 // every interpreter frame captures a savedSP it is only needed at the transition
3386 // (fortunately). If we had to have it correct everywhere then we would need to
3387 // be told the sp_adjustment for each frame we create. If the frame size array
3388 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3389 // for each frame we create and keep up the illusion everywhere.
3390 //
3392 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3393 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3394 __ sub(SP, O7, SP);
3396 #ifdef ASSERT
3397 // make sure that there is at least one entry in the array
3398 __ tst(O4array_size);
3399 __ breakpoint_trap(Assembler::zero, Assembler::icc);
3400 #endif
3402 // Now push the new interpreter frames
3403 __ bind(loop);
3405 // allocate a new frame, filling the registers
3407 gen_new_frame(masm, deopt); // allocate an interpreter frame
3409 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
3410 __ delayed()->add(O3array, wordSize, O3array);
3411 __ ld_ptr(G3pcs, 0, O7); // load the final frame's new pc
3413 }
3415 //------------------------------generate_deopt_blob----------------------------
3416 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3417 // instead.
3418 void SharedRuntime::generate_deopt_blob() {
3419 // allocate space for the code
3420 ResourceMark rm;
3421 // setup code generation tools
3422 int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
3423 if (UseStackBanging) {
3424 pad += StackShadowPages*16 + 32;
3425 }
3426 #ifdef _LP64
3427 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3428 #else
3429 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3430 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3431 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3432 #endif /* _LP64 */
3433 MacroAssembler* masm = new MacroAssembler(&buffer);
3434 FloatRegister Freturn0 = F0;
3435 Register Greturn1 = G1;
3436 Register Oreturn0 = O0;
3437 Register Oreturn1 = O1;
3438 Register O2UnrollBlock = O2;
3439 Register L0deopt_mode = L0;
3440 Register G4deopt_mode = G4_scratch;
3441 int frame_size_words;
3442 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3443 #if !defined(_LP64) && defined(COMPILER2)
3444 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3445 #endif
3446 Label cont;
3448 OopMapSet *oop_maps = new OopMapSet();
3450 //
3451 // This is the entry point for code which is returning to a de-optimized
3452 // frame.
3453 // The steps taken by this frame are as follows:
3454 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3455 // and all potentially live registers (at a pollpoint many registers can be live).
3456 //
3457 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3458 // returns information about the number and size of interpreter frames
3459 // which are equivalent to the frame which is being deoptimized)
3460 // - deallocate the unpack frame, restoring only result values. Other
3461 // volatile registers will now be captured in the vframeArray as needed.
3462 // - deallocate the deoptimization frame
3463 // - in a loop using the information returned in the previous step
3464 // push new interpreter frames (take care to propagate the return
3465 // values through each new frame pushed)
3466 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3467 // - call the C routine: Deoptimization::unpack_frames (this function
3468 // lays out values on the interpreter frame which was just created)
3469 // - deallocate the dummy unpack_frame
3470 // - ensure that all the return values are correctly set and then do
3471 // a return to the interpreter entry point
3472 //
3473 // Refer to the following methods for more information:
3474 // - Deoptimization::fetch_unroll_info
3475 // - Deoptimization::unpack_frames
3477 OopMap* map = NULL;
3479 int start = __ offset();
3481 // restore G2, the trampoline destroyed it
3482 __ get_thread();
3484 // On entry we have been called by the deoptimized nmethod with a call that
3485 // replaced the original call (or safepoint polling location) so the deoptimizing
3486 // pc is now in O7. Return values are still in the expected places.
3488 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3489 __ ba(cont);
3490 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3492 int exception_offset = __ offset() - start;
3494 // restore G2, the trampoline destroyed it
3495 __ get_thread();
3497 // On entry we have been jumped to by the exception handler (or exception_blob
3498 // for server). O0 contains the exception oop and O7 contains the original
3499 // exception pc. So if we push a frame here it will look to the
3500 // stack walking code (fetch_unroll_info) just like a normal call, so
3501 // state will be extracted normally.
3503 // save exception oop in JavaThread and fall through into the
3504 // exception_in_tls case since they are handled in the same way except
3505 // for where the pending exception is kept.
3506 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3508 //
3509 // Vanilla deoptimization with an exception pending in exception_oop
3510 //
3511 int exception_in_tls_offset = __ offset() - start;
3513 // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3514 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3516 // Restore G2_thread
3517 __ get_thread();
3519 #ifdef ASSERT
3520 {
3521 // verify that there is really an exception oop in exception_oop
3522 Label has_exception;
3523 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3524 __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3525 __ stop("no exception in thread");
3526 __ bind(has_exception);
3528 // verify that there is no pending exception
3529 Label no_pending_exception;
3530 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3531 __ ld_ptr(exception_addr, Oexception);
3532 __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3533 __ stop("must not have pending exception here");
3534 __ bind(no_pending_exception);
3535 }
3536 #endif
3538 __ ba(cont);
3539 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3541 //
3542 // Reexecute entry, similar to c2 uncommon trap
3543 //
3544 int reexecute_offset = __ offset() - start;
3546 // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3547 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3549 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3551 __ bind(cont);
3553 __ set_last_Java_frame(SP, noreg);
3555 // do the call by hand so we can get the oopmap
3557 __ mov(G2_thread, L7_thread_cache);
3558 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3559 __ delayed()->mov(G2_thread, O0);
3561 // Set an oopmap for the call site; this describes all our saved volatile registers
3563 oop_maps->add_gc_map( __ offset()-start, map);
3565 __ mov(L7_thread_cache, G2_thread);
3567 __ reset_last_Java_frame();
3569 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3570 // so this move will survive
3572 __ mov(L0deopt_mode, G4deopt_mode);
3574 __ mov(O0, O2UnrollBlock->after_save());
3576 RegisterSaver::restore_result_registers(masm);
3578 Label noException;
3579 __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3581 // Move the pending exception from exception_oop to Oexception so
3582 // the pending exception will be picked up by the interpreter.
3583 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3584 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3585 __ bind(noException);
3587 // deallocate the deoptimization frame taking care to preserve the return values
3588 __ mov(Oreturn0, Oreturn0->after_save());
3589 __ mov(Oreturn1, Oreturn1->after_save());
3590 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3591 __ restore();
3593 // Allocate new interpreter frame(s) and possible c2i adapter frame
3595 make_new_frames(masm, true);
3597 // push a dummy "unpack_frame" taking care of float return values and
3598 // call Deoptimization::unpack_frames to have the unpacker lay out
3599 // information in the interpreter frames just created and then return
3600 // to the interpreter entry point
3601 __ save(SP, -frame_size_words*wordSize, SP);
3602 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3603 #if !defined(_LP64)
3604 #if defined(COMPILER2)
3605 // 32-bit 1-register longs return longs in G1
3606 __ stx(Greturn1, saved_Greturn1_addr);
3607 #endif
3608 __ set_last_Java_frame(SP, noreg);
3609 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3610 #else
3611 // LP64 uses g4 in set_last_Java_frame
3612 __ mov(G4deopt_mode, O1);
3613 __ set_last_Java_frame(SP, G0);
3614 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3615 #endif
3616 __ reset_last_Java_frame();
3617 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3619 #if !defined(_LP64) && defined(COMPILER2)
3620 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3621 // I0/I1 if the return value is long.
3622 Label not_long;
3623 __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3624 __ ldd(saved_Greturn1_addr, I0);
3625 __ bind(not_long);
3626 #endif
3627 __ ret();
3628 __ delayed()->restore();
3630 masm->flush();
3631 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3632 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3633 }
3635 #ifdef COMPILER2
3637 //------------------------------generate_uncommon_trap_blob--------------------
3638 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3639 // instead.
3640 void SharedRuntime::generate_uncommon_trap_blob() {
3641 // allocate space for the code
3642 ResourceMark rm;
3643 // setup code generation tools
3644 int pad = VerifyThread ? 512 : 0;
3645 if (UseStackBanging) {
3646 pad += StackShadowPages*16 + 32;
3647 }
3648 #ifdef _LP64
3649 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3650 #else
3651 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3652 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3653 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3654 #endif
3655 MacroAssembler* masm = new MacroAssembler(&buffer);
3656 Register O2UnrollBlock = O2;
3657 Register O2klass_index = O2;
3659 //
3660 // This is the entry point for all traps the compiler takes when it thinks
3661 // it cannot handle further execution of compiled code. The frame is
3662 // deoptimized in these cases and converted into interpreter frames for
3663 // execution
3664 // The steps taken by this frame are as follows:
3665 // - push a fake "unpack_frame"
3666 // - call the C routine Deoptimization::uncommon_trap (this function
3667 // packs the current compiled frame into vframe arrays and returns
3668 // information about the number and size of interpreter frames which
3669 // are equivalent to the frame which is being deoptimized)
3670 // - deallocate the "unpack_frame"
3671 // - deallocate the deoptimization frame
3672 // - in a loop using the information returned in the previous step
3673 // push interpreter frames;
3674 // - create a dummy "unpack_frame"
3675 // - call the C routine: Deoptimization::unpack_frames (this function
3676 // lays out values on the interpreter frame which was just created)
3677 // - deallocate the dummy unpack_frame
3678 // - return to the interpreter entry point
3679 //
3680 // Refer to the following methods for more information:
3681 // - Deoptimization::uncommon_trap
3682 // - Deoptimization::unpack_frames
3684 // the unloaded class index is in O0 (first parameter to this blob)
3686 // push a dummy "unpack_frame"
3687 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3688 // vframe array and return the UnrollBlock information
3689 __ save_frame(0);
3690 __ set_last_Java_frame(SP, noreg);
3691 __ mov(I0, O2klass_index);
3692 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3693 __ reset_last_Java_frame();
3694 __ mov(O0, O2UnrollBlock->after_save());
3695 __ restore();
3697 // deallocate the deoptimized frame taking care to preserve the return values
3698 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3699 __ restore();
3701 // Allocate new interpreter frame(s) and possible c2i adapter frame
3703 make_new_frames(masm, false);
3705 // push a dummy "unpack_frame" taking care of float return values and
3706 // call Deoptimization::unpack_frames to have the unpacker lay out
3707 // information in the interpreter frames just created and then return
3708 // to the interpreter entry point
3709 __ save_frame(0);
3710 __ set_last_Java_frame(SP, noreg);
3711 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3712 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3713 __ reset_last_Java_frame();
3714 __ ret();
3715 __ delayed()->restore();
3717 masm->flush();
3718 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3719 }
3721 #endif // COMPILER2
3723 //------------------------------generate_handler_blob-------------------
3724 //
3725 // Generate a special Compile2Runtime blob that saves all registers, and sets
3726 // up an OopMap.
3727 //
3728 // This blob is jumped to (via a breakpoint and the signal handler) from a
3729 // safepoint in compiled code. On entry to this blob, O7 contains the
3730 // address in the original nmethod at which we should resume normal execution.
3731 // Thus, this blob looks like a subroutine which must preserve lots of
3732 // registers and return normally. Note that O7 is never register-allocated,
3733 // so it is guaranteed to be free here.
3734 //
3736 // The hardest part of what this blob must do is to save the 64-bit %o
3737 // registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
3738 // an interrupt will chop off their heads. Making space in the caller's frame
3739 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3740 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3741 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
3742 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3743 // Tricky, tricky, tricky...
3745 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3746 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3748 // allocate space for the code
3749 ResourceMark rm;
3750 // setup code generation tools
3751 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3752 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3753 // even larger with TraceJumps
3754 int pad = TraceJumps ? 512 : 0;
3755 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3756 MacroAssembler* masm = new MacroAssembler(&buffer);
3757 int frame_size_words;
3758 OopMapSet *oop_maps = new OopMapSet();
3759 OopMap* map = NULL;
3761 int start = __ offset();
3763 bool cause_return = (poll_type == POLL_AT_RETURN);
3764 // If this causes a return before the processing, then do a "restore"
3765 if (cause_return) {
3766 __ restore();
3767 } else {
3768 // Make it look like we were called via the poll
3769 // so that the frame constructor always sees a valid return address
3770 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3771 __ sub(O7, frame::pc_return_offset, O7);
3772 }
3774 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3776 // setup last_Java_sp (blows G4)
3777 __ set_last_Java_frame(SP, noreg);
3779 // call into the runtime to handle the illegal instruction exception
3780 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3781 __ mov(G2_thread, O0);
3782 __ save_thread(L7_thread_cache);
3783 __ call(call_ptr);
3784 __ delayed()->nop();
3786 // Set an oopmap for the call site.
3787 // We need this not only for callee-saved registers, but also for volatile
3788 // registers that the compiler might be keeping live across a safepoint.
3790 oop_maps->add_gc_map( __ offset() - start, map);
3792 __ restore_thread(L7_thread_cache);
3793 // clear last_Java_sp
3794 __ reset_last_Java_frame();
3796 // Check for exceptions
3797 Label pending;
3799 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3800 __ br_notnull_short(O1, Assembler::pn, pending);
3802 RegisterSaver::restore_live_registers(masm);
3804 // We are back to the original state on entry and ready to go.
3806 __ retl();
3807 __ delayed()->nop();
3809 // Pending exception after the safepoint
3811 __ bind(pending);
3813 RegisterSaver::restore_live_registers(masm);
3815 // We are back to the original state on entry.
3817 // Tail-call forward_exception_entry, with the issuing PC in O7,
3818 // so it looks like the original nmethod called forward_exception_entry.
3819 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3820 __ JMP(O0, 0);
3821 __ delayed()->nop();
3823 // -------------
3824 // make sure all code is generated
3825 masm->flush();
3827 // return exception blob
3828 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3829 }
3831 //
3832 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3833 //
3834 // Generate a stub that calls into the VM to find out the proper destination
3835 // of a java call. All the argument registers are live at this point
3836 // but since this is generic code we don't know what they are and the caller
3837 // must do any GC of the args.
3838 //
3839 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3840 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3842 // allocate space for the code
3843 ResourceMark rm;
3844 // setup code generation tools
3845 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3846 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3847 // even larger with TraceJumps
3848 int pad = TraceJumps ? 512 : 0;
3849 CodeBuffer buffer(name, 1600 + pad, 512);
3850 MacroAssembler* masm = new MacroAssembler(&buffer);
3851 int frame_size_words;
3852 OopMapSet *oop_maps = new OopMapSet();
3853 OopMap* map = NULL;
3855 int start = __ offset();
3857 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3859 int frame_complete = __ offset();
3861 // setup last_Java_sp (blows G4)
3862 __ set_last_Java_frame(SP, noreg);
3864 // call into the runtime to find the proper destination of the call
3865 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3866 __ mov(G2_thread, O0);
3867 __ save_thread(L7_thread_cache);
3868 __ call(destination, relocInfo::runtime_call_type);
3869 __ delayed()->nop();
3871 // O0 contains the address we are going to jump to assuming no exception got installed
3873 // Set an oopmap for the call site.
3874 // We need this not only for callee-saved registers, but also for volatile
3875 // registers that the compiler might be keeping live across a safepoint.
3877 oop_maps->add_gc_map( __ offset() - start, map);
3879 __ restore_thread(L7_thread_cache);
3880 // clear last_Java_sp
3881 __ reset_last_Java_frame();
3883 // Check for exceptions
3884 Label pending;
3886 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3887 __ br_notnull_short(O1, Assembler::pn, pending);
3889 // get the returned Method*
3891 __ get_vm_result_2(G5_method);
3892 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
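// The runtime stashed the resolved Method* in the thread's vm_result_2
// field; writing it into the register-save area here means
// restore_live_registers below reloads it into G5_method for the callee.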
3894 // O0 is where we want to jump, overwrite G3 which is saved and scratch
3896 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3898 RegisterSaver::restore_live_registers(masm);
3900 // We are back to the original state on entry and ready to go.
3902 __ JMP(G3, 0);
3903 __ delayed()->nop();
3905 // Pending exception after the safepoint
3907 __ bind(pending);
3909 RegisterSaver::restore_live_registers(masm);
3911 // We are back to the original state on entry.
3913 // Tail-call forward_exception_entry, with the issuing PC in O7,
3914 // so it looks like the original nmethod called forward_exception_entry.
3915 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3916 __ JMP(O0, 0);
3917 __ delayed()->nop();
3919 // -------------
3920 // make sure all code is generated
3921 masm->flush();
3923 // return the blob
3924 // frame_size_words or bytes??
3925 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3926 }