Mon, 27 Aug 2012 15:17:17 -0700
6677625: Move platform specific flags from globals.hpp to globals_<arch>.hpp
Reviewed-by: kvn, dholmes, coleenp
Contributed-by: Tao Mao <tao.mao@oracle.com>
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
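
  // Schematic of that dance (illustrative sketch only, not emitted code):
  // a 64-bit value in %o0 survives a 'save' on the 32-bit build only if it
  // is parked somewhere window-spill-proof first, e.g. a thread-local slot:
  //
  //   stx   %o0, [thread + o_reg_temps + 0*8]   ! park the full 64 bits
  //   save  %sp, -frame_size, %sp               ! %o0 is now %i0 (32-bit safe only)
  //   ldx   [thread + o_reg_temps + 0*8], %o0   ! reload into the new window's %o0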
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };
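
  // The ((x + 7) & ~7) expression above is the usual mask trick: for any
  // non-negative x it yields the smallest multiple of 8 that is >= x, and
  // (unlike round_to) it folds to a compile-time constant. A minimal
  // standalone sketch (hypothetical helper, not VM code):
  //
  //   static int round_up_8(int x) { return (x + 7) & ~7; }
  //   // round_up_8(0) == 0, round_up_8(1) == 8, round_up_8(13) == 16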

 public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
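
// Worked example (the reserve size is platform-defined, so treat the numbers
// as illustrative): if out_preserve_stack_slots() returned 16, then stack
// slot 0 would map to byte offset (0 + 16) * 4 == 64 above SP, slot 3 to
// (3 + 16) * 4 == 76, and so on; slot numbers count 4-byte units.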

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot past
// the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to RegisterImpl::number_of_registers)
// are the 64-bit integer registers. Values 64-95 are the (32-bit only) float
// registers. Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers. For example, there
// is an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's
// unused (a 32-bit value was passed). If both are VMRegImpl::Bad(), it means
// no value was passed (used as a placeholder for the other half of longs and
// doubles in the 64-bit build). regs[].second() is either VMRegImpl::Bad() or
// regs[].first()+1 (regs[].first() may be misaligned in the C calling
// convention). Sparc never passes a value in regs[].second() but not
// regs[].first() (regs[].first() == VMRegImpl::Bad() && regs[].second() !=
// VMRegImpl::Bad()) nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
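
// A compact recap of the legal VMRegPair shapes described above, sketched
// with the same set1/set2/set_bad helpers the layout code below uses:
//
//   VMRegPair p;
//   p.set1(r);     // 32-bit value:  first() == r,  second() == Bad()
//   p.set2(r);     // 64-bit value:  first() == r,  second() == r->next()
//   p.set_bad();   // placeholder half of a long/double: both halves Bad()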


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. Values are
// packed in the registers. There is no backing varargs store for values in
// registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack. Then pack the first 8 float args
  // into F0-F7, extras spill to the stack. Then pad all register sets to
  // align. Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
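
  // Worked example of this packing (64-bit build, incoming args): for a
  // signature (int, long, float, double) the layout pass below produces
  // roughly
  //   int    -> I0        (set1)
  //   long   -> I1        (set2, one 64-bit register)
  //   float  -> F0        (set1)
  //   double -> F2/F3     (set2, aligned float pair)
  // with anything beyond 6 int args or 8 float args spilling to stack slots.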
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args. See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt = 0;
  int flt_reg_cnt = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                           stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
#ifdef COMPILER2
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
      // spare and available. This convention isn't used by the Sparc ABI or
      // anywhere else. If we're tiered then we don't use G-regs because c1
      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else // COMPILER2
      regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
      stk_reg_pairs += 2;
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live G-regs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
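
// SPARC load/store displacements are signed 13-bit immediates, so any offset
// outside [-4096, 4095] must travel through a register instead. A sketch of
// the test ensure_simm13_or_reg effectively applies (hypothetical standalone
// form, not the VM's implementation):
//
//   static bool fits_simm13(intptr_t x) { return -4096 <= x && x <= 4095; }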

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACKBIAS we can run
  // out of bits in the displacement to do loads and stores. Use g3 as
  // temporary displacement.
  if (!Assembler::is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}
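
// Conceptually range_check verifies that pc falls inside the blob's
// [code_start, code_end) range using only unsigned compares against the
// start address and the byte length. A C-style sketch of the same predicate
// (assuming pc and the bounds are raw addresses):
//
//   static bool in_range(uintptr_t pc, uintptr_t start, uintptr_t end) {
//     return (pc - start) < (end - start);   // single unsigned compare
//   }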

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {     // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle. Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null_short(L0, Assembler::pt, loop);

    __ restore();
  }

  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the methodOop. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp = L0;   // another scratch register
#else
    Register R_temp = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that the out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
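
// Worked example (the frame constants are platform-defined, so treat the
// numbers as illustrative): for i = 0..5 this returns O0..O5 directly. For
// i = 6, mem_parm_offset == 0 and the returned slot is
//   frame::memory_parameter_word_sp_offset * VMRegImpl::slots_per_word
//     - SharedRuntime::out_preserve_stack_slots()
// i.e. the first memory-parameter word, pre-biased so that adding
// out_preserve_stack_slots back in later lands on the real SP-relative slot.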


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                    // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
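
// Worked example of the V9 float rules above: for arguments
// (float, double, float), j runs 0, 1, 2 and the assignments are
//   j = 0, T_FLOAT  -> F1      (odd single:    1 + (0 << 1))
//   j = 1, T_DOUBLE -> F2/F3   (even/odd pair: 1 << 1)
//   j = 2, T_FLOAT  -> F5      (odd single:    1 + (2 << 1))
// so singles ride in the odd half of each 64-bit slot and doubles occupy the
// full even/odd pair.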


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}
1446 // Check and forward any pending exception. The thread is stored in
1447 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
1448 // is no exception handler. We merely pop this frame off and throw the
1449 // exception in the caller's frame.
1450 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1451 Label L;
1452 __ br_null(Rex_oop, false, Assembler::pt, L);
1453 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1454 // Since this is a native call, we *know* the proper exception handler
1455 // without calling into the VM: it's the empty function. Just pop this
1456 // frame and then jump to forward_exception_entry; O7 will contain the
1457 // native caller's return PC.
1458 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1459 __ jump_to(exception_entry, G3_scratch);
1460 __ delayed()->restore(); // Pop this frame off.
1461 __ bind(L);
1462 }
1464 // A simple move of an integer-like type
1465 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1466 if (src.first()->is_stack()) {
1467 if (dst.first()->is_stack()) {
1468 // stack to stack
1469 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1470 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1471 } else {
1472 // stack to reg
1473 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1474 }
1475 } else if (dst.first()->is_stack()) {
1476 // reg to stack
1477 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1478 } else {
1479 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1480 }
1481 }
1483 // On 64-bit we will store integer-like items to the stack as
1484 // 64-bit items (SPARC ABI), even though Java would only store
1485 // 32 bits for a parameter. On 32-bit it will simply be 32 bits.
1486 // So this routine does 32->32 on 32-bit and 32->64 on 64-bit.
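// (Editorial note: on LP64 the loads below are 32-bit ld's but the stores
// are st_ptr, widening the value to a full 64-bit stack slot; on 32-bit
// both sides are 32 bits.)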
1487 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1488 if (src.first()->is_stack()) {
1489 if (dst.first()->is_stack()) {
1490 // stack to stack
1491 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1492 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1493 } else {
1494 // stack to reg
1495 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1496 }
1497 } else if (dst.first()->is_stack()) {
1498 // reg to stack
1499 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1500 } else {
1501 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1502 }
1503 }
1506 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1507 if (src.first()->is_stack()) {
1508 if (dst.first()->is_stack()) {
1509 // stack to stack
1510 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1511 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1512 } else {
1513 // stack to reg
1514 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1515 }
1516 } else if (dst.first()->is_stack()) {
1517 // reg to stack
1518 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1519 } else {
1520 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1521 }
1522 }
1525 // An oop arg. Must pass a handle, not the oop itself
1526 static void object_move(MacroAssembler* masm,
1527 OopMap* map,
1528 int oop_handle_offset,
1529 int framesize_in_slots,
1530 VMRegPair src,
1531 VMRegPair dst,
1532 bool is_receiver,
1533 int* receiver_offset) {
1535 // must pass a handle. First figure out the location we use as a handle
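  // (Editorial note: a JNI handle here is just the address of the stack
  // slot holding the oop; for a NULL oop we pass a NULL handle instead --
  // the movr/movcc below zeroes rHandle whenever the oop is zero.)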
1537 if (src.first()->is_stack()) {
1538 // Oop is already on the stack
1539 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1540 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1541 __ ld_ptr(rHandle, 0, L4);
1542 #ifdef _LP64
1543 __ movr( Assembler::rc_z, L4, G0, rHandle );
1544 #else
1545 __ tst( L4 );
1546 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1547 #endif
1548 if (dst.first()->is_stack()) {
1549 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1550 }
1551 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1552 if (is_receiver) {
1553 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1554 }
1555 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1556 } else {
1557 // Oop is in an input register; we must flush it to the stack
1558 const Register rOop = src.first()->as_Register();
1559 const Register rHandle = L5;
1560 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1561 int offset = oop_slot*VMRegImpl::stack_slot_size;
1562 Label skip;
1563 __ st_ptr(rOop, SP, offset + STACK_BIAS);
1564 if (is_receiver) {
1565 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1566 }
1567 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1568 __ add(SP, offset + STACK_BIAS, rHandle);
1569 #ifdef _LP64
1570 __ movr( Assembler::rc_z, rOop, G0, rHandle );
1571 #else
1572 __ tst( rOop );
1573 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1574 #endif
1576 if (dst.first()->is_stack()) {
1577 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1578 } else {
1579 __ mov(rHandle, dst.first()->as_Register());
1580 }
1581 }
1582 }
1584 // A float arg may have to do a float reg to int reg conversion
1585 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1586 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1588 if (src.first()->is_stack()) {
1589 if (dst.first()->is_stack()) {
1590 // stack to stack the easiest of the bunch
1591 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1592 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1593 } else {
1594 // stack to reg
1595 if (dst.first()->is_Register()) {
1596 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1597 } else {
1598 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1599 }
1600 }
1601 } else if (dst.first()->is_stack()) {
1602 // reg to stack
1603 if (src.first()->is_Register()) {
1604 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1605 } else {
1606 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1607 }
1608 } else {
1609 // reg to reg
1610 if (src.first()->is_Register()) {
1611 if (dst.first()->is_Register()) {
1612 // gpr -> gpr
1613 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1614 } else {
1615 // gpr -> fpr
1616 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1617 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1618 }
1619 } else if (dst.first()->is_Register()) {
1620 // fpr -> gpr
1621 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1622 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1623 } else {
1624 // fpr -> fpr
1625 // In theory these overlap but the ordering is such that this is likely a nop
1626 if ( src.first() != dst.first()) {
1627 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1628 }
1629 }
1630 }
1631 }
1633 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1634 VMRegPair src_lo(src.first());
1635 VMRegPair src_hi(src.second());
1636 VMRegPair dst_lo(dst.first());
1637 VMRegPair dst_hi(dst.second());
1638 simple_move32(masm, src_lo, dst_lo);
1639 simple_move32(masm, src_hi, dst_hi);
1640 }
1642 // A long move
1643 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1645 // Do the simple ones here; else do two int moves
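  // Sketch of the cases handled below (editorial):
  //   reg64 -> reg64 : plain mov
  //   reg64 -> split : srax the MSW down, then move the LSW
  //   split -> reg64 : a single ldx if the two stack slots are adjacent
  //                    and aligned, otherwise reassemble with sllx/or3
  //   split -> split : two 32-bit moves via split_long_move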
1646 if (src.is_single_phys_reg() ) {
1647 if (dst.is_single_phys_reg()) {
1648 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1649 } else {
1650 // split src into two separate registers
1651 // Remember: hi means high address, i.e. the LSW on (big-endian) SPARC
1652 // Move msw to lsw
1653 if (dst.second()->is_reg()) {
1654 // MSW -> MSW
1655 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1656 // Now LSW -> LSW
1657 // this will only move lo -> lo and ignore hi
1658 VMRegPair split(dst.second());
1659 simple_move32(masm, src, split);
1660 } else {
1661 VMRegPair split(src.first(), L4->as_VMReg());
1662 // MSW -> MSW (lo, i.e. first word)
1663 __ srax(src.first()->as_Register(), 32, L4);
1664 split_long_move(masm, split, dst);
1665 }
1666 }
1667 } else if (dst.is_single_phys_reg()) {
1668 if (src.is_adjacent_aligned_on_stack(2)) {
1669 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1670 } else {
1671 // dst is a single reg.
1672 // Remember lo is low address not msb for stack slots
1673 // and lo is the "real" register for registers
1674 // src is split across two locations (regs and/or stack slots)
1676 VMRegPair split;
1678 if (src.first()->is_reg()) {
1679 // src.lo (msw) is a reg, src.hi is stk/reg
1680 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1681 split.set_pair(dst.first(), src.first());
1682 } else {
1683 // msw is stack move to L5
1684 // lsw is stack move to dst.lo (real reg)
1685 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1686 split.set_pair(dst.first(), L5->as_VMReg());
1687 }
1689 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1690 // msw -> src.lo/L5, lsw -> dst.lo
1691 split_long_move(masm, src, split);
1693 // dst.lo now has the LSW in the correct (low) position; shift the
1694 // MSW half up and OR the two halves together.
1695 __ sllx(split.first()->as_Register(), 32, L5);
1697 const Register d = dst.first()->as_Register();
1698 __ or3(L5, d, d);
1699 }
1700 } else {
1701 // For LP64 we can probably do better.
1702 split_long_move(masm, src, dst);
1703 }
1704 }
1706 // A double move
1707 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1709 // The painful thing here is that like long_move a VMRegPair might be
1710 // 1: a single physical register
1711 // 2: two physical registers (v8)
1712 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1713 // 4: two stack slots
1715 // Since src always comes from the Java calling convention we know that
1716 // the src pair is always either all registers or all stack (and aligned),
1718 // while dst may mix a register [lo] and a stack slot [hi] (case 3 above).
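  // (Editorial note: SPARC has no direct gpr<->fpr move, so the reg-to-reg
  // cases below bounce values through the scratch area just below FP --
  // the "2 slots for moves" in the wrapper's frame layout.)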
1719 if (src.first()->is_stack()) {
1720 if (dst.first()->is_stack()) {
1721 // stack to stack the easiest of the bunch
1722 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1723 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1724 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1725 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1726 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1727 } else {
1728 // stack to reg
1729 if (dst.second()->is_stack()) {
1730 // stack -> reg, stack -> stack
1731 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1732 if (dst.first()->is_Register()) {
1733 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1734 } else {
1735 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1736 }
1737 // This was missing. (very rare case)
1738 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1739 } else {
1740 // stack -> reg
1741 // Eventually optimize for alignment QQQ
1742 if (dst.first()->is_Register()) {
1743 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1744 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1745 } else {
1746 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1747 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1748 }
1749 }
1750 }
1751 } else if (dst.first()->is_stack()) {
1752 // reg to stack
1753 if (src.first()->is_Register()) {
1754 // Eventually optimize for alignment QQQ
1755 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1756 if (src.second()->is_stack()) {
1757 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1758 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1759 } else {
1760 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1761 }
1762 } else {
1763 // fpr to stack
1764 if (src.second()->is_stack()) {
1765 ShouldNotReachHere();
1766 } else {
1767 // Is the stack aligned?
1768 if (reg2offset(dst.first()) & 0x7) {
1769 // Not 8-byte aligned, so store as two single-word halves
1770 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1771 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1772 } else {
1773 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1774 }
1775 }
1776 }
1777 } else {
1778 // reg to reg
1779 if (src.first()->is_Register()) {
1780 if (dst.first()->is_Register()) {
1781 // gpr -> gpr
1782 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1783 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1784 } else {
1785 // gpr -> fpr
1786 // ought to be able to do a single store
1787 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1788 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1789 // ought to be able to do a single load
1790 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1791 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1792 }
1793 } else if (dst.first()->is_Register()) {
1794 // fpr -> gpr
1795 // ought to be able to do a single store
1796 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1797 // ought to be able to do a single load
1798 // REMEMBER first() is low address not LSB
1799 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1800 if (dst.second()->is_Register()) {
1801 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1802 } else {
1803 __ ld(FP, -4 + STACK_BIAS, L4);
1804 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1805 }
1806 } else {
1807 // fpr -> fpr
1808 // In theory these overlap but the ordering is such that this is likely a nop
1809 if ( src.first() != dst.first()) {
1810 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1811 }
1812 }
1813 }
1814 }
1816 // Creates an inner frame if one hasn't already been created, and
1817 // saves a copy of the thread in L7_thread_cache
1818 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1819 if (!*already_created) {
1820 __ save_frame(0);
1821 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1822 // Don't use save_thread because it smashes G2 and we merely want to save a
1823 // copy
1824 __ mov(G2_thread, L7_thread_cache);
1825 *already_created = true;
1826 }
1827 }
1830 static void save_or_restore_arguments(MacroAssembler* masm,
1831 const int stack_slots,
1832 const int total_in_args,
1833 const int arg_save_area,
1834 OopMap* map,
1835 VMRegPair* in_regs,
1836 BasicType* in_sig_bt) {
1837 // if map is non-NULL then the code should store the values,
1838 // otherwise it should load them.
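  // (Editorial note: this is called in pairs from
  // check_needs_gc_for_critical_native -- first with a map to save the
  // values and record any oop locations, then with map == NULL to reload
  // them after the VM call.)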
1839 if (map != NULL) {
1840 // Fill in the map
1841 for (int i = 0; i < total_in_args; i++) {
1842 if (in_sig_bt[i] == T_ARRAY) {
1843 if (in_regs[i].first()->is_stack()) {
1844 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1845 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1846 } else if (in_regs[i].first()->is_Register()) {
1847 map->set_oop(in_regs[i].first());
1848 } else {
1849 ShouldNotReachHere();
1850 }
1851 }
1852 }
1853 }
1855 // Save or restore double word values
1856 int handle_index = 0;
1857 for (int i = 0; i < total_in_args; i++) {
1858 int slot = handle_index + arg_save_area;
1859 int offset = slot * VMRegImpl::stack_slot_size;
1860 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1861 const Register reg = in_regs[i].first()->as_Register();
1862 if (reg->is_global()) {
1863 handle_index += 2;
1864 assert(handle_index <= stack_slots, "overflow");
1865 if (map != NULL) {
1866 __ stx(reg, SP, offset + STACK_BIAS);
1867 } else {
1868 __ ldx(SP, offset + STACK_BIAS, reg);
1869 }
1870 }
1871 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1872 handle_index += 2;
1873 assert(handle_index <= stack_slots, "overflow");
1874 if (map != NULL) {
1875 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1876 } else {
1877 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1878 }
1879 }
1880 }
1881 // Save floats
1882 for (int i = 0; i < total_in_args; i++) {
1883 int slot = handle_index + arg_save_area;
1884 int offset = slot * VMRegImpl::stack_slot_size;
1885 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1886 handle_index++;
1887 assert(handle_index <= stack_slots, "overflow");
1888 if (map != NULL) {
1889 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1890 } else {
1891 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1892 }
1893 }
1894 }
1896 }
1899 // Check GC_locker::needs_gc and enter the runtime if it's true. This
1900 // keeps a new JNI critical region from starting until a GC has been
1901 // forced. Save down any oops in registers and describe them in an
1902 // OopMap.
1903 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1904 const int stack_slots,
1905 const int total_in_args,
1906 const int arg_save_area,
1907 OopMapSet* oop_maps,
1908 VMRegPair* in_regs,
1909 BasicType* in_sig_bt) {
1910 __ block_comment("check GC_locker::needs_gc");
1911 Label cont;
1912 AddressLiteral sync_state(GC_locker::needs_gc_address());
1913 __ load_bool_contents(sync_state, G3_scratch);
1914 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1915 __ delayed()->nop();
1917 // Save down any values that are live in registers and call into the
1918 // runtime to halt for a GC
1919 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1920 save_or_restore_arguments(masm, stack_slots, total_in_args,
1921 arg_save_area, map, in_regs, in_sig_bt);
1923 __ mov(G2_thread, L7_thread_cache);
1925 __ set_last_Java_frame(SP, noreg);
1927 __ block_comment("block_for_jni_critical");
1928 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1929 __ delayed()->mov(L7_thread_cache, O0);
1930 oop_maps->add_gc_map( __ offset(), map);
1932 __ restore_thread(L7_thread_cache); // restore G2_thread
1933 __ reset_last_Java_frame();
1935 // Reload all the register arguments
1936 save_or_restore_arguments(masm, stack_slots, total_in_args,
1937 arg_save_area, NULL, in_regs, in_sig_bt);
1939 __ bind(cont);
1940 #ifdef ASSERT
1941 if (StressCriticalJNINatives) {
1942 // Stress register saving
1943 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1944 save_or_restore_arguments(masm, stack_slots, total_in_args,
1945 arg_save_area, map, in_regs, in_sig_bt);
1946 // Destroy argument registers
1947 for (int i = 0; i < total_in_args; i++) {
1948 if (in_regs[i].first()->is_Register()) {
1949 const Register reg = in_regs[i].first()->as_Register();
1950 if (reg->is_global()) {
1951 __ mov(G0, reg);
1952 }
1953 } else if (in_regs[i].first()->is_FloatRegister()) {
1954 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1955 }
1956 }
1958 save_or_restore_arguments(masm, stack_slots, total_in_args,
1959 arg_save_area, NULL, in_regs, in_sig_bt);
1960 }
1961 #endif
1962 }
1964 // Unpack an array argument into a pointer to the body and the length
1965 // if the array is non-null, otherwise pass 0 for both.
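// (Editorial example: for a critical native taking a byte[], the single
// oop arg becomes a (jint length, jbyte* body) pair; a NULL array passes
// 0 for both.)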
1966 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1967 // Pass the length, ptr pair
1968 Label is_null, done;
1969 if (reg.first()->is_stack()) {
1970 VMRegPair tmp = reg64_to_VMRegPair(L2);
1971 // Load the arg up from the stack
1972 move_ptr(masm, reg, tmp);
1973 reg = tmp;
1974 }
1975 __ cmp(reg.first()->as_Register(), G0);
1976 __ brx(Assembler::equal, false, Assembler::pt, is_null);
1977 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1978 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1979 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1980 move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1981 __ ba_short(done);
1982 __ bind(is_null);
1983 // Pass zeros
1984 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1985 move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1986 __ bind(done);
1987 }
1989 static void verify_oop_args(MacroAssembler* masm,
1990 int total_args_passed,
1991 const BasicType* sig_bt,
1992 const VMRegPair* regs) {
1993 Register temp_reg = G5_method; // not part of any compiled calling seq
1994 if (VerifyOops) {
1995 for (int i = 0; i < total_args_passed; i++) {
1996 if (sig_bt[i] == T_OBJECT ||
1997 sig_bt[i] == T_ARRAY) {
1998 VMReg r = regs[i].first();
1999 assert(r->is_valid(), "bad oop arg");
2000 if (r->is_stack()) {
2001 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
2002 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
2003 __ ld_ptr(SP, ld_off, temp_reg);
2004 __ verify_oop(temp_reg);
2005 } else {
2006 __ verify_oop(r->as_Register());
2007 }
2008 }
2009 }
2010 }
2011 }
2013 static void gen_special_dispatch(MacroAssembler* masm,
2014 int total_args_passed,
2015 int comp_args_on_stack,
2016 vmIntrinsics::ID special_dispatch,
2017 const BasicType* sig_bt,
2018 const VMRegPair* regs) {
2019 verify_oop_args(masm, total_args_passed, sig_bt, regs);
2021 // Now write the args into the outgoing interpreter space
2022 bool has_receiver = false;
2023 Register receiver_reg = noreg;
2024 int member_arg_pos = -1;
2025 Register member_reg = noreg;
2026 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
2027 if (ref_kind != 0) {
2028 member_arg_pos = total_args_passed - 1; // trailing MemberName argument
2029 member_reg = G5_method; // known to be free at this point
2030 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
2031 } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
2032 has_receiver = true;
2033 } else {
2034 fatal(err_msg("special_dispatch=%d", special_dispatch));
2035 }
2037 if (member_reg != noreg) {
2038 // Load the member_arg into register, if necessary.
2039 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
2040 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
2041 VMReg r = regs[member_arg_pos].first();
2042 assert(r->is_valid(), "bad member arg");
2043 if (r->is_stack()) {
2044 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
2045 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
2046 __ ld_ptr(SP, ld_off, member_reg);
2047 } else {
2048 // no data motion is needed
2049 member_reg = r->as_Register();
2050 }
2051 }
2053 if (has_receiver) {
2054 // Make sure the receiver is loaded into a register.
2055 assert(total_args_passed > 0, "oob");
2056 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
2057 VMReg r = regs[0].first();
2058 assert(r->is_valid(), "bad receiver arg");
2059 if (r->is_stack()) {
2060 // Porting note: This assumes that compiled calling conventions always
2061 // pass the receiver oop in a register. If this is not true on some
2062 // platform, pick a temp and load the receiver from stack.
2063 assert(false, "receiver always in a register");
2064 receiver_reg = G3_scratch; // known to be free at this point
2065 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
2066 ld_off = __ ensure_simm13_or_reg(ld_off, receiver_reg); // use receiver_reg, not member_reg, as the temp
2067 __ ld_ptr(SP, ld_off, receiver_reg);
2068 } else {
2069 // no data motion is needed
2070 receiver_reg = r->as_Register();
2071 }
2072 }
2074 // Figure out which address we are really jumping to:
2075 MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
2076 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
2077 }
2079 // ---------------------------------------------------------------------------
2080 // Generate a native wrapper for a given method. The method takes arguments
2081 // in the Java compiled code convention, marshals them to the native
2082 // convention (handlizes oops, etc), transitions to native, makes the call,
2083 // returns to java state (possibly blocking), unhandlizes any result and
2084 // returns.
2085 //
2086 // Critical native functions are a shorthand for the use of
2087 // GetPrimitiveArrayCritical and disallow the use of any other JNI
2088 // functions. The wrapper is expected to unpack the arguments before
2089 // passing them to the callee and perform checks before and after the
2090 // native call to ensure that the GC_locker
2091 // lock_critical/unlock_critical semantics are followed. Some other
2092 // parts of JNI setup are skipped, like the tear down of the JNI handle
2093 // block and the check for pending exceptions, since it's impossible
2094 // for them to be thrown.
2095 //
2096 // They are roughly structured like this:
2097 // if (GC_locker::needs_gc())
2098 // SharedRuntime::block_for_jni_critical();
2099 // transition to thread_in_native
2100 // unpack array arguments and call native entry point
2101 // check for safepoint in progress
2102 // check if any thread suspend flags are set
2103 // call into JVM and possibly unlock the JNI critical lock
2104 // if a GC was suppressed while in the critical native.
2105 // transition back to thread_in_Java
2106 // return to caller
2107 //
2108 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
2109 methodHandle method,
2110 int compile_id,
2111 int total_in_args,
2112 int comp_args_on_stack, // in VMRegStackSlots
2113 BasicType* in_sig_bt,
2114 VMRegPair* in_regs,
2115 BasicType ret_type) {
2116 if (method->is_method_handle_intrinsic()) {
2117 vmIntrinsics::ID iid = method->intrinsic_id();
2118 intptr_t start = (intptr_t)__ pc();
2119 int vep_offset = ((intptr_t)__ pc()) - start;
2120 gen_special_dispatch(masm,
2121 total_in_args,
2122 comp_args_on_stack,
2123 method->intrinsic_id(),
2124 in_sig_bt,
2125 in_regs);
2126 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
2127 __ flush();
2128 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
2129 return nmethod::new_native_nmethod(method,
2130 compile_id,
2131 masm->code(),
2132 vep_offset,
2133 frame_complete,
2134 stack_slots / VMRegImpl::slots_per_word,
2135 in_ByteSize(-1),
2136 in_ByteSize(-1),
2137 (OopMapSet*)NULL);
2138 }
2139 bool is_critical_native = true;
2140 address native_func = method->critical_native_function();
2141 if (native_func == NULL) {
2142 native_func = method->native_function();
2143 is_critical_native = false;
2144 }
2145 assert(native_func != NULL, "must have function");
2147 // Native nmethod wrappers never take possession of the oop arguments.
2148 // So the caller will gc the arguments. The only thing we need an
2149 // oopMap for is if the call is static
2150 //
2151 // An OopMap for lock (and class if static), and one for the VM call itself
2152 OopMapSet *oop_maps = new OopMapSet();
2153 intptr_t start = (intptr_t)__ pc();
2155 // First thing, make an ic check to see if we should even be here
2156 {
2157 Label L;
2158 const Register temp_reg = G3_scratch;
2159 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2160 __ verify_oop(O0);
2161 __ load_klass(O0, temp_reg);
2162 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2164 __ jump_to(ic_miss, temp_reg);
2165 __ delayed()->nop();
2166 __ align(CodeEntryAlignment);
2167 __ bind(L);
2168 }
2170 int vep_offset = ((intptr_t)__ pc()) - start;
2172 #ifdef COMPILER1
2173 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
2174 // Object.hashCode can pull the hashCode from the header word
2175 // instead of doing a full VM transition once it's been computed.
2176 // Since hashCode is usually polymorphic at call sites we can't do
2177 // this optimization at the call site without a lot of work.
2178 Label slowCase;
2179 Register receiver = O0;
2180 Register result = O0;
2181 Register header = G3_scratch;
2182 Register hash = G3_scratch; // overwrite header value with hash value
2183 Register mask = G1; // to get hash field from header
2185 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
2186 // We depend on hash_mask being at most 32 bits and avoid the use of
2187 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2188 // vm: see markOop.hpp.
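    // (Editorial sketch of the test: hash = (mark >> hash_shift) & hash_mask.
    // The object must be unlocked, and not biased, for the mark word to
    // hold a hash, and a zero field means no identity hash has been
    // assigned yet; either way we fall to slowCase.)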
2189 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
2190 __ sethi(markOopDesc::hash_mask, mask);
2191 __ btst(markOopDesc::unlocked_value, header);
2192 __ br(Assembler::zero, false, Assembler::pn, slowCase);
2193 if (UseBiasedLocking) {
2194 // Check if biased and fall through to runtime if so
2195 __ delayed()->nop();
2196 __ btst(markOopDesc::biased_lock_bit_in_place, header);
2197 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2198 }
2199 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2201 // Check for a valid (non-zero) hash code and get its value.
2202 #ifdef _LP64
2203 __ srlx(header, markOopDesc::hash_shift, hash);
2204 #else
2205 __ srl(header, markOopDesc::hash_shift, hash);
2206 #endif
2207 __ andcc(hash, mask, hash);
2208 __ br(Assembler::equal, false, Assembler::pn, slowCase);
2209 __ delayed()->nop();
2211 // leaf return.
2212 __ retl();
2213 __ delayed()->mov(hash, result);
2214 __ bind(slowCase);
2215 }
2216 #endif // COMPILER1
2219 // We have received a description of where all the java args are located
2220 // on entry to the wrapper. We need to convert these args to where
2221 // the jni function will expect them. To figure out where they go
2222 // we convert the java signature to a C signature by inserting
2223 // the hidden arguments as arg[0] and possibly arg[1] (static method)
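  // (Editorial example: a static method (int, double) becomes the C
  // signature (JNIEnv*, jclass, jint, jdouble); for a critical native each
  // T_ARRAY instead expands to an (int length, elem*) pair and the
  // JNIEnv*/mirror arguments are dropped.)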
2225 int total_c_args = total_in_args;
2226 int total_save_slots = 6 * VMRegImpl::slots_per_word;
2227 if (!is_critical_native) {
2228 total_c_args += 1;
2229 if (method->is_static()) {
2230 total_c_args++;
2231 }
2232 } else {
2233 for (int i = 0; i < total_in_args; i++) {
2234 if (in_sig_bt[i] == T_ARRAY) {
2235 // These have to be saved and restored across the safepoint
2236 total_c_args++;
2237 }
2238 }
2239 }
2241 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2242 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2243 BasicType* in_elem_bt = NULL;
2245 int argc = 0;
2246 if (!is_critical_native) {
2247 out_sig_bt[argc++] = T_ADDRESS;
2248 if (method->is_static()) {
2249 out_sig_bt[argc++] = T_OBJECT;
2250 }
2252 for (int i = 0; i < total_in_args ; i++ ) {
2253 out_sig_bt[argc++] = in_sig_bt[i];
2254 }
2255 } else {
2256 Thread* THREAD = Thread::current();
2257 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2258 SignatureStream ss(method->signature());
2259 for (int i = 0; i < total_in_args ; i++ ) {
2260 if (in_sig_bt[i] == T_ARRAY) {
2261 // Arrays are passed as int, elem* pair
2262 out_sig_bt[argc++] = T_INT;
2263 out_sig_bt[argc++] = T_ADDRESS;
2264 Symbol* atype = ss.as_symbol(CHECK_NULL);
2265 const char* at = atype->as_C_string();
2266 if (strlen(at) == 2) {
2267 assert(at[0] == '[', "must be");
2268 switch (at[1]) {
2269 case 'B': in_elem_bt[i] = T_BYTE; break;
2270 case 'C': in_elem_bt[i] = T_CHAR; break;
2271 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2272 case 'F': in_elem_bt[i] = T_FLOAT; break;
2273 case 'I': in_elem_bt[i] = T_INT; break;
2274 case 'J': in_elem_bt[i] = T_LONG; break;
2275 case 'S': in_elem_bt[i] = T_SHORT; break;
2276 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2277 default: ShouldNotReachHere();
2278 }
2279 }
2280 } else {
2281 out_sig_bt[argc++] = in_sig_bt[i];
2282 in_elem_bt[i] = T_VOID;
2283 }
2284 if (in_sig_bt[i] != T_VOID) {
2285 assert(in_sig_bt[i] == ss.type(), "must match");
2286 ss.next();
2287 }
2288 }
2289 }
2291 // Now figure out where the args must be stored and how much stack space
2292 // they require (neglecting out_preserve_stack_slots but including space for
2293 // storing the 1st six register arguments). It's weird; see int_stk_helper.
2294 //
2295 int out_arg_slots;
2296 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2298 if (is_critical_native) {
2299 // Critical natives may have to call out so they need a save area
2300 // for register arguments.
2301 int double_slots = 0;
2302 int single_slots = 0;
2303 for ( int i = 0; i < total_in_args; i++) {
2304 if (in_regs[i].first()->is_Register()) {
2305 const Register reg = in_regs[i].first()->as_Register();
2306 switch (in_sig_bt[i]) {
2307 case T_ARRAY:
2308 case T_BOOLEAN:
2309 case T_BYTE:
2310 case T_SHORT:
2311 case T_CHAR:
2312 case T_INT: assert(reg->is_in(), "don't need to save these"); break;
2313 case T_LONG: if (reg->is_global()) double_slots++; break;
2314 default: ShouldNotReachHere();
2315 }
2316 } else if (in_regs[i].first()->is_FloatRegister()) {
2317 switch (in_sig_bt[i]) {
2318 case T_FLOAT: single_slots++; break;
2319 case T_DOUBLE: double_slots++; break;
2320 default: ShouldNotReachHere();
2321 }
2322 }
2323 }
2324 total_save_slots = double_slots * 2 + single_slots;
2325 }
2327 // Compute framesize for the wrapper. We need to handlize all oops in
2328 // registers. We must create space for them here that is disjoint from
2329 // the windowed save area because we have no control over when we might
2330 // flush the window again and overwrite values that gc has since modified.
2331 // (The live window race)
2332 //
2333 // We always just allocate 6 words for storing down these objects. This allows
2334 // us to simply record the base and use the Ireg number to decide which
2335 // slot to use. (Note that the reg number is the inbound number not the
2336 // outbound number).
2337 // We must shuffle args to match the native convention, and include var-args space.
2339 // Calculate the total number of stack slots we will need.
2341 // First count the abi requirement plus all of the outgoing args
2342 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2344 // Now the space for the inbound oop handle area
2346 int oop_handle_offset = round_to(stack_slots, 2);
2347 stack_slots += total_save_slots;
2349 // Now any space we need for handlizing a klass if static method
2351 int klass_slot_offset = 0;
2352 int klass_offset = -1;
2353 int lock_slot_offset = 0;
2354 bool is_static = false;
2356 if (method->is_static()) {
2357 klass_slot_offset = stack_slots;
2358 stack_slots += VMRegImpl::slots_per_word;
2359 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2360 is_static = true;
2361 }
2363 // Plus a lock if needed
2365 if (method->is_synchronized()) {
2366 lock_slot_offset = stack_slots;
2367 stack_slots += VMRegImpl::slots_per_word;
2368 }
2370 // Now a place to save return value or as a temporary for any gpr -> fpr moves
2371 stack_slots += 2;
2373 // OK, the space we have allocated will look like:
2374 //
2375 //
2376 // FP-> | |
2377 // |---------------------|
2378 // | 2 slots for moves |
2379 // |---------------------|
2380 // | lock box (if sync) |
2381 // |---------------------| <- lock_slot_offset
2382 // | klass (if static) |
2383 // |---------------------| <- klass_slot_offset
2384 // | oopHandle area |
2385 // |---------------------| <- oop_handle_offset
2386 // | outbound memory |
2387 // | based arguments |
2388 // | |
2389 // |---------------------|
2390 // | vararg area |
2391 // |---------------------|
2392 // | |
2393 // SP-> | out_preserved_slots |
2394 //
2395 //
2398 // Now compute the actual number of stack words we need, rounding to keep
2399 // the stack properly aligned.
2400 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2402 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2404 // Generate stack overflow check before creating frame
2405 __ generate_stack_overflow_check(stack_size);
2407 // Generate a new frame for the wrapper.
2408 __ save(SP, -stack_size, SP);
2410 int frame_complete = ((intptr_t)__ pc()) - start;
2412 __ verify_thread();
2414 if (is_critical_native) {
2415 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
2416 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2417 }
2419 //
2420 // We immediately shuffle the arguments so that, for any vm call we have
2421 // to make from here on out (sync slow path, jvmti, etc.), we will have
2422 // captured the oops from our caller and have a valid oopMap for
2423 // them.
2425 // -----------------
2426 // The Grand Shuffle
2427 //
2428 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2429 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2430 // the class mirror instead of a receiver. This pretty much guarantees that
2431 // register layout will not match. We ignore these extra arguments during
2432 // the shuffle. The shuffle is described by the two calling convention
2433 // vectors we have in our possession. We simply walk the java vector to
2434 // get the source locations and the c vector to get the destinations.
2435 // Because we have a new window and the argument registers are completely
2436 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2437 // here.
2439 // This is a trick. We double the stack slots so we can claim
2440 // the oops in the caller's frame. Since we are sure to have
2441 // more args than the caller, doubling is enough to make
2442 // sure we can capture all the incoming oop args from the
2443 // caller.
2444 //
2445 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2446 // Record sp-based slot for receiver on stack for non-static methods
2447 int receiver_offset = -1;
2449 // We move the arguments backward because the floating point
2450 // destination will always be a register with a greater or equal
2451 // register number, or a stack slot.
2453 #ifdef ASSERT
2454 bool reg_destroyed[RegisterImpl::number_of_registers];
2455 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2456 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2457 reg_destroyed[r] = false;
2458 }
2459 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2460 freg_destroyed[f] = false;
2461 }
2463 #endif /* ASSERT */
2465 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2467 #ifdef ASSERT
2468 if (in_regs[i].first()->is_Register()) {
2469 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2470 } else if (in_regs[i].first()->is_FloatRegister()) {
2471 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2472 }
2473 if (out_regs[c_arg].first()->is_Register()) {
2474 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2475 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2476 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2477 }
2478 #endif /* ASSERT */
2480 switch (in_sig_bt[i]) {
2481 case T_ARRAY:
2482 if (is_critical_native) {
2483 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2484 c_arg--;
2485 break;
2486 }
2487 case T_OBJECT:
2488 assert(!is_critical_native, "no oop arguments");
2489 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2490 ((i == 0) && (!is_static)),
2491 &receiver_offset);
2492 break;
2493 case T_VOID:
2494 break;
2496 case T_FLOAT:
2497 float_move(masm, in_regs[i], out_regs[c_arg]);
2498 break;
2500 case T_DOUBLE:
2501 assert( i + 1 < total_in_args &&
2502 in_sig_bt[i + 1] == T_VOID &&
2503 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2504 double_move(masm, in_regs[i], out_regs[c_arg]);
2505 break;
2507 case T_LONG :
2508 long_move(masm, in_regs[i], out_regs[c_arg]);
2509 break;
2511 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2513 default:
2514 move32_64(masm, in_regs[i], out_regs[c_arg]);
2515 }
2516 }
2518 // Pre-load a static method's oop into O1. Used both by locking code and
2519 // the normal JNI call code.
2520 if (method->is_static() && !is_critical_native) {
2521 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2523 // Now handlize the static class mirror in O1. It's known not-null.
2524 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2525 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2526 __ add(SP, klass_offset + STACK_BIAS, O1);
2527 }
2530 const Register L6_handle = L6;
2532 if (method->is_synchronized()) {
2533 assert(!is_critical_native, "unhandled");
2534 __ mov(O1, L6_handle);
2535 }
2537 // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2538 // except O6/O7. So if we must call out we must push a new frame. We immediately
2539 // push a new frame and flush the windows.
2540 #ifdef _LP64
2541 intptr_t thepc = (intptr_t) __ pc();
2542 {
2543 address here = __ pc();
2544 // Call the next instruction
2545 __ call(here + 8, relocInfo::none);
2546 __ delayed()->nop();
2547 }
2548 #else
2549 intptr_t thepc = __ load_pc_address(O7, 0);
2550 #endif /* _LP64 */
2552 // We use the same pc/oopMap repeatedly when we call out
2553 oop_maps->add_gc_map(thepc - start, map);
2555 // O7 now has the pc loaded that we will use when we finally call to native.
2557 // Save thread in L7; it crosses a bunch of VM calls below
2558 // Don't use save_thread because it smashes G2 and we merely
2559 // want to save a copy
2560 __ mov(G2_thread, L7_thread_cache);
2563 // If we create an inner frame, once is plenty;
2564 // when we create it we must also save G2_thread
2565 bool inner_frame_created = false;
2567 // dtrace method entry support
2568 {
2569 SkipIfEqual skip_if(
2570 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2571 // create inner frame
2572 __ save_frame(0);
2573 __ mov(G2_thread, L7_thread_cache);
2574 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2575 __ call_VM_leaf(L7_thread_cache,
2576 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2577 G2_thread, O1);
2578 __ restore();
2579 }
2581 // RedefineClasses() tracing support for obsolete method entry
2582 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2583 // create inner frame
2584 __ save_frame(0);
2585 __ mov(G2_thread, L7_thread_cache);
2586 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2587 __ call_VM_leaf(L7_thread_cache,
2588 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2589 G2_thread, O1);
2590 __ restore();
2591 }
2593 // We are in the jni frame unless inner_frame_created is true, in which
2594 // case we are one frame deeper (the "inner" frame). In the "inner"
2595 // frame the args are in the Iregs; in the jni frame they
2596 // are in the Oregs.
2597 // If we ever need to go to the VM (for locking, jvmti) then
2598 // we will always be in the "inner" frame.
2600 // Lock a synchronized method
2601 int lock_offset = -1; // Set if locked
2602 if (method->is_synchronized()) {
2603 Register Roop = O1;
2604 const Register L3_box = L3;
2606 create_inner_frame(masm, &inner_frame_created);
2608 __ ld_ptr(I1, 0, O1);
2609 Label done;
2611 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2612 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2613 #ifdef ASSERT
2614 if (UseBiasedLocking) {
2615 // making the box point to itself will make it clear it went unused
2616 // but also be obviously invalid
2617 __ st_ptr(L3_box, L3_box, 0);
2618 }
2619 #endif // ASSERT
2620 //
2621 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2622 //
2623 __ compiler_lock_object(Roop, L1, L3_box, L2);
2624 __ br(Assembler::equal, false, Assembler::pt, done);
2625 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2628 // None of the above fast optimizations worked so we have to get into the
2629 // slow case of monitor enter. Inline a special case of call_VM that
2630 // disallows any pending_exception.
2631 __ mov(Roop, O0); // Need oop in O0
2632 __ mov(L3_box, O1);
2634 // Record last_Java_sp, in case the VM code releases the JVM lock.
2636 __ set_last_Java_frame(FP, I7);
2638 // do the call
2639 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2640 __ delayed()->mov(L7_thread_cache, O2);
2642 __ restore_thread(L7_thread_cache); // restore G2_thread
2643 __ reset_last_Java_frame();
2645 #ifdef ASSERT
2646 { Label L;
2647 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2648 __ br_null_short(O0, Assembler::pt, L);
2649 __ stop("no pending exception allowed on exit from IR::monitorenter");
2650 __ bind(L);
2651 }
2652 #endif
2653 __ bind(done);
2654 }
2657 // Finally just about ready to make the JNI call
2659 __ flush_windows();
2660 if (inner_frame_created) {
2661 __ restore();
2662 } else {
2663 // Store only what we need from this frame
2664 // QQQ I think that on non-v9 (like we care) we don't need these saves
2665 // either, as the flush traps and the current window goes too.
2666 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2667 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2668 }
2670 // get JNIEnv* which is first argument to native
2671 if (!is_critical_native) {
2672 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2673 }
2675 // Use that pc we placed in O7 a while back as the current frame anchor
2676 __ set_last_Java_frame(SP, O7);
2678 // We flushed the windows ages ago; now mark them as flushed before transitioning.
2679 __ set(JavaFrameAnchor::flushed, G3_scratch);
2680 __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2682 // Transition from _thread_in_Java to _thread_in_native.
2683 __ set(_thread_in_native, G3_scratch);
2685 #ifdef _LP64
2686 AddressLiteral dest(native_func);
2687 __ relocate(relocInfo::runtime_call_type);
2688 __ jumpl_to(dest, O7, O7);
2689 #else
2690 __ call(native_func, relocInfo::runtime_call_type);
2691 #endif
2692 __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2694 __ restore_thread(L7_thread_cache); // restore G2_thread
2696 // Unpack native results. For int-types, we do any needed sign-extension
2697 // and move things into I0. The return value there will survive any VM
2698 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2699 // specially in the slow-path code.
2700 switch (ret_type) {
2701 case T_VOID: break; // Nothing to do!
2702 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2703 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2704 // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
2705 case T_LONG:
2706 #ifndef _LP64
2707 __ mov(O1, I1);
2708 #endif
2709 // Fall thru
2710 case T_OBJECT: // Really a handle; cannot de-handlize until after reclaiming jvm_lock
2711 case T_ARRAY:
2712 case T_INT:
2713 __ mov(O0, I0);
2714 break;
2715 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
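    // (The T_BOOLEAN trick above: subcc computes 0 - O0, setting the carry
    // flag iff O0 != 0; addc then materializes that flag into I0,
    // normalizing any nonzero value to 1.)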
2716 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2717 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2718 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2720 default:
2721 ShouldNotReachHere();
2722 }
2724 Label after_transition;
2725 // must we block?
2727 // Block, if necessary, before resuming in _thread_in_Java state.
2728 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2729 { Label no_block;
2730 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2732 // Switch thread to "native transition" state before reading the synchronization state.
2733 // This additional state is necessary because reading and testing the synchronization
2734 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2735 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2736 // VM thread changes sync state to synchronizing and suspends threads for GC.
2737 // Thread A is resumed to finish this native method, but doesn't block here since it
2738 // didn't see any synchronization in progress, and escapes.
2739 __ set(_thread_in_native_trans, G3_scratch);
2740 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2741 if(os::is_MP()) {
2742 if (UseMembar) {
2743 // Force this write out before the read below
2744 __ membar(Assembler::StoreLoad);
2745 } else {
2746 // Write serialization page so VM thread can do a pseudo remote membar.
2747 // We use the current thread pointer to calculate a thread specific
2748 // offset to write to within the page. This minimizes bus traffic
2749 // due to cache line collision.
2750 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2751 }
2752 }
2753 __ load_contents(sync_state, G3_scratch);
2754 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2756 Label L;
2757 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2758 __ br(Assembler::notEqual, false, Assembler::pn, L);
2759 __ delayed()->ld(suspend_state, G3_scratch);
2760 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2761 __ bind(L);
2763 // Block. Save any potential method result value before the operation and
2764 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2765 // lets us share the oopMap we used when we went native rather than create
2766 // a distinct one for this pc
2767 //
2768 save_native_result(masm, ret_type, stack_slots);
2769 if (!is_critical_native) {
2770 __ call_VM_leaf(L7_thread_cache,
2771 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2772 G2_thread);
2773 } else {
2774 __ call_VM_leaf(L7_thread_cache,
2775 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2776 G2_thread);
2777 }
2779 // Restore any method result value
2780 restore_native_result(masm, ret_type, stack_slots);
2782 if (is_critical_native) {
2783 // The call above performed the transition to thread_in_Java so
2784 // skip the transition logic below.
2785 __ ba(after_transition);
2786 __ delayed()->nop();
2787 }
2789 __ bind(no_block);
2790 }
2792 // thread state is thread_in_native_trans. Any safepoint blocking has already
2793 // happened so we can now change state to _thread_in_Java.
2794 __ set(_thread_in_Java, G3_scratch);
2795 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2796 __ bind(after_transition);
2798 Label no_reguard;
2799 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2800 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2802 save_native_result(masm, ret_type, stack_slots);
2803 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2804 __ delayed()->nop();
2806 __ restore_thread(L7_thread_cache); // restore G2_thread
2807 restore_native_result(masm, ret_type, stack_slots);
2809 __ bind(no_reguard);
2811 // Handle possible exception (will unlock if necessary)
2813 // The native result, if any, is live in an freg or in I0 (and I1 if long on a 32bit vm)
2815 // Unlock
2816 if (method->is_synchronized()) {
2817 Label done;
2818 Register I2_ex_oop = I2;
2819 const Register L3_box = L3;
2820 // Get locked oop from the handle we passed to jni
2821 __ ld_ptr(L6_handle, 0, L4);
2822 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2823 // Must save pending exception around the slow-path VM call. Since it's a
2824 // leaf call, the pending exception (if any) can be kept in a register.
2825 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2826 // Now unlock
2827 // (Roop, Rmark, Rbox, Rscratch)
2828 __ compiler_unlock_object(L4, L1, L3_box, L2);
2829 __ br(Assembler::equal, false, Assembler::pt, done);
2830 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2832 // save and restore any potential method result value around the unlocking
2833 // operation. Will save in I0 (or stack for FP returns).
2834 save_native_result(masm, ret_type, stack_slots);
2836 // Must clear pending-exception before re-entering the VM. Since this is
2837 // a leaf call, pending-exception-oop can be safely kept in a register.
2838 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2840 // slow case of monitor exit. Inline a special case of call_VM that
2841 // disallows any pending_exception.
2842 __ mov(L3_box, O1);
2844 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2845 __ delayed()->mov(L4, O0); // Need oop in O0
2847 __ restore_thread(L7_thread_cache); // restore G2_thread
2849 #ifdef ASSERT
2850 { Label L;
2851 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2852 __ br_null_short(O0, Assembler::pt, L);
2853 __ stop("no pending exception allowed on exit from IR::monitorexit");
2854 __ bind(L);
2855 }
2856 #endif
2857 restore_native_result(masm, ret_type, stack_slots);
2858 // check_forward_pending_exception jumps to forward_exception if any pending
2859 // exception is set. The forward_exception routine expects to see the
2860 // exception in pending_exception and not in a register. Kind of clumsy,
2861 // since all folks who branch to forward_exception must have tested
2862 // pending_exception first and hence have it in a register already.
2863 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2864 __ bind(done);
2865 }
2867 // Tell dtrace about this method exit
2868 {
2869 SkipIfEqual skip_if(
2870 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2871 save_native_result(masm, ret_type, stack_slots);
2872 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2873 __ call_VM_leaf(L7_thread_cache,
2874 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2875 G2_thread, O1);
2876 restore_native_result(masm, ret_type, stack_slots);
2877 }
2879 // Clear "last Java frame" SP and PC.
2880 __ verify_thread(); // G2_thread must be correct
2881 __ reset_last_Java_frame();
2883 // Unpack oop result
2884 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2885 Label L;
2886 __ addcc(G0, I0, G0);
2887 __ brx(Assembler::notZero, true, Assembler::pt, L);
2888 __ delayed()->ld_ptr(I0, 0, I0);
2889 __ mov(G0, I0);
2890 __ bind(L);
2891 __ verify_oop(I0);
2892 }
2894 if (!is_critical_native) {
2895 // reset handle block
2896 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2897 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2899 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2900 check_forward_pending_exception(masm, G3_scratch);
2901 }
2904 // Return
2906 #ifndef _LP64
2907 if (ret_type == T_LONG) {
2909 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2910 __ sllx(I0, 32, G1); // Shift bits into high G1
2911 __ srl (I1, 0, I1); // Zero extend I1 (harmless?)
2912 __ or3 (I1, G1, G1); // OR 64 bits into G1
2913 }
2914 #endif
2916 __ ret();
2917 __ delayed()->restore();
2919 __ flush();
2921 nmethod *nm = nmethod::new_native_nmethod(method,
2922 compile_id,
2923 masm->code(),
2924 vep_offset,
2925 frame_complete,
2926 stack_slots / VMRegImpl::slots_per_word,
2927 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2928 in_ByteSize(lock_offset),
2929 oop_maps);
2931 if (is_critical_native) {
2932 nm->set_lazy_critical_native(true);
2933 }
2934 return nm;
2936 }
2938 #ifdef HAVE_DTRACE_H
2939 // ---------------------------------------------------------------------------
2940 // Generate a dtrace nmethod for a given signature. The method takes
2941 // arguments in the Java compiled code convention, marshals them to the
2942 // native ABI, and then leaves nops at the position where you would expect
2943 // to call a native function. When the probe is enabled, dtrace replaces
2944 // the nops with a trap instruction, and hitting the trap raises the
2945 // notification to dtrace.
2946 //
2947 // The probes can only take primitive types and java/lang/String as
2948 // arguments; no other java types are allowed. Strings are converted to
2949 // utf8 so that, from dtrace's point of view, java strings become C
2950 // strings. There is an arbitrary fixed limit on the total space a method
2951 // can use for converting the strings (256 chars per string in the
2952 // signature), so any java string longer than this is truncated.
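// A minimal sketch of the truncation policy just described (the real
// conversion is done by SharedRuntime::get_utf; this helper and its
// per-string budget parameter are hypothetical):
//
//   #include <string.h>
//   void copy_truncated(char* dst, const char* utf8, size_t budget) {
//     strncpy(dst, utf8, budget - 1);   // anything longer is simply cut off
//     dst[budget - 1] = '\0';
//   }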
2954 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2955 static bool offsets_initialized = false;
2957 nmethod *SharedRuntime::generate_dtrace_nmethod(
2958 MacroAssembler *masm, methodHandle method) {
2961 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2962 // be single threaded in this method.
2963 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2965 // Fill in the signature array, for the calling-convention call.
2966 int total_args_passed = method->size_of_parameters();
2968 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2969 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2971 // The signature we are going to use for the trap that dtrace will see:
2972 // java/lang/String is converted, "this" is dropped, and any other object
2973 // is converted to NULL. (A one-slot java/lang/Long object reference
2974 // is converted to a two-slot long, which is why we double the allocation).
2975 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2976 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2978 int i=0;
2979 int total_strings = 0;
2980 int first_arg_to_pass = 0;
2981 int total_c_args = 0;
2983 // Skip the receiver as dtrace doesn't want to see it
2984 if( !method->is_static() ) {
2985 in_sig_bt[i++] = T_OBJECT;
2986 first_arg_to_pass = 1;
2987 }
2989 SignatureStream ss(method->signature());
2990 for ( ; !ss.at_return_type(); ss.next()) {
2991 BasicType bt = ss.type();
2992 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2993 out_sig_bt[total_c_args++] = bt;
2994 if( bt == T_OBJECT) {
2995 Symbol* s = ss.as_symbol_or_null();
2996 if (s == vmSymbols::java_lang_String()) {
2997 total_strings++;
2998 out_sig_bt[total_c_args-1] = T_ADDRESS;
2999 } else if (s == vmSymbols::java_lang_Boolean() ||
3000 s == vmSymbols::java_lang_Byte()) {
3001 out_sig_bt[total_c_args-1] = T_BYTE;
3002 } else if (s == vmSymbols::java_lang_Character() ||
3003 s == vmSymbols::java_lang_Short()) {
3004 out_sig_bt[total_c_args-1] = T_SHORT;
3005 } else if (s == vmSymbols::java_lang_Integer() ||
3006 s == vmSymbols::java_lang_Float()) {
3007 out_sig_bt[total_c_args-1] = T_INT;
3008 } else if (s == vmSymbols::java_lang_Long() ||
3009 s == vmSymbols::java_lang_Double()) {
3010 out_sig_bt[total_c_args-1] = T_LONG;
3011 out_sig_bt[total_c_args++] = T_VOID;
3012 }
3013 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
3014 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3015 // We convert double to long
3016 out_sig_bt[total_c_args-1] = T_LONG;
3017 out_sig_bt[total_c_args++] = T_VOID;
3018 } else if ( bt == T_FLOAT) {
3019 // We convert float to int
3020 out_sig_bt[total_c_args-1] = T_INT;
3021 }
3022 }
3024 assert(i==total_args_passed, "validly parsed signature");
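// Worked example of the translation above, for a hypothetical static method
// with Java signature (Ljava/lang/Long;FLjava/lang/String;)V:
//
//   in_sig_bt : T_OBJECT         T_FLOAT  T_OBJECT
//   out_sig_bt: T_LONG, T_VOID   T_INT    T_ADDRESS
//
// The boxed Long widens to a two-slot T_LONG (hence the doubled array
// allocation), the float narrows to an int, and the String becomes a native
// T_ADDRESS pointing at its converted utf8 copy.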
3026 // Now get the compiled-Java layout as input arguments
3027 int comp_args_on_stack;
3028 comp_args_on_stack = SharedRuntime::java_calling_convention(
3029 in_sig_bt, in_regs, total_args_passed, false);
3031 // We have received a description of where all the java args are located
3032 // on entry to the wrapper. We need to convert these args to where
3033 // a native (non-JNI) function would expect them. To figure out
3034 // where they go we convert the java signature to a C signature and remove
3035 // T_VOID for any long/double we might have received.
3038 // Now figure out where the args must be stored and how much stack space
3039 // they require (neglecting out_preserve_stack_slots, but including space
3040 // for storing the first six register arguments). It's weird; see int_stk_helper.
3041 //
3042 int out_arg_slots;
3043 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
3045 // Calculate the total number of stack slots we will need.
3047 // First count the ABI requirement plus all of the outgoing args
3048 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
3050 // Plus a temp for possible conversion of float/double/long register args
3052 int conversion_temp = stack_slots;
3053 stack_slots += 2;
3056 // Now space for the string(s) we must convert
3058 int string_locs = stack_slots;
3059 stack_slots += total_strings *
3060 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
3062 // OK, the space we have allocated will look like:
3063 //
3064 //
3065 // FP-> | |
3066 // |---------------------|
3067 // | string[n] |
3068 // |---------------------| <- string_locs[n]
3069 // | string[n-1] |
3070 // |---------------------| <- string_locs[n-1]
3071 // | ... |
3072 // | ... |
3073 // |---------------------| <- string_locs[1]
3074 // | string[0] |
3075 // |---------------------| <- string_locs[0]
3076 // | temp |
3077 // |---------------------| <- conversion_temp
3078 // | outbound memory |
3079 // | based arguments |
3080 // | |
3081 // |---------------------|
3082 // | |
3083 // SP-> | out_preserved_slots |
3084 //
3085 //
3087 // Now compute the actual number of stack words we need, rounding to keep
3088 // the stack properly aligned.
3089 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
3091 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
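// A hedged sketch of the slot accounting above, in plain C++ (all counts are
// in 32-bit VMReg stack slots; 'out_preserve' is shorthand for the value of
// out_preserve_stack_slots and the figures are assumptions):
//
//   int slots = out_preserve + out_arg_slots;     // ABI area + outgoing args
//   int conversion_temp = slots;  slots += 2;     // one 8-byte scratch temp
//   int string_locs = slots;                      // then the string buffers
//   slots += total_strings * (max_dtrace_string_size / stack_slot_size);
//   // round_to rounds up to a power-of-two multiple, e.g. with align == 16:
//   slots = (slots + 15) & ~15;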
3093 intptr_t start = (intptr_t)__ pc();
3095 // First thing, make an ic check to see if we should even be here
3097 {
3098 Label L;
3099 const Register temp_reg = G3_scratch;
3100 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
3101 __ verify_oop(O0);
3102 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
3103 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
3105 __ jump_to(ic_miss, temp_reg);
3106 __ delayed()->nop();
3107 __ align(CodeEntryAlignment);
3108 __ bind(L);
3109 }
3111 int vep_offset = ((intptr_t)__ pc()) - start;
3114 // The instruction at the verified entry point must be 5 bytes or longer
3115 // because it can be patched on the fly by make_non_entrant. The stack bang
3116 // instruction fits that requirement.
3118 // Generate stack overflow check before creating frame
3119 __ generate_stack_overflow_check(stack_size);
3121 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
3122 "valid size for make_non_entrant");
3124 // Generate a new frame for the wrapper.
3125 __ save(SP, -stack_size, SP);
3127 // Frame is now completed as far as size and linkage.
3129 int frame_complete = ((intptr_t)__ pc()) - start;
3131 #ifdef ASSERT
3132 bool reg_destroyed[RegisterImpl::number_of_registers];
3133 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
3134 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
3135 reg_destroyed[r] = false;
3136 }
3137 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
3138 freg_destroyed[f] = false;
3139 }
3141 #endif /* ASSERT */
3143 VMRegPair zero;
3144 const Register g0 = G0; // without this we get a compiler warning (why??)
3145 zero.set2(g0->as_VMReg());
3147 int c_arg, j_arg;
3149 Register conversion_off = noreg;
3151 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3152 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3154 VMRegPair src = in_regs[j_arg];
3155 VMRegPair dst = out_regs[c_arg];
3157 #ifdef ASSERT
3158 if (src.first()->is_Register()) {
3159 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
3160 } else if (src.first()->is_FloatRegister()) {
3161 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
3162 FloatRegisterImpl::S)], "ack!");
3163 }
3164 if (dst.first()->is_Register()) {
3165 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
3166 } else if (dst.first()->is_FloatRegister()) {
3167 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
3168 FloatRegisterImpl::S)] = true;
3169 }
3170 #endif /* ASSERT */
3172 switch (in_sig_bt[j_arg]) {
3173 case T_ARRAY:
3174 case T_OBJECT:
3175 {
3176 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
3177 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3178 // need to unbox a one-slot value
3179 Register in_reg = L0;
3180 Register tmp = L2;
3181 if ( src.first()->is_reg() ) {
3182 in_reg = src.first()->as_Register();
3183 } else {
3184 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
3185 "must be");
3186 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
3187 }
3188 // If the final destination is an acceptable register
3189 if ( dst.first()->is_reg() ) {
3190 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
3191 tmp = dst.first()->as_Register();
3192 }
3193 }
3195 Label skipUnbox;
3196 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
3197 __ mov(G0, tmp->successor());
3198 }
3199 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
3200 __ delayed()->mov(G0, tmp);
3202 BasicType bt = out_sig_bt[c_arg];
3203 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3204 switch (bt) {
3205 case T_BYTE:
3206 __ ldub(in_reg, box_offset, tmp); break;
3207 case T_SHORT:
3208 __ lduh(in_reg, box_offset, tmp); break;
3209 case T_INT:
3210 __ ld(in_reg, box_offset, tmp); break;
3211 case T_LONG:
3212 __ ld_long(in_reg, box_offset, tmp); break;
3213 default: ShouldNotReachHere();
3214 }
3216 __ bind(skipUnbox);
3217 // If tmp wasn't the final destination, copy to the final destination
3218 if (tmp == L2) {
3219 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
3220 if (out_sig_bt[c_arg] == T_LONG) {
3221 long_move(masm, tmp_as_VM, dst);
3222 } else {
3223 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
3224 }
3225 }
3226 if (out_sig_bt[c_arg] == T_LONG) {
3227 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3228 ++c_arg; // move over the T_VOID to keep the loop indices in sync
3229 }
3230 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
3231 Register s =
3232 src.first()->is_reg() ? src.first()->as_Register() : L2;
3233 Register d =
3234 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3236 // We store the oop now so that the conversion pass can reach it
3237 // while in the inner frame. This will be the only store if
3238 // the oop is NULL.
3239 if (s != L2) {
3240 // src is register
3241 if (d != L2) {
3242 // dst is register
3243 __ mov(s, d);
3244 } else {
3245 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3246 STACK_BIAS), "must be");
3247 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
3248 }
3249 } else {
3250 // src not a register
3251 assert(Assembler::is_simm13(reg2offset(src.first()) +
3252 STACK_BIAS), "must be");
3253 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
3254 if (d == L2) {
3255 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3256 STACK_BIAS), "must be");
3257 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
3258 }
3259 }
3260 } else if (out_sig_bt[c_arg] != T_VOID) {
3261 // Convert the arg to NULL
3262 if (dst.first()->is_reg()) {
3263 __ mov(G0, dst.first()->as_Register());
3264 } else {
3265 assert(Assembler::is_simm13(reg2offset(dst.first()) +
3266 STACK_BIAS), "must be");
3267 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
3268 }
3269 }
3270 }
3271 break;
3272 case T_VOID:
3273 break;
3275 case T_FLOAT:
3276 if (src.first()->is_stack()) {
3277 // Stack to stack/reg is simple
3278 move32_64(masm, src, dst);
3279 } else {
3280 if (dst.first()->is_reg()) {
3281 // freg -> reg
3282 int off =
3283 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3284 Register d = dst.first()->as_Register();
3285 if (Assembler::is_simm13(off)) {
3286 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3287 SP, off);
3288 __ ld(SP, off, d);
3289 } else {
3290 if (conversion_off == noreg) {
3291 __ set(off, L6);
3292 conversion_off = L6;
3293 }
3294 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3295 SP, conversion_off);
3296 __ ld(SP, conversion_off , d);
3297 }
3298 } else {
3299 // freg -> mem
3300 int off = STACK_BIAS + reg2offset(dst.first());
3301 if (Assembler::is_simm13(off)) {
3302 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3303 SP, off);
3304 } else {
3305 if (conversion_off == noreg) {
3306 __ set(off, L6);
3307 conversion_off = L6;
3308 }
3309 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
3310 SP, conversion_off);
3311 }
3312 }
3313 }
3314 break;
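// The Assembler::is_simm13 guards above and below test whether an offset
// fits the signed 13-bit immediate field of a SPARC load/store. A sketch of
// the predicate (the real one belongs to the Assembler):
//
//   bool fits_simm13(intptr_t v) { return v >= -4096 && v <= 4095; }
//
// Offsets outside that range are materialized into L6 once (conversion_off)
// and reused for the remaining conversions.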
3316 case T_DOUBLE:
3317 assert( j_arg + 1 < total_args_passed &&
3318 in_sig_bt[j_arg + 1] == T_VOID &&
3319 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
3320 if (src.first()->is_stack()) {
3321 // Stack to stack/reg is simple
3322 long_move(masm, src, dst);
3323 } else {
3324 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
3326 // Destination could be an odd register on 32-bit, in which case
3327 // we can't load directly to the destination.
3329 if (!d->is_even() && wordSize == 4) {
3330 d = L2;
3331 }
3332 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3333 if (Assembler::is_simm13(off)) {
3334 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3335 SP, off);
3336 __ ld_long(SP, off, d);
3337 } else {
3338 if (conversion_off == noreg) {
3339 __ set(off, L6);
3340 conversion_off = L6;
3341 }
3342 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
3343 SP, conversion_off);
3344 __ ld_long(SP, conversion_off, d);
3345 }
3346 if (d == L2) {
3347 long_move(masm, reg64_to_VMRegPair(L2), dst);
3348 }
3349 }
3350 break;
3352 case T_LONG :
3353 // 32-bit can't do a split move of something like G1 -> O0,O1,
3354 // so use a memory temp
3355 if (src.is_single_phys_reg() && wordSize == 4) {
3356 Register tmp = L2;
3357 if (dst.first()->is_reg() &&
3358 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
3359 tmp = dst.first()->as_Register();
3360 }
3362 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
3363 if (Assembler::is_simm13(off)) {
3364 __ stx(src.first()->as_Register(), SP, off);
3365 __ ld_long(SP, off, tmp);
3366 } else {
3367 if (conversion_off == noreg) {
3368 __ set(off, L6);
3369 conversion_off = L6;
3370 }
3371 __ stx(src.first()->as_Register(), SP, conversion_off);
3372 __ ld_long(SP, conversion_off, tmp);
3373 }
3375 if (tmp == L2) {
3376 long_move(masm, reg64_to_VMRegPair(L2), dst);
3377 }
3378 } else {
3379 long_move(masm, src, dst);
3380 }
3381 break;
3383 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
3385 default:
3386 move32_64(masm, src, dst);
3387 }
3388 }
3391 // If we have any strings we must store any register-based args to the stack.
3392 // This includes any still-live float registers too.
3394 if (total_strings > 0 ) {
3396 // protect all the arg registers
3397 __ save_frame(0);
3398 __ mov(G2_thread, L7_thread_cache);
3399 const Register L2_string_off = L2;
3401 // Get first string offset
3402 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
3404 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
3405 if (out_sig_bt[c_arg] == T_ADDRESS) {
3407 VMRegPair dst = out_regs[c_arg];
3408 const Register d = dst.first()->is_reg() ?
3409 dst.first()->as_Register()->after_save() : noreg;
3411 // It's a string; the oop was already copied to the out arg
3412 // position
3413 if (d != noreg) {
3414 __ mov(d, O0);
3415 } else {
3416 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3417 "must be");
3418 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
3419 }
3420 Label skip;
3422 __ br_null(O0, false, Assembler::pn, skip);
3423 __ delayed()->add(FP, L2_string_off, O1);
3425 if (d != noreg) {
3426 __ mov(O1, d);
3427 } else {
3428 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3429 "must be");
3430 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
3431 }
3433 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3434 relocInfo::runtime_call_type);
3435 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3437 __ bind(skip);
3439 }
3441 }
3442 __ mov(L7_thread_cache, G2_thread);
3443 __ restore();
3445 }
3448 // OK, now we are done. Place the nop that dtrace wants so that it can
3449 // patch in the trap
3451 int patch_offset = ((intptr_t)__ pc()) - start;
3453 __ nop();
3456 // Return
3458 __ ret();
3459 __ delayed()->restore();
3461 __ flush();
3463 nmethod *nm = nmethod::new_dtrace_nmethod(
3464 method, masm->code(), vep_offset, patch_offset, frame_complete,
3465 stack_slots / VMRegImpl::slots_per_word);
3466 return nm;
3468 }
3470 #endif // HAVE_DTRACE_H
3472 // This function returns the adjustment (in number of words) to a c2i adapter
3473 // activation, for use during deoptimization.
3474 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3475 assert(callee_locals >= callee_parameters,
3476 "test and remove; got more parms than locals");
3477 if (callee_locals < callee_parameters)
3478 return 0; // No adjustment for negative locals
3479 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3480 return round_to(diff, WordsPerLong);
3481 }
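// Worked example with hypothetical counts: a callee with 3 parameters and 7
// locals needs diff = (7 - 3) * Interpreter::stackElementWords extra words;
// with stackElementWords == 1 that is round_to(4, WordsPerLong) == 4 words,
// keeping the adjustment 8-byte aligned.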
3483 // "Top of Stack" slots that may be unused by the calling convention but must
3484 // otherwise be preserved.
3485 // On Intel these are not necessary and the value can be zero.
3486 // On Sparc this describes the words reserved for storing a register window
3487 // when an interrupt occurs.
3488 uint SharedRuntime::out_preserve_stack_slots() {
3489 return frame::register_save_words * VMRegImpl::slots_per_word;
3490 }
3492 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3493 //
3494 // Common out the new frame generation for deopt and uncommon trap
3495 //
3496 Register G3pcs = G3_scratch; // Array of new pcs (input)
3497 Register Oreturn0 = O0;
3498 Register Oreturn1 = O1;
3499 Register O2UnrollBlock = O2;
3500 Register O3array = O3; // Array of frame sizes (input)
3501 Register O4array_size = O4; // number of frames (input)
3502 Register O7frame_size = O7; // frame size (input)
3504 __ ld_ptr(O3array, 0, O7frame_size);
3505 __ sub(G0, O7frame_size, O7frame_size);
3506 __ save(SP, O7frame_size, SP);
3507 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3509 #ifdef ASSERT
3510 // make sure that the frames are aligned properly
3511 #ifndef _LP64
3512 __ btst(wordSize*2-1, SP);
3513 __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
3514 #endif
3515 #endif
3517 // Deopt needs to pass some extra live values from frame to frame
3519 if (deopt) {
3520 __ mov(Oreturn0->after_save(), Oreturn0);
3521 __ mov(Oreturn1->after_save(), Oreturn1);
3522 }
3524 __ mov(O4array_size->after_save(), O4array_size);
3525 __ sub(O4array_size, 1, O4array_size);
3526 __ mov(O3array->after_save(), O3array);
3527 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3528 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3530 #ifdef ASSERT
3531 // trash registers to show a clear pattern in backtraces
3532 __ set(0xDEAD0000, I0);
3533 __ add(I0, 2, I1);
3534 __ add(I0, 4, I2);
3535 __ add(I0, 6, I3);
3536 __ add(I0, 8, I4);
3537 // Don't touch I5; it could hold a valuable savedSP
3538 __ set(0xDEADBEEF, L0);
3539 __ mov(L0, L1);
3540 __ mov(L0, L2);
3541 __ mov(L0, L3);
3542 __ mov(L0, L4);
3543 __ mov(L0, L5);
3545 // trash the return value as there is nothing to return yet
3546 __ set(0xDEAD0001, O7);
3547 #endif
3549 __ mov(SP, O5_savedSP);
3550 }
3553 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3554 //
3555 // loop through the UnrollBlock info and create new frames
3556 //
3557 Register G3pcs = G3_scratch;
3558 Register Oreturn0 = O0;
3559 Register Oreturn1 = O1;
3560 Register O2UnrollBlock = O2;
3561 Register O3array = O3;
3562 Register O4array_size = O4;
3563 Label loop;
3565 // Before we make new frames, check to see if enough stack is available.
3566 // Do this after the caller's return address is on top of the stack.
3567 if (UseStackBanging) {
3568 // Get total frame size for interpreted frames
3569 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3570 __ bang_stack_size(O4, O3, G3_scratch);
3571 }
3573 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3574 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3575 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3577 // Adjust old interpreter frame to make space for new frame's extra java locals
3578 //
3579 // We capture the original sp for the transition frame only because it is needed in
3580 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3581 // every interpreter frame captures a savedSP it is only needed at the transition
3582 // (fortunately). If we had to have it correct everywhere then we would need to
3583 // be told the sp_adjustment for each frame we create. If the frame size array
3584 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3585 // for each frame we create and keep up the illusion everywhere.
3586 //
3588 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3589 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3590 __ sub(SP, O7, SP);
3592 #ifdef ASSERT
3593 // make sure that there is at least one entry in the array
3594 __ tst(O4array_size);
3595 __ breakpoint_trap(Assembler::zero, Assembler::icc);
3596 #endif
3598 // Now push the new interpreter frames
3599 __ bind(loop);
3601 // allocate a new frame, filling the registers
3603 gen_new_frame(masm, deopt); // allocate an interpreter frame
3605 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
3606 __ delayed()->add(O3array, wordSize, O3array);
3607 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
3609 }
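// A hedged C sketch of the loop just generated (names hypothetical; the real
// state comes from the UnrollBlock and is carried in O2/O3/O4 and G3pcs):
//
//   for (int i = number_of_frames; i > 0; i--) {
//     intptr_t size = *frame_sizes++;              // O3array walks the sizes
//     push_interpreter_frame(size, *frame_pcs++);  // gen_new_frame()
//   }
//   // frame_pcs ends pointing at the final return pc, loaded into O7 above.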
3611 //------------------------------generate_deopt_blob----------------------------
3612 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3613 // instead.
3614 void SharedRuntime::generate_deopt_blob() {
3615 // allocate space for the code
3616 ResourceMark rm;
3617 // setup code generation tools
3618 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
3619 if (UseStackBanging) {
3620 pad += StackShadowPages*16 + 32;
3621 }
3622 #ifdef _LP64
3623 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3624 #else
3625 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3626 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3627 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3628 #endif /* _LP64 */
3629 MacroAssembler* masm = new MacroAssembler(&buffer);
3630 FloatRegister Freturn0 = F0;
3631 Register Greturn1 = G1;
3632 Register Oreturn0 = O0;
3633 Register Oreturn1 = O1;
3634 Register O2UnrollBlock = O2;
3635 Register L0deopt_mode = L0;
3636 Register G4deopt_mode = G4_scratch;
3637 int frame_size_words;
3638 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3639 #if !defined(_LP64) && defined(COMPILER2)
3640 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3641 #endif
3642 Label cont;
3644 OopMapSet *oop_maps = new OopMapSet();
3646 //
3647 // This is the entry point for code which is returning to a de-optimized
3648 // frame.
3649 // The steps taken by this frame are as follows:
3650 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3651 // and all potentially live registers (at a pollpoint many registers can be live).
3652 //
3653 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3654 // returns information about the number and size of interpreter frames
3655 // which are equivalent to the frame which is being deoptimized)
3656 // - deallocate the unpack frame, restoring only result values. Other
3657 // volatile registers will now be captured in the vframeArray as needed.
3658 // - deallocate the deoptimization frame
3659 // - in a loop using the information returned in the previous step
3660 // push new interpreter frames (take care to propagate the return
3661 // values through each new frame pushed)
3662 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3663 // - call the C routine: Deoptimization::unpack_frames (this function
3664 // lays out values on the interpreter frame which was just created)
3665 // - deallocate the dummy unpack_frame
3666 // - ensure that all the return values are correctly set and then do
3667 // a return to the interpreter entry point
3668 //
3669 // Refer to the following methods for more information:
3670 // - Deoptimization::fetch_unroll_info
3671 // - Deoptimization::unpack_frames
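// The blob below funnels three different entry points into 'cont', carrying
// a mode flag in L0deopt_mode. A minimal sketch of that dispatch (the enum
// values are real; the switch itself is illustrative):
//
//   switch (deopt_mode) {
//     case Deoptimization::Unpack_deopt:     /* normal return path         */ break;
//     case Deoptimization::Unpack_exception: /* exception in exception_oop */ break;
//     case Deoptimization::Unpack_reexecute: /* re-execute the bytecode    */ break;
//   }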
3673 OopMap* map = NULL;
3675 int start = __ offset();
3677 // restore G2, the trampoline destroyed it
3678 __ get_thread();
3680 // On entry we have been called by the deoptimized nmethod with a call that
3681 // replaced the original call (or safepoint polling location), so the
3682 // deoptimizing pc is now in O7. Return values are still in the expected places.
3684 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3685 __ ba(cont);
3686 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3688 int exception_offset = __ offset() - start;
3690 // restore G2, the trampoline destroyed it
3691 __ get_thread();
3693 // On entry we have been jumped to by the exception handler (or exception_blob
3694 // for server). O0 contains the exception oop and O7 contains the original
3695 // exception pc. So if we push a frame here it will look to the
3696 // stack walking code (fetch_unroll_info) just like a normal call so
3697 // state will be extracted normally.
3699 // save exception oop in JavaThread and fall through into the
3700 // exception_in_tls case since they are handled in same way except
3701 // for where the pending exception is kept.
3702 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3704 //
3705 // Vanilla deoptimization with an exception pending in exception_oop
3706 //
3707 int exception_in_tls_offset = __ offset() - start;
3709 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3710 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3712 // Restore G2_thread
3713 __ get_thread();
3715 #ifdef ASSERT
3716 {
3717 // verify that there is really an exception oop in exception_oop
3718 Label has_exception;
3719 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3720 __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3721 __ stop("no exception in thread");
3722 __ bind(has_exception);
3724 // verify that there is no pending exception
3725 Label no_pending_exception;
3726 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3727 __ ld_ptr(exception_addr, Oexception);
3728 __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3729 __ stop("must not have pending exception here");
3730 __ bind(no_pending_exception);
3731 }
3732 #endif
3734 __ ba(cont);
3735 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3737 //
3738 // Reexecute entry, similar to c2 uncommon trap
3739 //
3740 int reexecute_offset = __ offset() - start;
3742 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3743 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3745 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3747 __ bind(cont);
3749 __ set_last_Java_frame(SP, noreg);
3751 // do the call by hand so we can get the oopmap
3753 __ mov(G2_thread, L7_thread_cache);
3754 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3755 __ delayed()->mov(G2_thread, O0);
3757 // Set an oopmap for the call site; this describes all our saved volatile registers
3759 oop_maps->add_gc_map( __ offset()-start, map);
3761 __ mov(L7_thread_cache, G2_thread);
3763 __ reset_last_Java_frame();
3765 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3766 // so this move will survive
3768 __ mov(L0deopt_mode, G4deopt_mode);
3770 __ mov(O0, O2UnrollBlock->after_save());
3772 RegisterSaver::restore_result_registers(masm);
3774 Label noException;
3775 __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3777 // Move the pending exception from exception_oop to Oexception so
3778 // the pending exception will be picked up by the interpreter.
3779 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3780 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3781 __ bind(noException);
3783 // deallocate the deoptimization frame taking care to preserve the return values
3784 __ mov(Oreturn0, Oreturn0->after_save());
3785 __ mov(Oreturn1, Oreturn1->after_save());
3786 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3787 __ restore();
3789 // Allocate new interpreter frame(s) and possible c2i adapter frame
3791 make_new_frames(masm, true);
3793 // push a dummy "unpack_frame" taking care of float return values and
3794 // call Deoptimization::unpack_frames to have the unpacker layout
3795 // information in the interpreter frames just created and then return
3796 // to the interpreter entry point
3797 __ save(SP, -frame_size_words*wordSize, SP);
3798 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3799 #if !defined(_LP64)
3800 #if defined(COMPILER2)
3801 // 32-bit 1-register longs return longs in G1
3802 __ stx(Greturn1, saved_Greturn1_addr);
3803 #endif
3804 __ set_last_Java_frame(SP, noreg);
3805 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3806 #else
3807 // LP64 uses g4 in set_last_Java_frame
3808 __ mov(G4deopt_mode, O1);
3809 __ set_last_Java_frame(SP, G0);
3810 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3811 #endif
3812 __ reset_last_Java_frame();
3813 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3815 #if !defined(_LP64) && defined(COMPILER2)
3816 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3817 // I0/I1 if the return value is long.
3818 Label not_long;
3819 __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3820 __ ldd(saved_Greturn1_addr, I0);
3821 __ bind(not_long);
3822 #endif
3823 __ ret();
3824 __ delayed()->restore();
3826 masm->flush();
3827 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3828 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3829 }
3831 #ifdef COMPILER2
3833 //------------------------------generate_uncommon_trap_blob--------------------
3834 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3835 // instead.
3836 void SharedRuntime::generate_uncommon_trap_blob() {
3837 // allocate space for the code
3838 ResourceMark rm;
3839 // setup code generation tools
3840 int pad = VerifyThread ? 512 : 0;
3841 if (UseStackBanging) {
3842 pad += StackShadowPages*16 + 32;
3843 }
3844 #ifdef _LP64
3845 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3846 #else
3847 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3848 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3849 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3850 #endif
3851 MacroAssembler* masm = new MacroAssembler(&buffer);
3852 Register O2UnrollBlock = O2;
3853 Register O2klass_index = O2;
3855 //
3856 // This is the entry point for all traps the compiler takes when it thinks
3857 // it cannot handle further execution of compilation code. The frame is
3858 // deoptimized in these cases and converted into interpreter frames for
3859 // execution
3860 // The steps taken by this frame are as follows:
3861 // - push a fake "unpack_frame"
3862 // - call the C routine Deoptimization::uncommon_trap (this function
3863 // packs the current compiled frame into vframe arrays and returns
3864 // information about the number and size of interpreter frames which
3865 // are equivalent to the frame which is being deoptimized)
3866 // - deallocate the "unpack_frame"
3867 // - deallocate the deoptimization frame
3868 // - in a loop using the information returned in the previous step
3869 // push interpreter frames;
3870 // - create a dummy "unpack_frame"
3871 // - call the C routine: Deoptimization::unpack_frames (this function
3872 // lays out values on the interpreter frame which was just created)
3873 // - deallocate the dummy unpack_frame
3874 // - return to the interpreter entry point
3875 //
3876 // Refer to the following methods for more information:
3877 // - Deoptimization::uncommon_trap
3878 // - Deoptimization::unpack_frames
3880 // the unloaded class index is in O0 (first parameter to this blob)
3882 // push a dummy "unpack_frame"
3883 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3884 // vframe array and return the UnrollBlock information
3885 __ save_frame(0);
3886 __ set_last_Java_frame(SP, noreg);
3887 __ mov(I0, O2klass_index);
3888 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3889 __ reset_last_Java_frame();
3890 __ mov(O0, O2UnrollBlock->after_save());
3891 __ restore();
3893 // deallocate the deoptimized frame taking care to preserve the return values
3894 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3895 __ restore();
3897 // Allocate new interpreter frame(s) and possible c2i adapter frame
3899 make_new_frames(masm, false);
3901 // push a dummy "unpack_frame" taking care of float return values and
3902 // call Deoptimization::unpack_frames to have the unpacker layout
3903 // information in the interpreter frames just created and then return
3904 // to the interpreter entry point
3905 __ save_frame(0);
3906 __ set_last_Java_frame(SP, noreg);
3907 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3908 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3909 __ reset_last_Java_frame();
3910 __ ret();
3911 __ delayed()->restore();
3913 masm->flush();
3914 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3915 }
3917 #endif // COMPILER2
3919 //------------------------------generate_handler_blob-------------------
3920 //
3921 // Generate a special Compile2Runtime blob that saves all registers, and sets
3922 // up an OopMap.
3923 //
3924 // This blob is jumped to (via a breakpoint and the signal handler) from a
3925 // safepoint in compiled code. On entry to this blob, O7 contains the
3926 // address in the original nmethod at which we should resume normal execution.
3927 // Thus, this blob looks like a subroutine which must preserve lots of
3928 // registers and return normally. Note that O7 is never register-allocated,
3929 // so it is guaranteed to be free here.
3930 //
3932 // The hardest part of what this blob must do is to save the 64-bit %o
3933 // registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
3934 // an interrupt will chop off their heads. Making space in the caller's frame
3935 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3936 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3937 // SP and mess up its OopMaps. So we first adjust the caller's SP, then save
3938 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3939 // Tricky, tricky, tricky...
3941 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
3942 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3944 // allocate space for the code
3945 ResourceMark rm;
3946 // setup code generation tools
3947 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3948 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3949 // even larger with TraceJumps
3950 int pad = TraceJumps ? 512 : 0;
3951 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3952 MacroAssembler* masm = new MacroAssembler(&buffer);
3953 int frame_size_words;
3954 OopMapSet *oop_maps = new OopMapSet();
3955 OopMap* map = NULL;
3957 int start = __ offset();
3959 // If this causes a return before the processing, then do a "restore"
3960 if (cause_return) {
3961 __ restore();
3962 } else {
3963 // Make it look like we were called via the poll
3964 // so that frame constructor always sees a valid return address
3965 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3966 __ sub(O7, frame::pc_return_offset, O7);
3967 }
3969 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3971 // setup last_Java_sp (blows G4)
3972 __ set_last_Java_frame(SP, noreg);
3975 // call into the runtime to handle the safepoint
3975 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3976 __ mov(G2_thread, O0);
3977 __ save_thread(L7_thread_cache);
3978 __ call(call_ptr);
3979 __ delayed()->nop();
3981 // Set an oopmap for the call site.
3982 // We need this not only for callee-saved registers, but also for volatile
3983 // registers that the compiler might be keeping live across a safepoint.
3985 oop_maps->add_gc_map( __ offset() - start, map);
3987 __ restore_thread(L7_thread_cache);
3988 // clear last_Java_sp
3989 __ reset_last_Java_frame();
3991 // Check for exceptions
3992 Label pending;
3994 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3995 __ br_notnull_short(O1, Assembler::pn, pending);
3997 RegisterSaver::restore_live_registers(masm);
3999 // We are back to the original state on entry and ready to go.
4001 __ retl();
4002 __ delayed()->nop();
4004 // Pending exception after the safepoint
4006 __ bind(pending);
4008 RegisterSaver::restore_live_registers(masm);
4010 // We are back to the original state on entry.
4012 // Tail-call forward_exception_entry, with the issuing PC in O7,
4013 // so it looks like the original nmethod called forward_exception_entry.
4014 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
4015 __ JMP(O0, 0);
4016 __ delayed()->nop();
4018 // -------------
4019 // make sure all code is generated
4020 masm->flush();
4022 // return exception blob
4023 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
4024 }
4026 //
4027 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
4028 //
4029 // Generate a stub that calls into vm to find out the proper destination
4030 // of a java call. All the argument registers are live at this point
4031 // but since this is generic code we don't know what they are and the caller
4032 // must do any gc of the args.
4033 //
4034 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
4035 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
4037 // allocate space for the code
4038 ResourceMark rm;
4039 // setup code generation tools
4040 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
4041 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
4042 // even larger with TraceJumps
4043 int pad = TraceJumps ? 512 : 0;
4044 CodeBuffer buffer(name, 1600 + pad, 512);
4045 MacroAssembler* masm = new MacroAssembler(&buffer);
4046 int frame_size_words;
4047 OopMapSet *oop_maps = new OopMapSet();
4048 OopMap* map = NULL;
4050 int start = __ offset();
4052 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
4054 int frame_complete = __ offset();
4056 // setup last_Java_sp (blows G4)
4057 __ set_last_Java_frame(SP, noreg);
4059 // call into the runtime to resolve the call site
4060 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
4061 __ mov(G2_thread, O0);
4062 __ save_thread(L7_thread_cache);
4063 __ call(destination, relocInfo::runtime_call_type);
4064 __ delayed()->nop();
4066 // O0 contains the address we are going to jump to assuming no exception got installed
4068 // Set an oopmap for the call site.
4069 // We need this not only for callee-saved registers, but also for volatile
4070 // registers that the compiler might be keeping live across a safepoint.
4072 oop_maps->add_gc_map( __ offset() - start, map);
4074 __ restore_thread(L7_thread_cache);
4075 // clear last_Java_sp
4076 __ reset_last_Java_frame();
4078 // Check for exceptions
4079 Label pending;
4081 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
4082 __ br_notnull_short(O1, Assembler::pn, pending);
4084 // get the returned methodOop
4086 __ get_vm_result(G5_method);
4087 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
4089 // O0 is where we want to jump; overwrite G3, which is saved and scratch
4091 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
4093 RegisterSaver::restore_live_registers(masm);
4095 // We are back to the original state on entry and ready to go.
4097 __ JMP(G3, 0);
4098 __ delayed()->nop();
4100 // Pending exception after the safepoint
4102 __ bind(pending);
4104 RegisterSaver::restore_live_registers(masm);
4106 // We are back to the original state on entry.
4108 // Tail-call forward_exception_entry, with the issuing PC in O7,
4109 // so it looks like the original nmethod called forward_exception_entry.
4110 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
4111 __ JMP(O0, 0);
4112 __ delayed()->nop();
4114 // -------------
4115 // make sure all code is generated
4116 masm->flush();
4118 // return the blob
4119 // frame_size_words or bytes??
4120 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
4121 }