Thu, 27 May 2010 19:08:38 -0700
6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile-time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };
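
  // Worked example of the alignment trick above (illustrative only): if
  // call_args_area came out as, say, 92 bytes then
  //   (92 + 7) & ~7  ==  99 & ~7  ==  96
  // i.e. the next multiple of 8, computed purely from compile-time
  // constants, which is why round_to() cannot be used here.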

 public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);
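
  // Unit bookkeeping, as an illustration (the byte count is hypothetical): a
  // 528-byte frame (16-byte aligned) is
  //   528 / sizeof(jint) == 132 OopMap slots (4-byte c2 stack slots)
  //   528 / wordSize     ==  66 CodeBlob words in the LP64 build
  // Confusing these three units is a classic source of frame-size bugs.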

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
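
// Example (the out_preserve value is an assumption, for illustration only):
// if out_preserve_stack_slots() were 16 and r->reg2stack() returned 3, then
// reg2offset(r) == (3 + 16) * 4 == 76 bytes, i.e. the ideal slot plus the
// ABI area that the ideal slot numbering deliberately ignores.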

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.
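
// A hypothetical sketch of the encodings described above:
//   32-bit int in O0:   first() == O0,    second() == VMRegImpl::Bad()
//   64-bit long in O1:  first() == O1,    second() == O1->next()
//   T_VOID placeholder: first() == Bad(), second() == Bad()
// The illegal combination (first() bad, second() valid) never occurs on Sparc.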


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
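// Worked example of the packing rules (LP64, outgoing arguments; illustrative
// only): for the signature (int, long, float, double) the layout loop below
// yields
//   int    -> O0         (first of 6 int registers)
//   long   -> O1 pair    (longs compete with ints for registers in LP64)
//   float  -> F0         (first of 8 float registers)
//   double -> F2:F3      (doubles start at the first aligned float pair)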
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt = 0;
  int flt_reg_cnt = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                           stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
#ifdef COMPILER2
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
      // spare and available.  This convention isn't used by the Sparc ABI or
      // anywhere else.  If we're tiered then we don't use G-regs because c1
      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
        } else {
          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
        }
        int_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
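
// Illustration of the displacement-overflow handling (SPARC's simm13 is a
// signed 13-bit immediate, i.e. -4096..4095): an offset such as 8000 does
// not fit, so ensure_simm13_or_reg() materializes it into Rdisp and the
// subsequent load/store uses a register+register address; offsets that do
// fit simply stay as immediate constants.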


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter.  Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
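
  // Sizing example (illustrative; assumes the LP64 stackElementSize of 8 and
  // a hypothetical 48-byte varargs area): 5 args need arg_size = 5*8 = 40, so
  // extraspace = round_to(40 + 48, 16) = 96 bytes of new stack, keeping the
  // 2*wordSize (16-byte) alignment the ABI requires.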

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACK_BIAS we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as
  // temporary displacement.
  if (! __ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args

  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

#ifdef ASSERT
  {
    // on entry O5_savedSP and SP should be equal
    Label ok;
    __ cmp(O5_savedSP, SP);
    __ br(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("O5_savedSP not set");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // : java stack   :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // : unused       :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // : window       :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // : java stack   :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // : window       :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }
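
  // Example of the slot-to-word conversion above (illustrative, LP64 values:
  // stack_slot_size == 4, wordSize == 8, LogBytesPerWord == 3): with
  // comp_args_on_stack == 3, 3*4 = 12 bytes rounds to 16, 16 >> 3 = 2 words,
  // round_to(2, 2) = 2 words, so SP drops by 16 bytes.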

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {         // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {      // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null(L0, false, Assembler::pt, loop);
    __ delayed()->nop();

    __ restore();
  }

  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp = L0;   // another scratch register
#else
    Register R_temp = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons.  We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area.  C2 thinks of that
  // abi area as only out_preserve_stack_slots.  This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it.  So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls.  What a mess.  I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
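
// Bias example (numbers hypothetical, for illustration only): for argument
// i == 7, mem_parm_offset == 1; with a memory_parameter_word_sp_offset of 23
// words and slots_per_word == 2 the raw slot would be (1 + 23) * 2 == 48, and
// the returned VMReg is 48 minus out_preserve_stack_slots(), so that adding
// the out-preserve bias back in later lands on the correct ABI slot.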


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64
  // The LP64 convention is the V9 convention which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
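
// Illustration of the V9 float rules above: an argument whose index j among
// actual (non-half) args is below 16 lands in the odd float register
// F(1 + 2*j) if it is a float (F1, F3, F5, ...), or in the even/odd pair
// starting at F(2*j) if it is a double (F0:F1, F2:F3, ...); from j == 16 on,
// the same odd/even numbering is applied to stack slots instead of registers.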


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward any pending exception.  Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
// is no exception handler.  We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter.  On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}
1453 // An oop arg. We must pass a handle, not the oop itself.
1454 static void object_move(MacroAssembler* masm,
1455 OopMap* map,
1456 int oop_handle_offset,
1457 int framesize_in_slots,
1458 VMRegPair src,
1459 VMRegPair dst,
1460 bool is_receiver,
1461 int* receiver_offset) {
1463 // must pass a handle. First figure out the location we use as a handle
1465 if (src.first()->is_stack()) {
1466 // Oop is already on the stack
1467 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1468 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1469 __ ld_ptr(rHandle, 0, L4);
1470 #ifdef _LP64
1471 __ movr( Assembler::rc_z, L4, G0, rHandle );
1472 #else
1473 __ tst( L4 );
1474 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1475 #endif
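    // JNI expects a NULL pointer for a null oop, not a handle to a null
    // slot, so rHandle was cleared above when the loaded oop was zero.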
1476 if (dst.first()->is_stack()) {
1477 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1478 }
1479 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1480 if (is_receiver) {
1481 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1482 }
1483 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1484 } else {
1485 // Oop is in an input register; we must flush it to the stack.
1486 const Register rOop = src.first()->as_Register();
1487 const Register rHandle = L5;
1488 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1489 int offset = oop_slot*VMRegImpl::stack_slot_size;
1490 Label skip;
1491 __ st_ptr(rOop, SP, offset + STACK_BIAS);
1492 if (is_receiver) {
1493 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1494 }
1495 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1496 __ add(SP, offset + STACK_BIAS, rHandle);
1497 #ifdef _LP64
1498 __ movr( Assembler::rc_z, rOop, G0, rHandle );
1499 #else
1500 __ tst( rOop );
1501 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1502 #endif
1504 if (dst.first()->is_stack()) {
1505 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1506 } else {
1507 __ mov(rHandle, dst.first()->as_Register());
1508 }
1509 }
1510 }
1512 // A float arg may have to do a float-reg to int-reg conversion
1513 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1514 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1516 if (src.first()->is_stack()) {
1517 if (dst.first()->is_stack()) {
1518 // stack to stack the easiest of the bunch
1519 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1520 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1521 } else {
1522 // stack to reg
1523 if (dst.first()->is_Register()) {
1524 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1525 } else {
1526 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1527 }
1528 }
1529 } else if (dst.first()->is_stack()) {
1530 // reg to stack
1531 if (src.first()->is_Register()) {
1532 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1533 } else {
1534 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1535 }
1536 } else {
1537 // reg to reg
1538 if (src.first()->is_Register()) {
1539 if (dst.first()->is_Register()) {
1540 // gpr -> gpr
1541 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1542 } else {
1543 // gpr -> fpr
1544 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1545 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1546 }
1547 } else if (dst.first()->is_Register()) {
1548 // fpr -> gpr
1549 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1550 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1551 } else {
1552 // fpr -> fpr
1553 // In theory these overlap but the ordering is such that this is likely a nop
1554 if ( src.first() != dst.first()) {
1555 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1556 }
1557 }
1558 }
1559 }
1561 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1562 VMRegPair src_lo(src.first());
1563 VMRegPair src_hi(src.second());
1564 VMRegPair dst_lo(dst.first());
1565 VMRegPair dst_hi(dst.second());
1566 simple_move32(masm, src_lo, dst_lo);
1567 simple_move32(masm, src_hi, dst_hi);
1568 }
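// split_long_move simply performs two independent 32-bit moves; any
// msw/lsw shuffling is the caller's job (see long_move below).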
1570 // A long move
1571 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1573 // Do the simple ones here else do two int moves
1574 if (src.is_single_phys_reg() ) {
1575 if (dst.is_single_phys_reg()) {
1576 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1577 } else {
1578 // split src into two separate registers
1579 // Remember: hi means high address, i.e. the lsw on sparc
1580 // Move msw to lsw
1581 if (dst.second()->is_reg()) {
1582 // MSW -> MSW
1583 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1584 // Now LSW -> LSW
1585 // this will only move lo -> lo and ignore hi
1586 VMRegPair split(dst.second());
1587 simple_move32(masm, src, split);
1588 } else {
1589 VMRegPair split(src.first(), L4->as_VMReg());
1590 // MSW -> MSW (lo ie. first word)
1591 __ srax(src.first()->as_Register(), 32, L4);
1592 split_long_move(masm, split, dst);
1593 }
1594 }
1595 } else if (dst.is_single_phys_reg()) {
1596 if (src.is_adjacent_aligned_on_stack(2)) {
1597 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1598 } else {
1599 // dst is a single reg.
1600 // Remember lo is low address not msb for stack slots
1601 // and lo is the "real" register for registers
1602 // src is some split combination of registers and/or stack slots
1604 VMRegPair split;
1606 if (src.first()->is_reg()) {
1607 // src.lo (msw) is a reg, src.hi is stk/reg
1608 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1609 split.set_pair(dst.first(), src.first());
1610 } else {
1611 // msw is stack move to L5
1612 // lsw is stack move to dst.lo (real reg)
1613 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1614 split.set_pair(dst.first(), L5->as_VMReg());
1615 }
1617 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1618 // msw -> src.lo/L5, lsw -> dst.lo
1619 split_long_move(masm, src, split);
1621 // dst.lo now has the lsw in the correct position;
1622 // shift the msw into the upper half and OR it in.
1623 __ sllx(split.first()->as_Register(), 32, L5);
1625 const Register d = dst.first()->as_Register();
1626 __ or3(L5, d, d);
1627 }
1628 } else {
1629 // For LP64 we can probably do better.
1630 split_long_move(masm, src, dst);
1631 }
1632 }
1634 // A double move
1635 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1637 // The painful thing here is that like long_move a VMRegPair might be
1638 // 1: a single physical register
1639 // 2: two physical registers (v8)
1640 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1641 // 4: two stack slots
1643 // Since src always follows the java calling convention, we know that the
1644 // src pair is always either all registers or all stack (and aligned?),
1646 // never split between a register [lo] and a stack slot [hi].
1647 if (src.first()->is_stack()) {
1648 if (dst.first()->is_stack()) {
1649 // stack to stack the easiest of the bunch
1650 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1651 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1652 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1653 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1654 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1655 } else {
1656 // stack to reg
1657 if (dst.second()->is_stack()) {
1658 // stack -> reg, stack -> stack
1659 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1660 if (dst.first()->is_Register()) {
1661 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1662 } else {
1663 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1664 }
1665 // This was missing. (very rare case)
1666 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1667 } else {
1668 // stack -> reg
1669 // Eventually optimize for alignment QQQ
1670 if (dst.first()->is_Register()) {
1671 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1672 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1673 } else {
1674 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1675 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1676 }
1677 }
1678 }
1679 } else if (dst.first()->is_stack()) {
1680 // reg to stack
1681 if (src.first()->is_Register()) {
1682 // Eventually optimize for alignment QQQ
1683 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1684 if (src.second()->is_stack()) {
1685 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1686 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1687 } else {
1688 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1689 }
1690 } else {
1691 // fpr to stack
1692 if (src.second()->is_stack()) {
1693 ShouldNotReachHere();
1694 } else {
1695 // Is the stack aligned?
1696 if (reg2offset(dst.first()) & 0x7) {
1697 // Not 8-byte aligned, so store as two singles
1698 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1699 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1700 } else {
1701 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1702 }
1703 }
1704 }
1705 } else {
1706 // reg to reg
1707 if (src.first()->is_Register()) {
1708 if (dst.first()->is_Register()) {
1709 // gpr -> gpr
1710 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1711 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1712 } else {
1713 // gpr -> fpr
1714 // ought to be able to do a single store
1715 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1716 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1717 // ought to be able to do a single load
1718 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1719 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1720 }
1721 } else if (dst.first()->is_Register()) {
1722 // fpr -> gpr
1723 // ought to be able to do a single store
1724 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1725 // ought to be able to do a single load
1726 // REMEMBER first() is low address not LSB
1727 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1728 if (dst.second()->is_Register()) {
1729 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1730 } else {
1731 __ ld(FP, -4 + STACK_BIAS, L4);
1732 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1733 }
1734 } else {
1735 // fpr -> fpr
1736 // In theory these overlap but the ordering is such that this is likely a nop
1737 if ( src.first() != dst.first()) {
1738 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1739 }
1740 }
1741 }
1742 }
1744 // Creates an inner frame if one hasn't already been created, and
1745 // saves a copy of the thread in L7_thread_cache
1746 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1747 if (!*already_created) {
1748 __ save_frame(0);
1749 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1750 // Don't use save_thread because it smashes G2 and we merely want to save a
1751 // copy
1752 __ mov(G2_thread, L7_thread_cache);
1753 *already_created = true;
1754 }
1755 }
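// Callers thread a shared already_created flag through this helper so
// that at most one inner frame is built, however many slow paths need it.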
1757 // ---------------------------------------------------------------------------
1758 // Generate a native wrapper for a given method. The method takes arguments
1759 // in the Java compiled code convention, marshals them to the native
1760 // convention (handlizes oops, etc), transitions to native, makes the call,
1761 // returns to java state (possibly blocking), unhandlizes any result and
1762 // returns.
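//
// In outline, the generated code performs: an inline-cache check, a stack
// overflow check, frame creation, the argument shuffle (including oop
// handlizing), optional locking, the transition to _thread_in_native, the
// native call itself, a safepoint/suspend check on return, optional
// unlocking, result unpacking, and the return to the caller.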
1763 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1764 methodHandle method,
1765 int total_in_args,
1766 int comp_args_on_stack, // in VMRegStackSlots
1767 BasicType *in_sig_bt,
1768 VMRegPair *in_regs,
1769 BasicType ret_type) {
1771 // Native nmethod wrappers never take possession of the oop arguments.
1772 // So the caller will gc the arguments. The only thing we need an
1773 // oopMap for is if the call is static
1774 //
1775 // An OopMap for lock (and class if static), and one for the VM call itself
1776 OopMapSet *oop_maps = new OopMapSet();
1777 intptr_t start = (intptr_t)__ pc();
1779 // First thing make an ic check to see if we should even be here
1780 {
1781 Label L;
1782 const Register temp_reg = G3_scratch;
1783 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1784 __ verify_oop(O0);
1785 __ load_klass(O0, temp_reg);
1786 __ cmp(temp_reg, G5_inline_cache_reg);
1787 __ brx(Assembler::equal, true, Assembler::pt, L);
1788 __ delayed()->nop();
1790 __ jump_to(ic_miss, temp_reg);
1791 __ delayed()->nop();
1792 __ align(CodeEntryAlignment);
1793 __ bind(L);
1794 }
1796 int vep_offset = ((intptr_t)__ pc()) - start;
1798 #ifdef COMPILER1
1799 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1800 // Object.hashCode can pull the hashCode from the header word
1801 // instead of doing a full VM transition once it's been computed.
1802 // Since hashCode is usually polymorphic at call sites we can't do
1803 // this optimization at the call site without a lot of work.
1804 Label slowCase;
1805 Register receiver = O0;
1806 Register result = O0;
1807 Register header = G3_scratch;
1808 Register hash = G3_scratch; // overwrite header value with hash value
1809 Register mask = G1; // to get hash field from header
1811 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1812 // We depend on hash_mask being at most 32 bits and avoid the use of
1813 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1814 // vm: see markOop.hpp.
1815 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1816 __ sethi(markOopDesc::hash_mask, mask);
1817 __ btst(markOopDesc::unlocked_value, header);
1818 __ br(Assembler::zero, false, Assembler::pn, slowCase);
1819 if (UseBiasedLocking) {
1820 // Check if biased and fall through to runtime if so
1821 __ delayed()->nop();
1822 __ btst(markOopDesc::biased_lock_bit_in_place, header);
1823 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1824 }
1825 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1827 // Check for a valid (non-zero) hash code and get its value.
1828 #ifdef _LP64
1829 __ srlx(header, markOopDesc::hash_shift, hash);
1830 #else
1831 __ srl(header, markOopDesc::hash_shift, hash);
1832 #endif
1833 __ andcc(hash, mask, hash);
1834 __ br(Assembler::equal, false, Assembler::pn, slowCase);
1835 __ delayed()->nop();
1837 // leaf return.
1838 __ retl();
1839 __ delayed()->mov(hash, result);
1840 __ bind(slowCase);
1841 }
1842 #endif // COMPILER1
1845 // We have received a description of where all the java args are located
1846 // on entry to the wrapper. We need to convert these args to where
1847 // the jni function will expect them. To figure out where they go
1848 // we convert the java signature to a C signature by inserting
1849 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1851 int total_c_args = total_in_args + 1;
1852 if (method->is_static()) {
1853 total_c_args++;
1854 }
1856 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1857 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1859 int argc = 0;
1860 out_sig_bt[argc++] = T_ADDRESS;
1861 if (method->is_static()) {
1862 out_sig_bt[argc++] = T_OBJECT;
1863 }
1865 for (int i = 0; i < total_in_args ; i++ ) {
1866 out_sig_bt[argc++] = in_sig_bt[i];
1867 }
1869 // Now figure out where the args must be stored and how much stack space
1870 // they require (neglecting out_preserve_stack_slots but including space
1871 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
1872 //
1873 int out_arg_slots;
1874 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1876 // Compute framesize for the wrapper. We need to handlize all oops in
1877 // registers. We must create space for them here that is disjoint from
1878 // the windowed save area because we have no control over when we might
1879 // flush the window again and overwrite values that gc has since modified.
1880 // (The live window race)
1881 //
1882 // We always just allocate 6 words for storing down these objects. This allows
1883 // us to simply record the base and use the Ireg number to decide which
1884 // slot to use. (Note that the reg number is the inbound number not the
1885 // outbound number).
1886 // We must shuffle args to match the native convention, and include var-args space.
1888 // Calculate the total number of stack slots we will need.
1890 // First count the abi requirement plus all of the outgoing args
1891 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1893 // Now the space for the inbound oop handle area
1895 int oop_handle_offset = stack_slots;
1896 stack_slots += 6*VMRegImpl::slots_per_word;
1898 // Now any space we need for handlizing a klass if static method
1900 int oop_temp_slot_offset = 0;
1901 int klass_slot_offset = 0;
1902 int klass_offset = -1;
1903 int lock_slot_offset = 0;
1904 bool is_static = false;
1906 if (method->is_static()) {
1907 klass_slot_offset = stack_slots;
1908 stack_slots += VMRegImpl::slots_per_word;
1909 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1910 is_static = true;
1911 }
1913 // Plus a lock if needed
1915 if (method->is_synchronized()) {
1916 lock_slot_offset = stack_slots;
1917 stack_slots += VMRegImpl::slots_per_word;
1918 }
1920 // Now a place to save return value or as a temporary for any gpr -> fpr moves
1921 stack_slots += 2;
1923 // Ok The space we have allocated will look like:
1924 //
1925 //
1926 // FP-> | |
1927 // |---------------------|
1928 // | 2 slots for moves |
1929 // |---------------------|
1930 // | lock box (if sync) |
1931 // |---------------------| <- lock_slot_offset
1932 // | klass (if static) |
1933 // |---------------------| <- klass_slot_offset
1934 // | oopHandle area |
1935 // |---------------------| <- oop_handle_offset
1936 // | outbound memory |
1937 // | based arguments |
1938 // | |
1939 // |---------------------|
1940 // | vararg area |
1941 // |---------------------|
1942 // | |
1943 // SP-> | out_preserved_slots |
1944 //
1945 //
1948 // Now compute actual number of stack words we need rounding to make
1949 // stack properly aligned.
1950 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1952 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1954 // Generate stack overflow check before creating frame
1955 __ generate_stack_overflow_check(stack_size);
1957 // Generate a new frame for the wrapper.
1958 __ save(SP, -stack_size, SP);
1960 int frame_complete = ((intptr_t)__ pc()) - start;
1962 __ verify_thread();
1965 //
1966 // We immediately shuffle the arguments so that, for any vm call we have to
1967 // make from here on out (sync slow path, jvmti, etc.), we will have
1968 // captured the oops from our caller and have a valid oopMap for
1969 // them.
1971 // -----------------
1972 // The Grand Shuffle
1973 //
1974 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1975 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
1976 // the class mirror instead of a receiver. This pretty much guarantees that
1977 // register layout will not match. We ignore these extra arguments during
1978 // the shuffle. The shuffle is described by the two calling convention
1979 // vectors we have in our possession. We simply walk the java vector to
1980 // get the source locations and the c vector to get the destinations.
1981 // Because we have a new window and the argument registers are completely
1982 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
1983 // here.
1985 // This is a trick. We double the stack slots so we can claim
1986 // the oops in the caller's frame. Since we are sure to have
1987 // more args than the caller doubling is enough to make
1988 // sure we can capture all the incoming oop args from the
1989 // caller.
1990 //
1991 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1992 int c_arg = total_c_args - 1;
1993 // Record sp-based slot for receiver on stack for non-static methods
1994 int receiver_offset = -1;
1996 // We move the arguments backward because the floating point registers
1997 // destination will always be to a register with a greater or equal register
1998 // number or the stack.
2000 #ifdef ASSERT
2001 bool reg_destroyed[RegisterImpl::number_of_registers];
2002 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2003 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2004 reg_destroyed[r] = false;
2005 }
2006 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2007 freg_destroyed[f] = false;
2008 }
2010 #endif /* ASSERT */
2012 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2014 #ifdef ASSERT
2015 if (in_regs[i].first()->is_Register()) {
2016 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2017 } else if (in_regs[i].first()->is_FloatRegister()) {
2018 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2019 }
2020 if (out_regs[c_arg].first()->is_Register()) {
2021 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2022 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2023 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2024 }
2025 #endif /* ASSERT */
2027 switch (in_sig_bt[i]) {
2028 case T_ARRAY:
2029 case T_OBJECT:
2030 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2031 ((i == 0) && (!is_static)),
2032 &receiver_offset);
2033 break;
2034 case T_VOID:
2035 break;
2037 case T_FLOAT:
2038 float_move(masm, in_regs[i], out_regs[c_arg]);
2039 break;
2041 case T_DOUBLE:
2042 assert( i + 1 < total_in_args &&
2043 in_sig_bt[i + 1] == T_VOID &&
2044 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2045 double_move(masm, in_regs[i], out_regs[c_arg]);
2046 break;
2048 case T_LONG :
2049 long_move(masm, in_regs[i], out_regs[c_arg]);
2050 break;
2052 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2054 default:
2055 move32_64(masm, in_regs[i], out_regs[c_arg]);
2056 }
2057 }
2059 // Pre-load a static method's oop into O1. Used both by locking code and
2060 // the normal JNI call code.
2061 if (method->is_static()) {
2062 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2064 // Now handlize the static class mirror in O1. It's known not-null.
2065 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2066 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2067 __ add(SP, klass_offset + STACK_BIAS, O1);
2068 }
2071 const Register L6_handle = L6;
2073 if (method->is_synchronized()) {
2074 __ mov(O1, L6_handle);
2075 }
2077 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
2078 // except O6/O7. So if we must call out we must push a new frame. We immediately
2079 // push a new frame and flush the windows.
2081 #ifdef _LP64
2082 intptr_t thepc = (intptr_t) __ pc();
2083 {
2084 address here = __ pc();
2085 // Call the next instruction
2086 __ call(here + 8, relocInfo::none);
2087 __ delayed()->nop();
2088 }
2089 #else
2090 intptr_t thepc = __ load_pc_address(O7, 0);
2091 #endif /* _LP64 */
2093 // We use the same pc/oopMap repeatedly when we call out
2094 oop_maps->add_gc_map(thepc - start, map);
2096 // O7 now has the pc loaded that we will use when we finally call to native.
2098 // Save thread in L7; it crosses a bunch of VM calls below
2099 // Don't use save_thread because it smashes G2 and we merely
2100 // want to save a copy
2101 __ mov(G2_thread, L7_thread_cache);
2104 // If we create an inner frame, once is plenty;
2105 // when we create it we must also save G2_thread.
2106 bool inner_frame_created = false;
2108 // dtrace method entry support
2109 {
2110 SkipIfEqual skip_if(
2111 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2112 // create inner frame
2113 __ save_frame(0);
2114 __ mov(G2_thread, L7_thread_cache);
2115 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2116 __ call_VM_leaf(L7_thread_cache,
2117 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2118 G2_thread, O1);
2119 __ restore();
2120 }
2122 // RedefineClasses() tracing support for obsolete method entry
2123 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2124 // create inner frame
2125 __ save_frame(0);
2126 __ mov(G2_thread, L7_thread_cache);
2127 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2128 __ call_VM_leaf(L7_thread_cache,
2129 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2130 G2_thread, O1);
2131 __ restore();
2132 }
2134 // We are in the jni frame unless inner_frame_created is true, in which case
2135 // we are one frame deeper (the "inner" frame). If we are in the
2136 // "inner" frame the args are in the Iregs; if in the jni frame then
2137 // they are in the Oregs.
2138 // If we ever need to go to the VM (for locking, jvmti) then
2139 // we will always be in the "inner" frame.
2141 // Lock a synchronized method
2142 int lock_offset = -1; // Set if locked
2143 if (method->is_synchronized()) {
2144 Register Roop = O1;
2145 const Register L3_box = L3;
2147 create_inner_frame(masm, &inner_frame_created);
2149 __ ld_ptr(I1, 0, O1);
2150 Label done;
2152 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2153 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2154 #ifdef ASSERT
2155 if (UseBiasedLocking) {
2156 // making the box point to itself will make it clear it went unused
2157 // but also be obviously invalid
2158 __ st_ptr(L3_box, L3_box, 0);
2159 }
2160 #endif // ASSERT
2161 //
2162 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2163 //
2164 __ compiler_lock_object(Roop, L1, L3_box, L2);
2165 __ br(Assembler::equal, false, Assembler::pt, done);
2166 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2169 // None of the above fast optimizations worked so we have to get into the
2170 // slow case of monitor enter. Inline a special case of call_VM that
2171 // disallows any pending_exception.
2172 __ mov(Roop, O0); // Need oop in O0
2173 __ mov(L3_box, O1);
2175 // Record last_Java_sp, in case the VM code releases the JVM lock.
2177 __ set_last_Java_frame(FP, I7);
2179 // do the call
2180 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2181 __ delayed()->mov(L7_thread_cache, O2);
2183 __ restore_thread(L7_thread_cache); // restore G2_thread
2184 __ reset_last_Java_frame();
2186 #ifdef ASSERT
2187 { Label L;
2188 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2189 __ br_null(O0, false, Assembler::pt, L);
2190 __ delayed()->nop();
2191 __ stop("no pending exception allowed on exit from IR::monitorenter");
2192 __ bind(L);
2193 }
2194 #endif
2195 __ bind(done);
2196 }
2199 // Finally just about ready to make the JNI call
2201 __ flush_windows();
2202 if (inner_frame_created) {
2203 __ restore();
2204 } else {
2205 // Store only what we need from this frame
2206 // QQQ I think that on non-v9 (like we care) we don't need these saves
2207 // either, as the flush traps and the current window goes too.
2208 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2209 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2210 }
2212 // get JNIEnv* which is first argument to native
2214 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2216 // Use that pc we placed in O7 a while back as the current frame anchor
2218 __ set_last_Java_frame(SP, O7);
2220 // Transition from _thread_in_Java to _thread_in_native.
2221 __ set(_thread_in_native, G3_scratch);
2222 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2224 // We flushed the windows ages ago; now mark them as flushed.
2227 __ set(JavaFrameAnchor::flushed, G3_scratch);
2229 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
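  // The store of the flushed flag is placed in the delay slot of the native
  // call below, so the anchor is marked flushed exactly as we enter native code.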
2231 #ifdef _LP64
2232 AddressLiteral dest(method->native_function());
2233 __ relocate(relocInfo::runtime_call_type);
2234 __ jumpl_to(dest, O7, O7);
2235 #else
2236 __ call(method->native_function(), relocInfo::runtime_call_type);
2237 #endif
2238 __ delayed()->st(G3_scratch, flags);
2240 __ restore_thread(L7_thread_cache); // restore G2_thread
2242 // Unpack native results. For int-types, we do any needed sign-extension
2243 // and move things into I0. The return value there will survive any VM
2244 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2245 // specially in the slow-path code.
2246 switch (ret_type) {
2247 case T_VOID: break; // Nothing to do!
2248 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2249 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2250 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0, O1.
2251 case T_LONG:
2252 #ifndef _LP64
2253 __ mov(O1, I1);
2254 #endif
2255 // Fall thru
2256 case T_OBJECT: // Really a handle
2257 case T_ARRAY:
2258 case T_INT:
2259 __ mov(O0, I0);
2260 break;
2261 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2262 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2263 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2264 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2265 // (An oop result cannot be de-handlized until after reclaiming the jvm_lock.)
2266 default:
2267 ShouldNotReachHere();
2268 }
2270 // must we block?
2272 // Block, if necessary, before resuming in _thread_in_Java state.
2273 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2274 { Label no_block;
2275 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2277 // Switch thread to "native transition" state before reading the synchronization state.
2278 // This additional state is necessary because reading and testing the synchronization
2279 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2280 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2281 // VM thread changes sync state to synchronizing and suspends threads for GC.
2282 // Thread A is resumed to finish this native method, but doesn't block here since it
2283 // didn't see any synchronization in progress, and escapes.
2284 __ set(_thread_in_native_trans, G3_scratch);
2285 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2286 if(os::is_MP()) {
2287 if (UseMembar) {
2288 // Force this write out before the read below
2289 __ membar(Assembler::StoreLoad);
2290 } else {
2291 // Write serialization page so VM thread can do a pseudo remote membar.
2292 // We use the current thread pointer to calculate a thread specific
2293 // offset to write to within the page. This minimizes bus traffic
2294 // due to cache line collision.
2295 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2296 }
2297 }
2298 __ load_contents(sync_state, G3_scratch);
2299 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2301 Label L;
2302 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2303 __ br(Assembler::notEqual, false, Assembler::pn, L);
2304 __ delayed()->ld(suspend_state, G3_scratch);
2305 __ cmp(G3_scratch, 0);
2306 __ br(Assembler::equal, false, Assembler::pt, no_block);
2307 __ delayed()->nop();
2308 __ bind(L);
2310 // Block. Save any potential method result value before the operation and
2311 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2312 // lets us share the oopMap we used when we went native rather than create
2313 // a distinct one for this pc.
2314 //
2315 save_native_result(masm, ret_type, stack_slots);
2316 __ call_VM_leaf(L7_thread_cache,
2317 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2318 G2_thread);
2320 // Restore any method result value
2321 restore_native_result(masm, ret_type, stack_slots);
2322 __ bind(no_block);
2323 }
2325 // thread state is thread_in_native_trans. Any safepoint blocking has already
2326 // happened so we can now change state to _thread_in_Java.
2329 __ set(_thread_in_Java, G3_scratch);
2330 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2333 Label no_reguard;
2334 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2335 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2336 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2337 __ delayed()->nop();
2339 save_native_result(masm, ret_type, stack_slots);
2340 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2341 __ delayed()->nop();
2343 __ restore_thread(L7_thread_cache); // restore G2_thread
2344 restore_native_result(masm, ret_type, stack_slots);
2346 __ bind(no_reguard);
2348 // Handle possible exception (will unlock if necessary)
2350 // The native result, if any, is live in an freg or in I0 (and I1 for a long in the 32-bit vm)
2352 // Unlock
2353 if (method->is_synchronized()) {
2354 Label done;
2355 Register I2_ex_oop = I2;
2356 const Register L3_box = L3;
2357 // Get locked oop from the handle we passed to jni
2358 __ ld_ptr(L6_handle, 0, L4);
2359 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2360 // Must save pending exception around the slow-path VM call. Since it's a
2361 // leaf call, the pending exception (if any) can be kept in a register.
2362 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2363 // Now unlock
2364 // (Roop, Rmark, Rbox, Rscratch)
2365 __ compiler_unlock_object(L4, L1, L3_box, L2);
2366 __ br(Assembler::equal, false, Assembler::pt, done);
2367 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2369 // save and restore any potential method result value around the unlocking
2370 // operation. Will save in I0 (or stack for FP returns).
2371 save_native_result(masm, ret_type, stack_slots);
2373 // Must clear pending-exception before re-entering the VM. Since this is
2374 // a leaf call, pending-exception-oop can be safely kept in a register.
2375 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2377 // slow case of monitor enter. Inline a special case of call_VM that
2378 // disallows any pending_exception.
2379 __ mov(L3_box, O1);
2381 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2382 __ delayed()->mov(L4, O0); // Need oop in O0
2384 __ restore_thread(L7_thread_cache); // restore G2_thread
2386 #ifdef ASSERT
2387 { Label L;
2388 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2389 __ br_null(O0, false, Assembler::pt, L);
2390 __ delayed()->nop();
2391 __ stop("no pending exception allowed on exit from IR::monitorexit");
2392 __ bind(L);
2393 }
2394 #endif
2395 restore_native_result(masm, ret_type, stack_slots);
2396 // check_forward_pending_exception jumps to forward_exception if any pending
2397 // exception is set. The forward_exception routine expects to see the
2398 // exception in pending_exception and not in a register. Kind of clumsy,
2399 // since all folks who branch to forward_exception must have tested
2400 // pending_exception first and hence have it in a register already.
2401 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2402 __ bind(done);
2403 }
2405 // Tell dtrace about this method exit
2406 {
2407 SkipIfEqual skip_if(
2408 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2409 save_native_result(masm, ret_type, stack_slots);
2410 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2411 __ call_VM_leaf(L7_thread_cache,
2412 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2413 G2_thread, O1);
2414 restore_native_result(masm, ret_type, stack_slots);
2415 }
2417 // Clear "last Java frame" SP and PC.
2418 __ verify_thread(); // G2_thread must be correct
2419 __ reset_last_Java_frame();
2421 // Unpack oop result
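  // The branch below is annulled: the delay-slot load that dereferences the
  // handle executes only when the handle is non-NULL; otherwise I0 gets NULL.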
2422 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2423 Label L;
2424 __ addcc(G0, I0, G0);
2425 __ brx(Assembler::notZero, true, Assembler::pt, L);
2426 __ delayed()->ld_ptr(I0, 0, I0);
2427 __ mov(G0, I0);
2428 __ bind(L);
2429 __ verify_oop(I0);
2430 }
2432 // reset handle block
2433 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2434 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2436 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2437 check_forward_pending_exception(masm, G3_scratch);
2440 // Return
2442 #ifndef _LP64
2443 if (ret_type == T_LONG) {
2445 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2446 __ sllx(I0, 32, G1); // Shift bits into high G1
2447 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2448 __ or3 (I1, G1, G1); // OR 64 bits into G1
2449 }
2450 #endif
2452 __ ret();
2453 __ delayed()->restore();
2455 __ flush();
2457 nmethod *nm = nmethod::new_native_nmethod(method,
2458 masm->code(),
2459 vep_offset,
2460 frame_complete,
2461 stack_slots / VMRegImpl::slots_per_word,
2462 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2463 in_ByteSize(lock_offset),
2464 oop_maps);
2465 return nm;
2467 }
2469 #ifdef HAVE_DTRACE_H
2470 // ---------------------------------------------------------------------------
2471 // Generate a dtrace nmethod for a given signature. The method takes arguments
2472 // in the Java compiled code convention, marshals them to the native
2473 // abi and then leaves nops at the position you would expect to call a native
2474 // function. When the probe is enabled the nops are replaced with a trap
2475 // instruction that dtrace inserts and the trace will cause a notification
2476 // to dtrace.
2477 //
2478 // The probes are only able to take primitive types and java/lang/String as
2479 // arguments. No other java types are allowed. Strings are converted to utf8
2480 // strings, so that from dtrace's point of view java strings are converted to C
2481 // strings. There is an arbitrary fixed limit on the total space that a method
2482 // can use for converting the strings (256 chars per string in the signature),
2483 // so any java string larger than this is truncated.
2485 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2486 static bool offsets_initialized = false;
2488 static VMRegPair reg64_to_VMRegPair(Register r) {
2489 VMRegPair ret;
2490 if (wordSize == 8) {
2491 ret.set2(r->as_VMReg());
2492 } else {
2493 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2494 }
2495 return ret;
2496 }
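// A small helper: with an 8-byte word size the value occupies a single
// register (set2); on 32-bit it is split across r and its successor.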
2499 nmethod *SharedRuntime::generate_dtrace_nmethod(
2500 MacroAssembler *masm, methodHandle method) {
2503 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2504 // be single threaded in this method.
2505 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2507 // Fill in the signature array, for the calling-convention call.
2508 int total_args_passed = method->size_of_parameters();
2510 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2511 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2513 // The signature we are going to use for the trap that dtrace will see:
2514 // java/lang/String is converted to a C string, we drop "this", and any other
2515 // object is converted to NULL. (A one-slot java/lang/Long object reference
2516 // is converted to a two-slot long, which is why we double the allocation).
2517 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2518 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2520 int i=0;
2521 int total_strings = 0;
2522 int first_arg_to_pass = 0;
2523 int total_c_args = 0;
2525 // Skip the receiver as dtrace doesn't want to see it
2526 if( !method->is_static() ) {
2527 in_sig_bt[i++] = T_OBJECT;
2528 first_arg_to_pass = 1;
2529 }
2531 SignatureStream ss(method->signature());
2532 for ( ; !ss.at_return_type(); ss.next()) {
2533 BasicType bt = ss.type();
2534 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2535 out_sig_bt[total_c_args++] = bt;
2536 if( bt == T_OBJECT) {
2537 symbolOop s = ss.as_symbol_or_null();
2538 if (s == vmSymbols::java_lang_String()) {
2539 total_strings++;
2540 out_sig_bt[total_c_args-1] = T_ADDRESS;
2541 } else if (s == vmSymbols::java_lang_Boolean() ||
2542 s == vmSymbols::java_lang_Byte()) {
2543 out_sig_bt[total_c_args-1] = T_BYTE;
2544 } else if (s == vmSymbols::java_lang_Character() ||
2545 s == vmSymbols::java_lang_Short()) {
2546 out_sig_bt[total_c_args-1] = T_SHORT;
2547 } else if (s == vmSymbols::java_lang_Integer() ||
2548 s == vmSymbols::java_lang_Float()) {
2549 out_sig_bt[total_c_args-1] = T_INT;
2550 } else if (s == vmSymbols::java_lang_Long() ||
2551 s == vmSymbols::java_lang_Double()) {
2552 out_sig_bt[total_c_args-1] = T_LONG;
2553 out_sig_bt[total_c_args++] = T_VOID;
2554 }
2555 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2556 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2557 // We convert double to long
2558 out_sig_bt[total_c_args-1] = T_LONG;
2559 out_sig_bt[total_c_args++] = T_VOID;
2560 } else if ( bt == T_FLOAT) {
2561 // We convert float to int
2562 out_sig_bt[total_c_args-1] = T_INT;
2563 }
2564 }
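  // For example, for a hypothetical probe method with java signature
  // (ILjava/lang/String;D)V, the C signature dtrace sees becomes
  // { T_INT, T_ADDRESS, T_LONG, T_VOID }.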
2566 assert(i==total_args_passed, "validly parsed signature");
2568 // Now get the compiled-Java layout as input arguments
2569 int comp_args_on_stack;
2570 comp_args_on_stack = SharedRuntime::java_calling_convention(
2571 in_sig_bt, in_regs, total_args_passed, false);
2573 // We have received a description of where all the java args are located
2574 // on entry to the wrapper. We need to convert these args to where
2575 // a native (non-jni) function would expect them. To figure out
2576 // where they go we convert the java signature to a C signature and remove
2577 // T_VOID for any long/double we might have received.
2580 // Now figure out where the args must be stored and how much stack space
2581 // they require (neglecting out_preserve_stack_slots but including space
2582 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
2583 //
2584 int out_arg_slots;
2585 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2587 // Calculate the total number of stack slots we will need.
2589 // First count the abi requirement plus all of the outgoing args
2590 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2592 // Plus a temp for possible conversion of float/double/long register args
2594 int conversion_temp = stack_slots;
2595 stack_slots += 2;
2598 // Now space for the string(s) we must convert
2600 int string_locs = stack_slots;
2601 stack_slots += total_strings *
2602 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2604 // Ok The space we have allocated will look like:
2605 //
2606 //
2607 // FP-> | |
2608 // |---------------------|
2609 // | string[n] |
2610 // |---------------------| <- string_locs[n]
2611 // | string[n-1] |
2612 // |---------------------| <- string_locs[n-1]
2613 // | ... |
2614 // | ... |
2615 // |---------------------| <- string_locs[1]
2616 // | string[0] |
2617 // |---------------------| <- string_locs[0]
2618 // | temp |
2619 // |---------------------| <- conversion_temp
2620 // | outbound memory |
2621 // | based arguments |
2622 // | |
2623 // |---------------------|
2624 // | |
2625 // SP-> | out_preserved_slots |
2626 //
2627 //
2629 // Now compute actual number of stack words we need rounding to make
2630 // stack properly aligned.
2631 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2633 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2635 intptr_t start = (intptr_t)__ pc();
2637 // First thing make an ic check to see if we should even be here
2639 {
2640 Label L;
2641 const Register temp_reg = G3_scratch;
2642 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2643 __ verify_oop(O0);
2644 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2645 __ cmp(temp_reg, G5_inline_cache_reg);
2646 __ brx(Assembler::equal, true, Assembler::pt, L);
2647 __ delayed()->nop();
2649 __ jump_to(ic_miss, temp_reg);
2650 __ delayed()->nop();
2651 __ align(CodeEntryAlignment);
2652 __ bind(L);
2653 }
2655 int vep_offset = ((intptr_t)__ pc()) - start;
2658 // The instruction at the verified entry point must be 5 bytes or longer
2659 // because it can be patched on the fly by make_non_entrant. The stack bang
2660 // instruction fits that requirement.
2662 // Generate stack overflow check before creating frame
2663 __ generate_stack_overflow_check(stack_size);
2665 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2666 "valid size for make_non_entrant");
2668 // Generate a new frame for the wrapper.
2669 __ save(SP, -stack_size, SP);
2671 // Frame is now completed as far as size and linkage.
2673 int frame_complete = ((intptr_t)__ pc()) - start;
2675 #ifdef ASSERT
2676 bool reg_destroyed[RegisterImpl::number_of_registers];
2677 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2678 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2679 reg_destroyed[r] = false;
2680 }
2681 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2682 freg_destroyed[f] = false;
2683 }
2685 #endif /* ASSERT */
2687 VMRegPair zero;
2688 const Register g0 = G0; // without this we get a compiler warning (why??)
2689 zero.set2(g0->as_VMReg());
2691 int c_arg, j_arg;
2693 Register conversion_off = noreg;
2695 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2696 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2698 VMRegPair src = in_regs[j_arg];
2699 VMRegPair dst = out_regs[c_arg];
2701 #ifdef ASSERT
2702 if (src.first()->is_Register()) {
2703 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2704 } else if (src.first()->is_FloatRegister()) {
2705 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2706 FloatRegisterImpl::S)], "ack!");
2707 }
2708 if (dst.first()->is_Register()) {
2709 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2710 } else if (dst.first()->is_FloatRegister()) {
2711 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2712 FloatRegisterImpl::S)] = true;
2713 }
2714 #endif /* ASSERT */
2716 switch (in_sig_bt[j_arg]) {
2717 case T_ARRAY:
2718 case T_OBJECT:
2719 {
2720 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
2721 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2722 // need to unbox a one-slot value
2723 Register in_reg = L0;
2724 Register tmp = L2;
2725 if ( src.first()->is_reg() ) {
2726 in_reg = src.first()->as_Register();
2727 } else {
2728 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2729 "must be");
2730 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2731 }
2732 // If the final destination is an acceptable register
2733 if ( dst.first()->is_reg() ) {
2734 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2735 tmp = dst.first()->as_Register();
2736 }
2737 }
2739 Label skipUnbox;
2740 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2741 __ mov(G0, tmp->successor());
2742 }
2743 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2744 __ delayed()->mov(G0, tmp);
2746 BasicType bt = out_sig_bt[c_arg];
2747 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2748 switch (bt) {
2749 case T_BYTE:
2750 __ ldub(in_reg, box_offset, tmp); break;
2751 case T_SHORT:
2752 __ lduh(in_reg, box_offset, tmp); break;
2753 case T_INT:
2754 __ ld(in_reg, box_offset, tmp); break;
2755 case T_LONG:
2756 __ ld_long(in_reg, box_offset, tmp); break;
2757 default: ShouldNotReachHere();
2758 }
2760 __ bind(skipUnbox);
2761 // If tmp wasn't final destination copy to final destination
2762 if (tmp == L2) {
2763 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2764 if (out_sig_bt[c_arg] == T_LONG) {
2765 long_move(masm, tmp_as_VM, dst);
2766 } else {
2767 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2768 }
2769 }
2770 if (out_sig_bt[c_arg] == T_LONG) {
2771 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2772 ++c_arg; // move over the T_VOID to keep the loop indices in sync
2773 }
2774 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2775 Register s =
2776 src.first()->is_reg() ? src.first()->as_Register() : L2;
2777 Register d =
2778 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2780 // We store the oop now so that the conversion pass can reach it
2781 // while in the inner frame. This will be the only store if
2782 // the oop is NULL.
2783 if (s != L2) {
2784 // src is register
2785 if (d != L2) {
2786 // dst is register
2787 __ mov(s, d);
2788 } else {
2789 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2790 STACK_BIAS), "must be");
2791 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2792 }
2793 } else {
2794 // src not a register
2795 assert(Assembler::is_simm13(reg2offset(src.first()) +
2796 STACK_BIAS), "must be");
2797 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2798 if (d == L2) {
2799 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2800 STACK_BIAS), "must be");
2801 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2802 }
2803 }
2804 } else if (out_sig_bt[c_arg] != T_VOID) {
2805 // Convert the arg to NULL
2806 if (dst.first()->is_reg()) {
2807 __ mov(G0, dst.first()->as_Register());
2808 } else {
2809 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2810 STACK_BIAS), "must be");
2811 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2812 }
2813 }
2814 }
2815 break;
2816 case T_VOID:
2817 break;
2819 case T_FLOAT:
2820 if (src.first()->is_stack()) {
2821 // Stack to stack/reg is simple
2822 move32_64(masm, src, dst);
2823 } else {
2824 if (dst.first()->is_reg()) {
2825 // freg -> reg
2826 int off =
2827 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2828 Register d = dst.first()->as_Register();
2829 if (Assembler::is_simm13(off)) {
2830 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2831 SP, off);
2832 __ ld(SP, off, d);
2833 } else {
2834 if (conversion_off == noreg) {
2835 __ set(off, L6);
2836 conversion_off = L6;
2837 }
2838 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2839 SP, conversion_off);
2840 __ ld(SP, conversion_off , d);
2841 }
2842 } else {
2843 // freg -> mem
2844 int off = STACK_BIAS + reg2offset(dst.first());
2845 if (Assembler::is_simm13(off)) {
2846 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2847 SP, off);
2848 } else {
2849 if (conversion_off == noreg) {
2850 __ set(off, L6);
2851 conversion_off = L6;
2852 }
2853 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2854 SP, conversion_off);
2855 }
2856 }
2857 }
2858 break;
2860 case T_DOUBLE:
2861 assert( j_arg + 1 < total_args_passed &&
2862 in_sig_bt[j_arg + 1] == T_VOID &&
2863 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2864 if (src.first()->is_stack()) {
2865 // Stack to stack/reg is simple
2866 long_move(masm, src, dst);
2867 } else {
2868 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2870 // Destination could be an odd reg on 32bit in which case
2871 // we can't load directly to the destination.
2873 if (!d->is_even() && wordSize == 4) {
2874 d = L2;
2875 }
2876 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2877 if (Assembler::is_simm13(off)) {
2878 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2879 SP, off);
2880 __ ld_long(SP, off, d);
2881 } else {
2882 if (conversion_off == noreg) {
2883 __ set(off, L6);
2884 conversion_off = L6;
2885 }
2886 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2887 SP, conversion_off);
2888 __ ld_long(SP, conversion_off, d);
2889 }
2890 if (d == L2) {
2891 long_move(masm, reg64_to_VMRegPair(L2), dst);
2892 }
2893 }
2894 break;
2896 case T_LONG :
2897 // 32bit can't do a split move of something like g1 -> O0, O1
2898 // so use a memory temp
2899 if (src.is_single_phys_reg() && wordSize == 4) {
2900 Register tmp = L2;
2901 if (dst.first()->is_reg() &&
2902 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2903 tmp = dst.first()->as_Register();
2904 }
2906 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2907 if (Assembler::is_simm13(off)) {
2908 __ stx(src.first()->as_Register(), SP, off);
2909 __ ld_long(SP, off, tmp);
2910 } else {
2911 if (conversion_off == noreg) {
2912 __ set(off, L6);
2913 conversion_off = L6;
2914 }
2915 __ stx(src.first()->as_Register(), SP, conversion_off);
2916 __ ld_long(SP, conversion_off, tmp);
2917 }
2919 if (tmp == L2) {
2920 long_move(masm, reg64_to_VMRegPair(L2), dst);
2921 }
2922 } else {
2923 long_move(masm, src, dst);
2924 }
2925 break;
2927 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2929 default:
2930 move32_64(masm, src, dst);
2931 }
2932 }
2935 // If we have any strings we must store any register-based args to the stack.
2936 // This includes any still-live float registers too.
2938 if (total_strings > 0 ) {
2940 // protect all the arg registers
2941 __ save_frame(0);
2942 __ mov(G2_thread, L7_thread_cache);
2943 const Register L2_string_off = L2;
2945 // Get first string offset
2946 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2948 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2949 if (out_sig_bt[c_arg] == T_ADDRESS) {
2951 VMRegPair dst = out_regs[c_arg];
2952 const Register d = dst.first()->is_reg() ?
2953 dst.first()->as_Register()->after_save() : noreg;
2955 // It's a string; the oop was already copied to the out arg
2956 // position
2957 if (d != noreg) {
2958 __ mov(d, O0);
2959 } else {
2960 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2961 "must be");
2962 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
2963 }
2964 Label skip;
2966 __ br_null(O0, false, Assembler::pn, skip);
2967 __ delayed()->add(FP, L2_string_off, O1);
2969 if (d != noreg) {
2970 __ mov(O1, d);
2971 } else {
2972 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2973 "must be");
2974 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
2975 }
2977 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
2978 relocInfo::runtime_call_type);
2979 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
2981 __ bind(skip);
2983 }
2985 }
2986 __ mov(L7_thread_cache, G2_thread);
2987 __ restore();
2989 }
2992 // Ok, now we are done. Place the nop that dtrace wants so that it can
2993 // patch in the trap.
2995 int patch_offset = ((intptr_t)__ pc()) - start;
2997 __ nop();
3000 // Return
3002 __ ret();
3003 __ delayed()->restore();
3005 __ flush();
3007 nmethod *nm = nmethod::new_dtrace_nmethod(
3008 method, masm->code(), vep_offset, patch_offset, frame_complete,
3009 stack_slots / VMRegImpl::slots_per_word);
3010 return nm;
3012 }
3014 #endif // HAVE_DTRACE_H
3016 // This function returns the adjusted size (in number of words) of a c2i
3017 // adapter activation for use during deoptimization.
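// For example, a callee with 2 parameters and 5 locals is adjusted by
// (5 - 2) * Interpreter::stackElementWords words, rounded up to a
// multiple of WordsPerLong.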
3018 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3019 assert(callee_locals >= callee_parameters,
3020 "test and remove; got more parms than locals");
3021 if (callee_locals < callee_parameters)
3022 return 0; // No adjustment for negative locals
3023 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3024 return round_to(diff, WordsPerLong);
3025 }
3027 // "Top of Stack" slots that may be unused by the calling convention but must
3028 // otherwise be preserved.
3029 // On Intel these are not necessary and the value can be zero.
3030 // On Sparc this describes the words reserved for storing a register window
3031 // when an interrupt occurs.
3032 uint SharedRuntime::out_preserve_stack_slots() {
3033 return frame::register_save_words * VMRegImpl::slots_per_word;
3034 }
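// For example, if frame::register_save_words is 16 (one full register
// window) and VMRegImpl::slots_per_word is 2, this reserves 32 slots;
// the actual constants depend on the platform build, so treat these
// numbers as illustrative only.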
3036 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3037 //
3038 // Common out the new frame generation for deopt and uncommon trap
3039 //
3040 Register G3pcs = G3_scratch; // Array of new pcs (input)
3041 Register Oreturn0 = O0;
3042 Register Oreturn1 = O1;
3043 Register O2UnrollBlock = O2;
3044 Register O3array = O3; // Array of frame sizes (input)
3045 Register O4array_size = O4; // number of frames (input)
3046 Register O7frame_size = O7; // size of current frame (scratch)
3048 __ ld_ptr(O3array, 0, O7frame_size); // load this frame's size
3049 __ sub(G0, O7frame_size, O7frame_size); // negate it for the save
3050 __ save(SP, O7frame_size, SP); // push the new frame
3051 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3053 #ifdef ASSERT
3054 // make sure that the frames are aligned properly
3055 #ifndef _LP64
3056 __ btst(wordSize*2-1, SP);
3057 __ breakpoint_trap(Assembler::notZero);
3058 #endif
3059 #endif
3061 // Deopt needs to pass some extra live values from frame to frame
3063 if (deopt) {
3064 __ mov(Oreturn0->after_save(), Oreturn0);
3065 __ mov(Oreturn1->after_save(), Oreturn1);
3066 }
3068 __ mov(O4array_size->after_save(), O4array_size);
3069 __ sub(O4array_size, 1, O4array_size);
3070 __ mov(O3array->after_save(), O3array);
3071 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3072 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3074 #ifdef ASSERT
3075 // trash registers to show a clear pattern in backtraces
3076 __ set(0xDEAD0000, I0);
3077 __ add(I0, 2, I1);
3078 __ add(I0, 4, I2);
3079 __ add(I0, 6, I3);
3080 __ add(I0, 8, I4);
3081 // Don't touch I5 could have valuable savedSP
3082 __ set(0xDEADBEEF, L0);
3083 __ mov(L0, L1);
3084 __ mov(L0, L2);
3085 __ mov(L0, L3);
3086 __ mov(L0, L4);
3087 __ mov(L0, L5);
3089 // trash the return value as there is nothing to return yet
3090 __ set(0xDEAD0001, O7);
3091 #endif
3093 __ mov(SP, O5_savedSP);
3094 }
3097 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3098 //
3099 // loop through the UnrollBlock info and create new frames
3100 //
3101 Register G3pcs = G3_scratch;
3102 Register Oreturn0 = O0;
3103 Register Oreturn1 = O1;
3104 Register O2UnrollBlock = O2;
3105 Register O3array = O3;
3106 Register O4array_size = O4;
3107 Label loop;
3109 // Before we make new frames, check to see if stack is available.
3110 // Do this after the caller's return address is on top of the stack.
3111 if (UseStackBanging) {
3112 // Get total frame size for interpreted frames
3113 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3114 __ bang_stack_size(O4, O3, G3_scratch);
3115 }
3117 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3118 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3119 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3121 // Adjust old interpreter frame to make space for new frame's extra java locals
3122 //
3123 // We capture the original sp for the transition frame only because it is needed in
3124 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3125 // every interpreter frame captures a savedSP it is only needed at the transition
3126 // (fortunately). If we had to have it correct everywhere then we would need to
3127 // be told the sp_adjustment for each frame we create. If the frame size array
3128 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3129 // for each frame we create and keep up the illusion everywhere.
3130 //
3132 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3133 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3134 __ sub(SP, O7, SP);
3136 #ifdef ASSERT
3137 // make sure that there is at least one entry in the array
3138 __ tst(O4array_size);
3139 __ breakpoint_trap(Assembler::zero);
3140 #endif
3142 // Now push the new interpreter frames
3143 __ bind(loop);
3145 // allocate a new frame, filling the registers
3147 gen_new_frame(masm, deopt); // allocate an interpreter frame
3149 __ tst(O4array_size);
3150 __ br(Assembler::notZero, false, Assembler::pn, loop);
3151 __ delayed()->add(O3array, wordSize, O3array);
3152 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
3154 }
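// In outline, the generated loop above behaves like this pseudocode
// (illustrative only; the real work happens in SPARC code emitted by
// gen_new_frame):
//
//   while (O4array_size-- > 0) {
//     size = *O3array++;       // next frame size from the UnrollBlock
//     save(SP, -size, SP);     // push a new register window/frame
//     I7 = *G3pcs++;           // give that frame its new return pc
//   }
//   O7 = *G3pcs;               // pc for the final frame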
3156 //------------------------------generate_deopt_blob----------------------------
3157 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3158 // instead.
3159 void SharedRuntime::generate_deopt_blob() {
3160 // allocate space for the code
3161 ResourceMark rm;
3162 // setup code generation tools
3163 int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
3164 #ifdef _LP64
3165 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3166 #else
3167 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3168 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3169 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3170 #endif /* _LP64 */
3171 MacroAssembler* masm = new MacroAssembler(&buffer);
3172 FloatRegister Freturn0 = F0;
3173 Register Greturn1 = G1;
3174 Register Oreturn0 = O0;
3175 Register Oreturn1 = O1;
3176 Register O2UnrollBlock = O2;
3177 Register L0deopt_mode = L0;
3178 Register G4deopt_mode = G4_scratch;
3179 int frame_size_words;
3180 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3181 #if !defined(_LP64) && defined(COMPILER2)
3182 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3183 #endif
3184 Label cont;
3186 OopMapSet *oop_maps = new OopMapSet();
3188 //
3189 // This is the entry point for code which is returning to a de-optimized
3190 // frame.
3191 // The steps taken by this frame are as follows:
3192 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3193 // and all potentially live registers (at a pollpoint many registers can be live).
3194 //
3195 // - call the C routine: Deoptimization::fetch_unroll_info (this function
3196 // returns information about the number and size of interpreter frames
3197 // which are equivalent to the frame which is being deoptimized)
3198 // - deallocate the unpack frame, restoring only result values. Other
3199 // volatile registers will now be captured in the vframeArray as needed.
3200 // - deallocate the deoptimization frame
3201 // - in a loop using the information returned in the previous step
3202 // push new interpreter frames (take care to propagate the return
3203 // values through each new frame pushed)
3204 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3205 // - call the C routine: Deoptimization::unpack_frames (this function
3206 // lays out values on the interpreter frame which was just created)
3207 // - deallocate the dummy unpack_frame
3208 // - ensure that all the return values are correctly set and then do
3209 // a return to the interpreter entry point
3210 //
3211 // Refer to the following methods for more information:
3212 // - Deoptimization::fetch_unroll_info
3213 // - Deoptimization::unpack_frames
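// A rough picture of the stack during the steps above (illustrative):
//
//   start:   ... | caller | deoptee (compiled frame)
//   step 1:  ... | caller | deoptee | register-save frame
//   step 2:  ... | caller | interp frame 0 | ... | interp frame n-1
//   step 3:  ... | caller | interp frames | dummy unpack_frame
//   finish:  ... | caller | interp frames   (return into the interpreter)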
3215 OopMap* map = NULL;
3217 int start = __ offset();
3219 // restore G2, the trampoline destroyed it
3220 __ get_thread();
3222 // On entry we have been called by the deoptimized nmethod with a call that
3223 // replaced the original call (or safepoint polling location) so the deoptimizing
3224 // pc is now in O7. Return values are still in the expected places.
3226 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3227 __ ba(false, cont);
3228 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3230 int exception_offset = __ offset() - start;
3232 // restore G2, the trampoline destroyed it
3233 __ get_thread();
3235 // On entry we have been jumped to by the exception handler (or exception_blob
3236 // for server). O0 contains the exception oop and O7 contains the original
3237 // exception pc. So if we push a frame here it will look to the
3238 // stack walking code (fetch_unroll_info) just like a normal call so
3239 // state will be extracted normally.
3241 // save exception oop in JavaThread and fall through into the
3242 // exception_in_tls case since they are handled in the same way except
3243 // for where the pending exception is kept.
3244 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3246 //
3247 // Vanilla deoptimization with an exception pending in exception_oop
3248 //
3249 int exception_in_tls_offset = __ offset() - start;
3251 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3252 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3254 // Restore G2_thread
3255 __ get_thread();
3257 #ifdef ASSERT
3258 {
3259 // verify that there is really an exception oop in exception_oop
3260 Label has_exception;
3261 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3262 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
3263 __ delayed()->nop();
3264 __ stop("no exception in thread");
3265 __ bind(has_exception);
3267 // verify that there is no pending exception
3268 Label no_pending_exception;
3269 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3270 __ ld_ptr(exception_addr, Oexception);
3271 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3272 __ delayed()->nop();
3273 __ stop("must not have pending exception here");
3274 __ bind(no_pending_exception);
3275 }
3276 #endif
3278 __ ba(false, cont);
3279 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3281 //
3282 // Reexecute entry, similar to c2 uncommon trap
3283 //
3284 int reexecute_offset = __ offset() - start;
3286 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
3287 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3289 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3291 __ bind(cont);
3293 __ set_last_Java_frame(SP, noreg);
3295 // do the call by hand so we can get the oopmap
3297 __ mov(G2_thread, L7_thread_cache);
3298 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3299 __ delayed()->mov(G2_thread, O0);
3301 // Set an oopmap for the call site; this describes all our saved volatile registers
3303 oop_maps->add_gc_map( __ offset()-start, map);
3305 __ mov(L7_thread_cache, G2_thread);
3307 __ reset_last_Java_frame();
3309 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3310 // so this move will survive
3312 __ mov(L0deopt_mode, G4deopt_mode);
3314 __ mov(O0, O2UnrollBlock->after_save());
3316 RegisterSaver::restore_result_registers(masm);
3318 Label noException;
3319 __ cmp(G4deopt_mode, Deoptimization::Unpack_exception); // Was exception pending?
3320 __ br(Assembler::notEqual, false, Assembler::pt, noException);
3321 __ delayed()->nop();
3323 // Move the pending exception from exception_oop to Oexception so
3324 // the pending exception will be picked up by the interpreter.
3325 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3326 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3327 __ bind(noException);
3329 // deallocate the deoptimization frame taking care to preserve the return values
3330 __ mov(Oreturn0, Oreturn0->after_save());
3331 __ mov(Oreturn1, Oreturn1->after_save());
3332 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3333 __ restore();
3335 // Allocate new interpreter frame(s) and possible c2i adapter frame
3337 make_new_frames(masm, true);
3339 // push a dummy "unpack_frame", taking care of float return values, and
3340 // call Deoptimization::unpack_frames to have the unpacker lay out
3341 // the information in the interpreter frames just created, and then return
3342 // to the interpreter entry point
3343 __ save(SP, -frame_size_words*wordSize, SP);
3344 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3345 #if !defined(_LP64)
3346 #if defined(COMPILER2)
3347 if (!TieredCompilation) {
3348 // 32-bit 1-register longs return longs in G1
3349 __ stx(Greturn1, saved_Greturn1_addr);
3350 }
3351 #endif
3352 __ set_last_Java_frame(SP, noreg);
3353 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3354 #else
3355 // LP64 uses g4 in set_last_Java_frame
3356 __ mov(G4deopt_mode, O1);
3357 __ set_last_Java_frame(SP, G0);
3358 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3359 #endif
3360 __ reset_last_Java_frame();
3361 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3363 // In tiered we never use C2 to compile methods returning longs so
3364 // the result is where we expect it already.
3366 #if !defined(_LP64) && defined(COMPILER2)
3367 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3368 // I0/I1 if the return value is long. In the tiered world there is
3369 // a mismatch between how C1 and C2 compile methods that return longs,
3370 // so compilation of such methods is currently disabled for C2, and so
3371 // is this code. Eventually C1 and C2 will do the same thing for longs
3372 // in the tiered world.
3373 if (!TieredCompilation) {
3374 Label not_long;
3375 __ cmp(O0, T_LONG);
3376 __ br(Assembler::notEqual, false, Assembler::pt, not_long);
3377 __ delayed()->nop();
3378 __ ldd(saved_Greturn1_addr, I0);
3379 __ bind(not_long);
3380 }
3381 #endif
3382 __ ret();
3383 __ delayed()->restore();
3385 masm->flush();
3386 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3387 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3388 }
3390 #ifdef COMPILER2
3392 //------------------------------generate_uncommon_trap_blob--------------------
3393 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3394 // instead.
3395 void SharedRuntime::generate_uncommon_trap_blob() {
3396 // allocate space for the code
3397 ResourceMark rm;
3398 // setup code generation tools
3399 int pad = VerifyThread ? 512 : 0;
3400 #ifdef _LP64
3401 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3402 #else
3403 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3404 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3405 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3406 #endif
3407 MacroAssembler* masm = new MacroAssembler(&buffer);
3408 Register O2UnrollBlock = O2;
3409 Register O2klass_index = O2;
3411 //
3412 // This is the entry point for all traps the compiler takes when it thinks
3413 // it cannot handle further execution of compiled code. The frame is
3414 // deoptimized in these cases and converted into interpreter frames for
3415 // execution.
3416 // The steps taken by this frame are as follows:
3417 // - push a fake "unpack_frame"
3418 // - call the C routine Deoptimization::uncommon_trap (this function
3419 // packs the current compiled frame into vframe arrays and returns
3420 // information about the number and size of interpreter frames which
3421 // are equivalent to the frame which is being deoptimized)
3422 // - deallocate the "unpack_frame"
3423 // - deallocate the deoptimization frame
3424 // - in a loop using the information returned in the previous step
3425 // push interpreter frames;
3426 // - create a dummy "unpack_frame"
3427 // - call the C routine: Deoptimization::unpack_frames (this function
3428 // lays out values on the interpreter frame which was just created)
3429 // - deallocate the dummy unpack_frame
3430 // - return to the interpreter entry point
3431 //
3432 // Refer to the following methods for more information:
3433 // - Deoptimization::uncommon_trap
3434 // - Deoptimization::unpack_frames
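// Sketch of the resulting control flow (illustrative):
//
//   compiled code --uncommon trap--> this blob
//     save_frame; call Deoptimization::uncommon_trap(thread, klass_index)
//     pop the compiled frame; make_new_frames(masm, false)
//     save_frame; call unpack_frames(thread, Unpack_uncommon_trap)
//     ret into the interpreter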
3436 // the unloaded class index is in O0 (first parameter to this blob)
3438 // push a dummy "unpack_frame"
3439 // and call Deoptimization::uncommon_trap to pack the compiled frame into
3440 // vframe array and return the UnrollBlock information
3441 __ save_frame(0);
3442 __ set_last_Java_frame(SP, noreg);
3443 __ mov(I0, O2klass_index);
3444 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3445 __ reset_last_Java_frame();
3446 __ mov(O0, O2UnrollBlock->after_save());
3447 __ restore();
3449 // deallocate the deoptimized frame taking care to preserve the return values
3450 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3451 __ restore();
3453 // Allocate new interpreter frame(s) and possible c2i adapter frame
3455 make_new_frames(masm, false);
3457 // push a dummy "unpack_frame", taking care of float return values, and
3458 // call Deoptimization::unpack_frames to have the unpacker lay out
3459 // the information in the interpreter frames just created, and then return
3460 // to the interpreter entry point
3461 __ save_frame(0);
3462 __ set_last_Java_frame(SP, noreg);
3463 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3464 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3465 __ reset_last_Java_frame();
3466 __ ret();
3467 __ delayed()->restore();
3469 masm->flush();
3470 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3471 }
3473 #endif // COMPILER2
3475 //------------------------------generate_handler_blob-------------------
3476 //
3477 // Generate a special Compile2Runtime blob that saves all registers, and sets
3478 // up an OopMap.
3479 //
3480 // This blob is jumped to (via a breakpoint and the signal handler) from a
3481 // safepoint in compiled code. On entry to this blob, O7 contains the
3482 // address in the original nmethod at which we should resume normal execution.
3483 // Thus, this blob looks like a subroutine which must preserve lots of
3484 // registers and return normally. Note that O7 is never register-allocated,
3485 // so it is guaranteed to be free here.
3486 //
3488 // The hardest part of what this blob must do is to save the 64-bit %o
3489 // registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
3490 // an interrupt will chop off their heads. Making space in the caller's frame
3491 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3492 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3493 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
3494 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3495 // Tricky, tricky, tricky...
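// In pseudo-assembly the 32-bit dance looks roughly like this
// (illustrative; the real sequence lives in RegisterSaver):
//
//   sub  %sp, extra, %sp       ! make room below the caller's frame
//   stx  %o0..%o5, [%sp + ..]  ! store 64-bit %o's while they are still %o's
//   save %sp, -frame, %sp      ! window rotates, %o's become %i's
//   add  %fp, extra, %fp       ! restore the caller's SP (our FP)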
3497 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
3498 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3500 // allocate space for the code
3501 ResourceMark rm;
3502 // setup code generation tools
3503 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3504 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3505 // even larger with TraceJumps
3506 int pad = TraceJumps ? 512 : 0;
3507 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3508 MacroAssembler* masm = new MacroAssembler(&buffer);
3509 int frame_size_words;
3510 OopMapSet *oop_maps = new OopMapSet();
3511 OopMap* map = NULL;
3513 int start = __ offset();
3515 // If this causes a return before the processing, then do a "restore"
3516 if (cause_return) {
3517 __ restore();
3518 } else {
3519 // Make it look like we were called via the poll
3520 // so that the frame constructor always sees a valid return address
3521 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3522 __ sub(O7, frame::pc_return_offset, O7);
3523 }
3525 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3527 // setup last_Java_sp (blows G4)
3528 __ set_last_Java_frame(SP, noreg);
3530 // call into the runtime to handle the safepoint poll
3531 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3532 __ mov(G2_thread, O0);
3533 __ save_thread(L7_thread_cache);
3534 __ call(call_ptr);
3535 __ delayed()->nop();
3537 // Set an oopmap for the call site.
3538 // We need this not only for callee-saved registers, but also for volatile
3539 // registers that the compiler might be keeping live across a safepoint.
3541 oop_maps->add_gc_map( __ offset() - start, map);
3543 __ restore_thread(L7_thread_cache);
3544 // clear last_Java_sp
3545 __ reset_last_Java_frame();
3547 // Check for exceptions
3548 Label pending;
3550 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3551 __ tst(O1);
3552 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3553 __ delayed()->nop();
3555 RegisterSaver::restore_live_registers(masm);
3557 // We are back to the original state on entry and ready to go.
3559 __ retl();
3560 __ delayed()->nop();
3562 // Pending exception after the safepoint
3564 __ bind(pending);
3566 RegisterSaver::restore_live_registers(masm);
3568 // We are back to the original state on entry.
3570 // Tail-call forward_exception_entry, with the issuing PC in O7,
3571 // so it looks like the original nmethod called forward_exception_entry.
3572 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3573 __ JMP(O0, 0);
3574 __ delayed()->nop();
3576 // -------------
3577 // make sure all code is generated
3578 masm->flush();
3580 // return exception blob
3581 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3582 }
3584 //
3585 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3586 //
3587 // Generate a stub that calls into the VM to find out the proper destination
3588 // of a java call. All the argument registers are live at this point
3589 // but since this is generic code we don't know what they are and the caller
3590 // must do any gc of the args.
3591 //
3592 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
3593 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3595 // allocate space for the code
3596 ResourceMark rm;
3597 // setup code generation tools
3598 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3599 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3600 // even larger with TraceJumps
3601 int pad = TraceJumps ? 512 : 0;
3602 CodeBuffer buffer(name, 1600 + pad, 512);
3603 MacroAssembler* masm = new MacroAssembler(&buffer);
3604 int frame_size_words;
3605 OopMapSet *oop_maps = new OopMapSet();
3606 OopMap* map = NULL;
3608 int start = __ offset();
3610 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3612 int frame_complete = __ offset();
3614 // setup last_Java_sp (blows G4)
3615 __ set_last_Java_frame(SP, noreg);
3617 // call into the runtime to resolve the call site
3618 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3619 __ mov(G2_thread, O0);
3620 __ save_thread(L7_thread_cache);
3621 __ call(destination, relocInfo::runtime_call_type);
3622 __ delayed()->nop();
3624 // O0 contains the address we are going to jump to, assuming no exception was installed
3626 // Set an oopmap for the call site.
3627 // We need this not only for callee-saved registers, but also for volatile
3628 // registers that the compiler might be keeping live across a safepoint.
3630 oop_maps->add_gc_map( __ offset() - start, map);
3632 __ restore_thread(L7_thread_cache);
3633 // clear last_Java_sp
3634 __ reset_last_Java_frame();
3636 // Check for exceptions
3637 Label pending;
3639 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3640 __ tst(O1);
3641 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3642 __ delayed()->nop();
3644 // get the returned methodOop
3646 __ get_vm_result(G5_method);
3647 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3649 // O0 is where we want to jump, overwrite G3 which is saved and scratch
3651 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3653 RegisterSaver::restore_live_registers(masm);
3655 // We are back to the original state on entry and ready to go.
3657 __ JMP(G3, 0);
3658 __ delayed()->nop();
3660 // Pending exception after the safepoint
3662 __ bind(pending);
3664 RegisterSaver::restore_live_registers(masm);
3666 // We are back to the original state on entry.
3668 // Tail-call forward_exception_entry, with the issuing PC in O7,
3669 // so it looks like the original nmethod called forward_exception_entry.
3670 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3671 __ JMP(O0, 0);
3672 __ delayed()->nop();
3674 // -------------
3675 // make sure all code is generated
3676 masm->flush();
3678 // return the blob
3679 // frame_size_words or bytes??
3680 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3681 }
3683 void SharedRuntime::generate_stubs() {
3685 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
3686 "wrong_method_stub");
3688 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
3689 "ic_miss_stub");
3691 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
3692 "resolve_opt_virtual_call");
3694 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
3695 "resolve_virtual_call");
3697 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
3698 "resolve_static_call");
3700 _polling_page_safepoint_handler_blob =
3701 generate_handler_blob(CAST_FROM_FN_PTR(address,
3702 SafepointSynchronize::handle_polling_page_exception), false);
3704 _polling_page_return_handler_blob =
3705 generate_handler_blob(CAST_FROM_FN_PTR(address,
3706 SafepointSynchronize::handle_polling_page_exception), true);
3708 generate_deopt_blob();
3710 #ifdef COMPILER2
3711 generate_uncommon_trap_blob();
3712 #endif // COMPILER2
3713 }