Sun, 13 Apr 2008 17:43:42 -0400
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
/*
 * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or at a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8-byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };
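
  // Worked example of the layout above (numbers illustrative only; the real
  // values depend on frame::register_save_words_sp_offset and
  // frame::memory_parameter_word_sp_offset). If call_args_area came to 0x5c:
  //   start_of_extra_save_area = (0x5c + 7) & ~7 = 0x60   // 8-byte aligned
  //   g1_offset = 0x60, g3_offset = 0x68, ..., o5_offset = 0xa8
  //   ccr_offset = 0xb0, fsr_offset = 0xb8, d00_offset = 0xc0
  //   register_save_size = 0xc0 + 8*32 = 0x1c0
  // Every slot is 8 bytes wide, so the stx/ldx and double-word stf/ldf
  // accesses below stay aligned.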

 public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
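
// Illustrative usage sketch (hypothetical; the real blob generators appear
// later in this file): a safepoint or deopt blob brackets its runtime call
// with the save/restore pair and records the returned OopMap for the blob.
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set last_Java_frame, call into the VM, reset last_Java_frame ...
//   RegisterSaver::restore_live_registers(masm);
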
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16-byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers
  int offset = d00_offset;
  for( int i=0; i<64; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    if (true) {
      map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    }
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<64; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
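
// Worked example (slot number hypothetical): if r->reg2stack() == 3 and
// out_preserve_stack_slots() reserved 16 slots for the window/ABI area,
// reg2offset(r) would be (3 + 16) * 4 = 76, i.e. a byte offset of 76 from
// the stack pointer (before the caller adds STACK_BIAS).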

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the window
// top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers. Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers. For example, there is
// an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
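
// Worked example (illustrative): on a 64-bit build, the Java signature
// (int, long, double) arrives as
//   sig_bt = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }
// where each T_VOID is the placeholder half of the preceding long/double.
// The int is answered with a single 32-bit slot via regs[i].set1(), while
// the long and the double each get an aligned pair via regs[i].set2(), so
// regs[1].second() == regs[1].first()+1 as described above.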

// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. Values are
// packed in the registers. There is no backing varargs store for values in
// registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack. Then pack the first 8 float args
  // into F0-F7, extras spill to the stack. Then pad all register sets to
  // align. Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args. See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt = 0;
  int flt_reg_cnt = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef COMPILER2
#ifdef _LP64
      // Can't be tiered (yet)
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
      // spare and available. This convention isn't used by the Sparc ABI or
      // anywhere else. If we're tiered then we don't use G-regs because c1
      // can't deal with them as a "pair".
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // _LP64
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
        } else {
          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
        }
        int_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}
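
// Illustrative driver (hypothetical, for exposition only): laying out the
// signature (long, int) for an outgoing compiled call.
//
//   BasicType sig[] = { T_LONG, T_VOID, T_INT };
//   VMRegPair regs[3];
//   int stack_slots =
//       SharedRuntime::java_calling_convention(sig, regs, 3, true);
//   // regs[0] names the long's location, regs[1] is Bad (the placeholder
//   // half), regs[2] names the int's, and stack_slots is the spill space
//   // these arguments need.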

// Helper class, mostly to avoid passing masm everywhere, and to handle
// store displacement overflow logic for LP64.
class AdapterGenerator {
  MacroAssembler *masm;
#ifdef _LP64
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }
#endif // _LP64

  void patch_callers_callsite();
  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  }
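
  // Worked example (numbers assumed for illustration): with an 8-byte
  // Interpreter::stackElementSize() and value_offset_in_bytes() == 0,
  // arg_offset(64) == 64 addresses the current interpreter slot and
  // next_arg_offset(64) == 56 addresses the slot one Java stack element
  // further down, which is where the second half of a long/double lives.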

#ifdef _LP64
  // On _LP64 argument slot values are loaded first into a register
  // because they might not fit into displacement.
  Register arg_slot(const int st_off);
  Register next_arg_slot(const int st_off);
#else
  int arg_slot(const int st_off)      { return arg_offset(st_off); }
  int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
#endif // _LP64

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};

// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live G-regs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                                   Register scratch) {
  if (TaggedStackInterpreter) {
    int tag_off = st_off + Interpreter::tag_offset_in_bytes();
#ifdef _LP64
    Register tag_slot = Rdisp;
    __ set(tag_off, tag_slot);
#else
    int tag_slot = tag_off;
#endif // _LP64
    // have to store zero because local slots can be reused (rats!)
    if (t == frame::TagValue) {
      __ st_ptr(G0, base, tag_slot);
    } else if (t == frame::TagCategory2) {
      __ st_ptr(G0, base, tag_slot);
      int next_tag_off = st_off - Interpreter::stackElementSize() +
                         Interpreter::tag_offset_in_bytes();
#ifdef _LP64
      __ set(next_tag_off, tag_slot);
#else
      tag_slot = next_tag_off;
#endif // _LP64
      __ st_ptr(G0, base, tag_slot);
    } else {
      __ mov(t, scratch);
      __ st_ptr(scratch, base, tag_slot);
    }
  }
}

#ifdef _LP64
Register AdapterGenerator::arg_slot(const int st_off) {
  __ set( arg_offset(st_off), Rdisp);
  return Rdisp;
}

Register AdapterGenerator::next_arg_slot(const int st_off){
  __ set( next_arg_offset(st_off), Rdisp);
  return Rdisp;
}
#endif // _LP64

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef COMPILER2
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#endif // _LP64
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagReference, base, st_off, r);
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, r);
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in the varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize();
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize();

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACK_BIAS, we can run
  // out of bits in the displacement to do loads and stores. Use g3 as a
  // temporary displacement.
  if (! __ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
#endif // _LP64
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // O0             - Flag telling us to restore SP from O5
  // O4_args        - Pointer to interpreter's args
  // O5             - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args

  // O4 is about to get loaded up with compiled callee's args
  __ sub(Gargs, BytesPerWord, Gargs);

#ifdef ASSERT
  {
    // on entry O5_savedSP and SP should be equal
    Label ok;
    __ cmp(O5_savedSP, SP);
    __ br(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("I5_savedSP not set");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // : java stack   :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // : java stack   :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
#ifdef _LP64
    set_Rdisp(G1_scratch);
#endif // _LP64

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        Register slot = (sig_bt[i]==T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle. Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        Register slot = (sig_bt[i]==T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, st_off);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, st_off);
    }
  }

  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset()));
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null(L0, false, Assembler::pt, loop);
    __ delayed()->nop();

    __ restore();
  }

  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the methodOop. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp = L0;   // another scratch register
#else
    Register R_temp = G1;   // another scratch register
#endif

    Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss);
    __ delayed()->nop();

    __ bind(ok);
    // The method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
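
// Worked example (constants assumed for illustration): with
// SPARC_ARGS_IN_REGS_NUM == 6, int_stk_helper(3) returns O3's VMReg.
// For i == 7, mem_parm_offset == 1; if memory_parameter_word_sp_offset
// were 23 and slots_per_word were 2, actual_offset would be (1 + 23) * 2
// = 48, and the result is stack2reg(48 - out_preserve_stack_slots()),
// i.e. a slot that lands on the correct ABI location once the
// preserve-area bias is added back in by the consumer.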

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
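
// Worked example (V9/_LP64 path, illustrative): for a native signature
// (JNIEnv*, jobject, jlong, jfloat) the types arrive as
// { T_ADDRESS, T_OBJECT, T_LONG, T_VOID, T_FLOAT }. j counts 0, 1, 2, -, 3
// (the T_VOID half is not counted), so the first three arguments land in
// O0/O1/O2 via int_stk_helper and the float lands in the odd register
// F7 (as_FloatRegister(1 + (3<<1))).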

// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}
1436 // Check and forward and pending exception. Thread is stored in
1437 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
1438 // is no exception handler. We merely pop this frame off and throw the
1439 // exception in the caller's frame.
1440 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1441 Label L;
1442 __ br_null(Rex_oop, false, Assembler::pt, L);
1443 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1444 // Since this is a native call, we *know* the proper exception handler
1445 // without calling into the VM: it's the empty function. Just pop this
1446 // frame and then jump to forward_exception_entry; O7 will contain the
1447 // native caller's return PC.
1448 Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry());
1449 __ jump_to(exception_entry);
1450 __ delayed()->restore(); // Pop this frame off.
1451 __ bind(L);
1452 }
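// A note on the delay slot above (explanatory only): br_null is emitted
// without the annul bit, so the delayed mov(L7_thread_cache, G2_thread)
// executes on both paths -- the thread register is restored whether we
// forward an exception or fall through at L.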
1454 // A simple move of an integer-like type
1455 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1456 if (src.first()->is_stack()) {
1457 if (dst.first()->is_stack()) {
1458 // stack to stack
1459 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1460 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1461 } else {
1462 // stack to reg
1463 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1464 }
1465 } else if (dst.first()->is_stack()) {
1466 // reg to stack
1467 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1468 } else {
1469 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1470 }
1471 }
1473 // On 64-bit we store integer-like items to the stack as
1474 // 64-bit items (SPARC ABI) even though Java would only store
1475 // 32 bits for a parameter. On 32-bit it is simply 32 bits.
1476 // So this routine does 32->32 on 32-bit and 32->64 on 64-bit.
1477 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1478 if (src.first()->is_stack()) {
1479 if (dst.first()->is_stack()) {
1480 // stack to stack
1481 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1482 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1483 } else {
1484 // stack to reg
1485 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1486 }
1487 } else if (dst.first()->is_stack()) {
1488 // reg to stack
1489 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1490 } else {
1491 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1492 }
1493 }
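// A sketch of the LP64 stack-to-stack case above (explanatory only, using
// the same scratch register the code picks):
//   ld     [FP + src_offset + STACK_BIAS], L5    ! 32-bit load
//   st_ptr L5, [SP + dst_offset + STACK_BIAS]    ! 64-bit store
// so the 32-bit Java value ends up occupying the full 64-bit slot the
// SPARC ABI reserves for each integer-like argument.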
1496 // An oop arg. Must pass a handle, not the oop itself.
1497 static void object_move(MacroAssembler* masm,
1498 OopMap* map,
1499 int oop_handle_offset,
1500 int framesize_in_slots,
1501 VMRegPair src,
1502 VMRegPair dst,
1503 bool is_receiver,
1504 int* receiver_offset) {
1506 // We must pass a handle. First figure out the location we will use as the handle.
1508 if (src.first()->is_stack()) {
1509 // Oop is already on the stack
1510 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1511 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1512 __ ld_ptr(rHandle, 0, L4);
1513 #ifdef _LP64
1514 __ movr( Assembler::rc_z, L4, G0, rHandle );
1515 #else
1516 __ tst( L4 );
1517 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1518 #endif
1519 if (dst.first()->is_stack()) {
1520 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1521 }
1522 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1523 if (is_receiver) {
1524 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1525 }
1526 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1527 } else {
1528 // Oop is in an input register; we must flush it to the stack
1529 const Register rOop = src.first()->as_Register();
1530 const Register rHandle = L5;
1531 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1532 int offset = oop_slot*VMRegImpl::stack_slot_size;
1533 Label skip;
1534 __ st_ptr(rOop, SP, offset + STACK_BIAS);
1535 if (is_receiver) {
1536 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1537 }
1538 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1539 __ add(SP, offset + STACK_BIAS, rHandle);
1540 #ifdef _LP64
1541 __ movr( Assembler::rc_z, rOop, G0, rHandle );
1542 #else
1543 __ tst( rOop );
1544 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1545 #endif
1547 if (dst.first()->is_stack()) {
1548 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1549 } else {
1550 __ mov(rHandle, dst.first()->as_Register());
1551 }
1552 }
1553 }
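// Handle semantics in brief (explanatory only): rHandle holds the *address*
// of the stack word containing the oop -- except for a NULL oop, where the
// movr/movcc above substitutes G0 so the native code sees a NULL jobject
// rather than a pointer to a NULL slot, as JNI requires.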
1555 // A float arg may have to do a float-reg to int-reg conversion
1556 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1557 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1559 if (src.first()->is_stack()) {
1560 if (dst.first()->is_stack()) {
1561 // stack to stack the easiest of the bunch
1562 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1563 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1564 } else {
1565 // stack to reg
1566 if (dst.first()->is_Register()) {
1567 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1568 } else {
1569 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1570 }
1571 }
1572 } else if (dst.first()->is_stack()) {
1573 // reg to stack
1574 if (src.first()->is_Register()) {
1575 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1576 } else {
1577 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1578 }
1579 } else {
1580 // reg to reg
1581 if (src.first()->is_Register()) {
1582 if (dst.first()->is_Register()) {
1583 // gpr -> gpr
1584 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1585 } else {
1586 // gpr -> fpr
1587 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1588 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1589 }
1590 } else if (dst.first()->is_Register()) {
1591 // fpr -> gpr
1592 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1593 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1594 } else {
1595 // fpr -> fpr
1596 // In theory these overlap but the ordering is such that this is likely a nop
1597 if ( src.first() != dst.first()) {
1598 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1599 }
1600 }
1601 }
1602 }
1604 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1605 VMRegPair src_lo(src.first());
1606 VMRegPair src_hi(src.second());
1607 VMRegPair dst_lo(dst.first());
1608 VMRegPair dst_hi(dst.second());
1609 simple_move32(masm, src_lo, dst_lo);
1610 simple_move32(masm, src_hi, dst_hi);
1611 }
1613 // A long move
1614 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1616 // Do the simple ones here; otherwise do two int moves
1617 if (src.is_single_phys_reg() ) {
1618 if (dst.is_single_phys_reg()) {
1619 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1620 } else {
1621 // split src into two separate registers
1622 // Remember hi means hi address or lsw on sparc
1623 // Move msw to lsw
1624 if (dst.second()->is_reg()) {
1625 // MSW -> MSW
1626 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1627 // Now LSW -> LSW
1628 // this will only move lo -> lo and ignore hi
1629 VMRegPair split(dst.second());
1630 simple_move32(masm, src, split);
1631 } else {
1632 VMRegPair split(src.first(), L4->as_VMReg());
1633 // MSW -> MSW (lo ie. first word)
1634 __ srax(src.first()->as_Register(), 32, L4);
1635 split_long_move(masm, split, dst);
1636 }
1637 }
1638 } else if (dst.is_single_phys_reg()) {
1639 if (src.is_adjacent_aligned_on_stack(2)) {
1640 __ ldd(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1641 } else {
1642 // dst is a single reg.
1643 // Remember lo is low address not msb for stack slots
1644 // and lo is the "real" register for registers
1645 // src is split across two locations (its halves are not one 64-bit reg)
1647 VMRegPair split;
1649 if (src.first()->is_reg()) {
1650 // src.lo (msw) is a reg, src.hi is stk/reg
1651 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1652 split.set_pair(dst.first(), src.first());
1653 } else {
1654 // msw is stack move to L5
1655 // lsw is stack move to dst.lo (real reg)
1656 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1657 split.set_pair(dst.first(), L5->as_VMReg());
1658 }
1660 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1661 // msw -> src.lo/L5, lsw -> dst.lo
1662 split_long_move(masm, src, split);
1664 // So dst now has the low-order half correct; position the
1665 // msw half
1666 __ sllx(split.first()->as_Register(), 32, L5);
1668 const Register d = dst.first()->as_Register();
1669 __ or3(L5, d, d);
1670 }
1671 } else {
1672 // For LP64 we can probably do better.
1673 split_long_move(masm, src, dst);
1674 }
1675 }
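// Worked example for the reg -> split case above (register names are just
// the ones this code uses): with a 64-bit long in src.first() and a
// two-register destination, srax deposits the MSW in dst.first() and
// simple_move32 copies the LSW into dst.second() -- remembering that on
// SPARC "hi" means higher address, i.e. the LSW.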
1677 // A double move
1678 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1680 // The painful thing here is that like long_move a VMRegPair might be
1681 // 1: a single physical register
1682 // 2: two physical registers (v8)
1683 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1684 // 4: two stack slots
1686 // Since src always follows the java calling convention we know that the src pair
1687 // is always either all registers or all stack (and aligned?); only the dst
1689 // can be split, e.g. in a register [lo] and a stack slot [hi]
1690 if (src.first()->is_stack()) {
1691 if (dst.first()->is_stack()) {
1692 // stack to stack the easiest of the bunch
1693 // ought to be a way to do this where, if alignment is ok, we use ldd/std
1694 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1695 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1696 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1697 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1698 } else {
1699 // stack to reg
1700 if (dst.second()->is_stack()) {
1701 // stack -> reg, stack -> stack
1702 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1703 if (dst.first()->is_Register()) {
1704 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1705 } else {
1706 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1707 }
1708 // This was missing. (very rare case)
1709 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1710 } else {
1711 // stack -> reg
1712 // Eventually optimize for alignment QQQ
1713 if (dst.first()->is_Register()) {
1714 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1715 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1716 } else {
1717 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1718 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1719 }
1720 }
1721 }
1722 } else if (dst.first()->is_stack()) {
1723 // reg to stack
1724 if (src.first()->is_Register()) {
1725 // Eventually optimize for alignment QQQ
1726 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1727 if (src.second()->is_stack()) {
1728 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1729 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1730 } else {
1731 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1732 }
1733 } else {
1734 // fpr to stack
1735 if (src.second()->is_stack()) {
1736 ShouldNotReachHere();
1737 } else {
1738 // Is the stack aligned?
1739 if (reg2offset(dst.first()) & 0x7) {
1740 // No: do as a pair of single stores
1741 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1742 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1743 } else {
1744 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1745 }
1746 }
1747 }
1748 } else {
1749 // reg to reg
1750 if (src.first()->is_Register()) {
1751 if (dst.first()->is_Register()) {
1752 // gpr -> gpr
1753 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1754 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1755 } else {
1756 // gpr -> fpr
1757 // ought to be able to do a single store
1758 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1759 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1760 // ought to be able to do a single load
1761 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1762 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1763 }
1764 } else if (dst.first()->is_Register()) {
1765 // fpr -> gpr
1766 // ought to be able to do a single store
1767 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1768 // ought to be able to do a single load
1769 // REMEMBER first() is low address not LSB
1770 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1771 if (dst.second()->is_Register()) {
1772 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1773 } else {
1774 __ ld(FP, -4 + STACK_BIAS, L4);
1775 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1776 }
1777 } else {
1778 // fpr -> fpr
1779 // In theory these overlap but the ordering is such that this is likely a nop
1780 if ( src.first() != dst.first()) {
1781 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1782 }
1783 }
1784 }
1785 }
1787 // Creates an inner frame if one hasn't already been created, and
1788 // saves a copy of the thread in L7_thread_cache
1789 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1790 if (!*already_created) {
1791 __ save_frame(0);
1792 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1793 // Don't use save_thread because it smashes G2 and we merely want to save a
1794 // copy
1795 __ mov(G2_thread, L7_thread_cache);
1796 *already_created = true;
1797 }
1798 }
1800 // ---------------------------------------------------------------------------
1801 // Generate a native wrapper for a given method. The method takes arguments
1802 // in the Java compiled code convention, marshals them to the native
1803 // convention (handlizes oops, etc), transitions to native, makes the call,
1804 // returns to java state (possibly blocking), unhandlizes any result and
1805 // returns.
1806 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1807 methodHandle method,
1808 int total_in_args,
1809 int comp_args_on_stack, // in VMRegStackSlots
1810 BasicType *in_sig_bt,
1811 VMRegPair *in_regs,
1812 BasicType ret_type) {
1815 // Native nmethod wrappers never take possession of the oop arguments.
1816 // So the caller will GC the arguments. The only thing we need an
1817 // oopMap for is if the call is static.
1818 //
1819 // An OopMap for lock (and class if static), and one for the VM call itself
1820 OopMapSet *oop_maps = new OopMapSet();
1821 intptr_t start = (intptr_t)__ pc();
1823 // First thing make an ic check to see if we should even be here
1824 {
1825 Label L;
1826 const Register temp_reg = G3_scratch;
1827 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
1828 __ verify_oop(O0);
1829 __ load_klass(O0, temp_reg);
1830 __ cmp(temp_reg, G5_inline_cache_reg);
1831 __ brx(Assembler::equal, true, Assembler::pt, L);
1832 __ delayed()->nop();
1834 __ jump_to(ic_miss, 0);
1835 __ delayed()->nop();
1836 __ align(CodeEntryAlignment);
1837 __ bind(L);
1838 }
1840 int vep_offset = ((intptr_t)__ pc()) - start;
1842 #ifdef COMPILER1
1843 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1844 // Object.hashCode can pull the hashCode from the header word
1845 // instead of doing a full VM transition once it's been computed.
1846 // Since hashCode is usually polymorphic at call sites we can't do
1847 // this optimization at the call site without a lot of work.
1848 Label slowCase;
1849 Register receiver = O0;
1850 Register result = O0;
1851 Register header = G3_scratch;
1852 Register hash = G3_scratch; // overwrite header value with hash value
1853 Register mask = G1; // to get hash field from header
1855 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1856 // We depend on hash_mask being at most 32 bits and avoid the use of
1857 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1858 // vm: see markOop.hpp.
1859 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1860 __ sethi(markOopDesc::hash_mask, mask);
1861 __ btst(markOopDesc::unlocked_value, header);
1862 __ br(Assembler::zero, false, Assembler::pn, slowCase);
1863 if (UseBiasedLocking) {
1864 // Check if biased and fall through to runtime if so
1865 __ delayed()->nop();
1866 __ btst(markOopDesc::biased_lock_bit_in_place, header);
1867 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1868 }
1869 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1871 // Check for a valid (non-zero) hash code and get its value.
1872 #ifdef _LP64
1873 __ srlx(header, markOopDesc::hash_shift, hash);
1874 #else
1875 __ srl(header, markOopDesc::hash_shift, hash);
1876 #endif
1877 __ andcc(hash, mask, hash);
1878 __ br(Assembler::equal, false, Assembler::pn, slowCase);
1879 __ delayed()->nop();
1881 // leaf return.
1882 __ retl();
1883 __ delayed()->mov(hash, result);
1884 __ bind(slowCase);
1885 }
1886 #endif // COMPILER1
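// What the fast path above computes, as illustrative pseudocode only:
//   markOop m = receiver->mark();
//   if (!m->is_unlocked()) goto slowCase;        // btst unlocked_value
//   intptr_t h = ((uintptr_t)m >> hash_shift) & hash_mask;
//   if (h == 0) goto slowCase;                   // hash not installed yet
//   return h;                                    // leaf return, no frame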
1889 // We have received a description of where all the java args are located
1890 // on entry to the wrapper. We need to convert these args to where
1891 // the jni function will expect them. To figure out where they go
1892 // we convert the java signature to a C signature by inserting
1893 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1895 int total_c_args = total_in_args + 1;
1896 if (method->is_static()) {
1897 total_c_args++;
1898 }
1900 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1901 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1903 int argc = 0;
1904 out_sig_bt[argc++] = T_ADDRESS;
1905 if (method->is_static()) {
1906 out_sig_bt[argc++] = T_OBJECT;
1907 }
1909 for (int i = 0; i < total_in_args ; i++ ) {
1910 out_sig_bt[argc++] = in_sig_bt[i];
1911 }
1913 // Now figure out where the args must be stored and how much stack space
1914 // they require (neglecting out_preserve_stack_slots but counting space for
1915 // storing the 1st six register arguments). It's weird; see int_stk_helper.
1916 //
1917 int out_arg_slots;
1918 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1920 // Compute framesize for the wrapper. We need to handlize all oops in
1921 // registers. We must create space for them here that is disjoint from
1922 // the windowed save area because we have no control over when we might
1923 // flush the window again and overwrite values that gc has since modified.
1924 // (The live window race)
1925 //
1926 // We always just allocate 6 words for storing down these objects. This allows
1927 // us to simply record the base and use the Ireg number to decide which
1928 // slot to use. (Note that the reg number is the inbound number not the
1929 // outbound number).
1930 // We must shuffle args to match the native convention, and include var-args space.
1932 // Calculate the total number of stack slots we will need.
1934 // First count the abi requirement plus all of the outgoing args
1935 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1937 // Now the space for the inbound oop handle area
1939 int oop_handle_offset = stack_slots;
1940 stack_slots += 6*VMRegImpl::slots_per_word;
1942 // Now any space we need for handlizing a klass if static method
1944 int oop_temp_slot_offset = 0;
1945 int klass_slot_offset = 0;
1946 int klass_offset = -1;
1947 int lock_slot_offset = 0;
1948 bool is_static = false;
1950 if (method->is_static()) {
1951 klass_slot_offset = stack_slots;
1952 stack_slots += VMRegImpl::slots_per_word;
1953 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1954 is_static = true;
1955 }
1957 // Plus a lock if needed
1959 if (method->is_synchronized()) {
1960 lock_slot_offset = stack_slots;
1961 stack_slots += VMRegImpl::slots_per_word;
1962 }
1964 // Now a place to save the return value, also used as a temporary for any gpr -> fpr moves
1965 stack_slots += 2;
1967 // OK, the space we have allocated will look like:
1968 //
1969 //
1970 // FP-> | |
1971 // |---------------------|
1972 // | 2 slots for moves |
1973 // |---------------------|
1974 // | lock box (if sync) |
1975 // |---------------------| <- lock_slot_offset
1976 // | klass (if static) |
1977 // |---------------------| <- klass_slot_offset
1978 // | oopHandle area |
1979 // |---------------------| <- oop_handle_offset
1980 // | outbound memory |
1981 // | based arguments |
1982 // | |
1983 // |---------------------|
1984 // | vararg area |
1985 // |---------------------|
1986 // | |
1987 // SP-> | out_preserved_slots |
1988 //
1989 //
1992 // Now compute the actual number of stack words we need, rounding to keep
1993 // the stack properly aligned.
1994 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1996 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
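// Illustrative sizing (assumed values, not normative): on LP64
// (slots_per_word == 2) a synchronized static method with 4 outgoing arg
// slots and out_preserve_stack_slots() == 32 accumulates
//   32 + 4 + 12 (oop handles) + 2 (klass) + 2 (lock) + 2 (result temps) = 54
// slots, rounded up to 56, i.e. a 224-byte wrapper frame.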
1998 // Generate stack overflow check before creating frame
1999 __ generate_stack_overflow_check(stack_size);
2001 // Generate a new frame for the wrapper.
2002 __ save(SP, -stack_size, SP);
2004 int frame_complete = ((intptr_t)__ pc()) - start;
2006 __ verify_thread();
2009 //
2010 // We immediately shuffle the arguments so that for any VM call we have to
2011 // make from here on out (sync slow path, jvmti, etc.) we will have
2012 // captured the oops from our caller and have a valid oopMap for
2013 // them.
2015 // -----------------
2016 // The Grand Shuffle
2017 //
2018 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2019 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2020 // the class mirror instead of a receiver. This pretty much guarantees that
2021 // register layout will not match. We ignore these extra arguments during
2022 // the shuffle. The shuffle is described by the two calling convention
2023 // vectors we have in our possession. We simply walk the java vector to
2024 // get the source locations and the c vector to get the destinations.
2025 // Because we have a new window and the argument registers are completely
2026 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2027 // here.
2029 // This is a trick. We double the stack slots so we can claim
2030 // the oops in the caller's frame. Since we are sure to have
2031 // more args than the caller, doubling is enough to make
2032 // sure we can capture all the incoming oop args from the
2033 // caller.
2034 //
2035 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2036 int c_arg = total_c_args - 1;
2037 // Record sp-based slot for receiver on stack for non-static methods
2038 int receiver_offset = -1;
2040 // We move the arguments backward because the floating point register
2041 // destination will always be a register with a greater or equal register
2042 // number, or the stack.
2044 #ifdef ASSERT
2045 bool reg_destroyed[RegisterImpl::number_of_registers];
2046 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2047 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2048 reg_destroyed[r] = false;
2049 }
2050 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2051 freg_destroyed[f] = false;
2052 }
2054 #endif /* ASSERT */
2056 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2058 #ifdef ASSERT
2059 if (in_regs[i].first()->is_Register()) {
2060 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2061 } else if (in_regs[i].first()->is_FloatRegister()) {
2062 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2063 }
2064 if (out_regs[c_arg].first()->is_Register()) {
2065 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2066 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2067 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2068 }
2069 #endif /* ASSERT */
2071 switch (in_sig_bt[i]) {
2072 case T_ARRAY:
2073 case T_OBJECT:
2074 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2075 ((i == 0) && (!is_static)),
2076 &receiver_offset);
2077 break;
2078 case T_VOID:
2079 break;
2081 case T_FLOAT:
2082 float_move(masm, in_regs[i], out_regs[c_arg]);
2083 break;
2085 case T_DOUBLE:
2086 assert( i + 1 < total_in_args &&
2087 in_sig_bt[i + 1] == T_VOID &&
2088 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2089 double_move(masm, in_regs[i], out_regs[c_arg]);
2090 break;
2092 case T_LONG :
2093 long_move(masm, in_regs[i], out_regs[c_arg]);
2094 break;
2096 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // falls into default in product builds
2098 default:
2099 move32_64(masm, in_regs[i], out_regs[c_arg]);
2100 }
2101 }
2103 // Pre-load a static method's oop into O1. Used both by locking code and
2104 // the normal JNI call code.
2105 if (method->is_static()) {
2106 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2108 // Now handlize the static class mirror in O1. It's known not-null.
2109 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2110 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2111 __ add(SP, klass_offset + STACK_BIAS, O1);
2112 }
2115 const Register L6_handle = L6;
2117 if (method->is_synchronized()) {
2118 __ mov(O1, L6_handle);
2119 }
2121 // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2122 // except O6/O7. So if we must call out we must push a new frame. We immediately
2123 // push a new frame and flush the windows.
2125 #ifdef _LP64
2126 intptr_t thepc = (intptr_t) __ pc();
2127 {
2128 address here = __ pc();
2129 // Call the next instruction
2130 __ call(here + 8, relocInfo::none);
2131 __ delayed()->nop();
2132 }
2133 #else
2134 intptr_t thepc = __ load_pc_address(O7, 0);
2135 #endif /* _LP64 */
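// The call to 'here + 8' above is the usual position-independent trick for
// capturing the current pc: SPARC 'call' always deposits the address of the
// call instruction itself in O7, and we never return through it -- we only
// want the value for the oopMap and frame anchor below.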
2137 // We use the same pc/oopMap repeatedly when we call out
2138 oop_maps->add_gc_map(thepc - start, map);
2140 // O7 now has the pc loaded that we will use when we finally call to native.
2142 // Save thread in L7; it crosses a bunch of VM calls below
2143 // Don't use save_thread because it smashes G2 and we merely
2144 // want to save a copy
2145 __ mov(G2_thread, L7_thread_cache);
2148 // If we create an inner frame, once is plenty;
2149 // when we create it we must also save G2_thread
2150 bool inner_frame_created = false;
2152 // dtrace method entry support
2153 {
2154 SkipIfEqual skip_if(
2155 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2156 // create inner frame
2157 __ save_frame(0);
2158 __ mov(G2_thread, L7_thread_cache);
2159 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2160 __ call_VM_leaf(L7_thread_cache,
2161 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2162 G2_thread, O1);
2163 __ restore();
2164 }
2166 // We are in the jni frame unless inner_frame_created is true, in which case
2167 // we are one frame deeper (the "inner" frame). If we are in the
2168 // "inner" frame the args are in the Iregs; if in the jni frame then
2169 // they are in the Oregs.
2170 // If we ever need to go to the VM (for locking, jvmti) then
2171 // we will always be in the "inner" frame.
2173 // Lock a synchronized method
2174 int lock_offset = -1; // Set if locked
2175 if (method->is_synchronized()) {
2176 Register Roop = O1;
2177 const Register L3_box = L3;
2179 create_inner_frame(masm, &inner_frame_created);
2181 __ ld_ptr(I1, 0, O1); // Get the oop out of the handle (O1 became I1 after the inner save)
2182 Label done;
2184 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2185 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2186 #ifdef ASSERT
2187 if (UseBiasedLocking) {
2188 // making the box point to itself will make it clear it went unused
2189 // but also be obviously invalid
2190 __ st_ptr(L3_box, L3_box, 0);
2191 }
2192 #endif // ASSERT
2193 //
2194 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2195 //
2196 __ compiler_lock_object(Roop, L1, L3_box, L2);
2197 __ br(Assembler::equal, false, Assembler::pt, done);
2198 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2201 // None of the above fast optimizations worked so we have to get into the
2202 // slow case of monitor enter. Inline a special case of call_VM that
2203 // disallows any pending_exception.
2204 __ mov(Roop, O0); // Need oop in O0
2205 __ mov(L3_box, O1);
2207 // Record last_Java_sp, in case the VM code releases the JVM lock.
2209 __ set_last_Java_frame(FP, I7);
2211 // do the call
2212 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2213 __ delayed()->mov(L7_thread_cache, O2);
2215 __ restore_thread(L7_thread_cache); // restore G2_thread
2216 __ reset_last_Java_frame();
2218 #ifdef ASSERT
2219 { Label L;
2220 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2221 __ br_null(O0, false, Assembler::pt, L);
2222 __ delayed()->nop();
2223 __ stop("no pending exception allowed on exit from IR::monitorenter");
2224 __ bind(L);
2225 }
2226 #endif
2227 __ bind(done);
2228 }
2231 // Finally just about ready to make the JNI call
2233 __ flush_windows();
2234 if (inner_frame_created) {
2235 __ restore();
2236 } else {
2237 // Store only what we need from this frame
2238 // QQQ I think that on non-v9 (like we care) we don't need these saves
2239 // either, as the flush traps and the current window goes too.
2240 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2241 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2242 }
2244 // get JNIEnv* which is first argument to native
2246 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2248 // Use that pc we placed in O7 a while back as the current frame anchor
2250 __ set_last_Java_frame(SP, O7);
2252 // Transition from _thread_in_Java to _thread_in_native.
2253 __ set(_thread_in_native, G3_scratch);
2254 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
2256 // We flushed the windows ages ago; now mark them as flushed
2259 __ set(JavaFrameAnchor::flushed, G3_scratch);
2261 Address flags(G2_thread,
2262 0,
2263 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
2265 #ifdef _LP64
2266 Address dest(O7, method->native_function());
2267 __ relocate(relocInfo::runtime_call_type);
2268 __ jumpl_to(dest, O7);
2269 #else
2270 __ call(method->native_function(), relocInfo::runtime_call_type);
2271 #endif
2272 __ delayed()->st(G3_scratch, flags);
2274 __ restore_thread(L7_thread_cache); // restore G2_thread
2276 // Unpack native results. For int-types, we do any needed sign-extension
2277 // and move things into I0. The return value there will survive any VM
2278 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2279 // specially in the slow-path code.
2280 switch (ret_type) {
2281 case T_VOID: break; // Nothing to do!
2282 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2283 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2284 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
2285 case T_LONG:
2286 #ifndef _LP64
2287 __ mov(O1, I1);
2288 #endif
2289 // Fall thru
2290 case T_OBJECT: // Really a handle
2291 case T_ARRAY:
2292 case T_INT:
2293 __ mov(O0, I0);
2294 break;
2295 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2296 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2297 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2298 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2299 break; // Cannot de-handlize until after reclaiming jvm_lock
2300 default:
2301 ShouldNotReachHere();
2302 }
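// The T_BOOLEAN case above normalizes branch-free (worked example):
//   subcc G0, O0, G0   computes 0 - O0, setting the carry flag iff O0 != 0;
//   addc  G0, 0,  I0   then yields 0 + 0 + carry,
// so any nonzero native result becomes 1 and zero stays 0.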
2304 // must we block?
2306 // Block, if necessary, before resuming in _thread_in_Java state.
2307 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2308 { Label no_block;
2309 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
2311 // Switch thread to "native transition" state before reading the synchronization state.
2312 // This additional state is necessary because reading and testing the synchronization
2313 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2314 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2315 // VM thread changes sync state to synchronizing and suspends threads for GC.
2316 // Thread A is resumed to finish this native method, but doesn't block here since it
2317 // didn't see any synchronization in progress, and escapes.
2318 __ set(_thread_in_native_trans, G3_scratch);
2319 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
2320 if(os::is_MP()) {
2321 if (UseMembar) {
2322 // Force this write out before the read below
2323 __ membar(Assembler::StoreLoad);
2324 } else {
2325 // Write serialization page so VM thread can do a pseudo remote membar.
2326 // We use the current thread pointer to calculate a thread specific
2327 // offset to write to within the page. This minimizes bus traffic
2328 // due to cache line collision.
2329 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2330 }
2331 }
2332 __ load_contents(sync_state, G3_scratch);
2333 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2335 Label L;
2336 Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
2337 __ br(Assembler::notEqual, false, Assembler::pn, L);
2338 __ delayed()->
2339 ld(suspend_state, G3_scratch);
2340 __ cmp(G3_scratch, 0);
2341 __ br(Assembler::equal, false, Assembler::pt, no_block);
2342 __ delayed()->nop();
2343 __ bind(L);
2345 // Block. Save any potential method result value before the operation and
2346 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2347 // lets us share the oopMap we used when we went native rather than create
2348 // a distinct one for this pc
2349 //
2350 save_native_result(masm, ret_type, stack_slots);
2351 __ call_VM_leaf(L7_thread_cache,
2352 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2353 G2_thread);
2355 // Restore any method result value
2356 restore_native_result(masm, ret_type, stack_slots);
2357 __ bind(no_block);
2358 }
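// Transition protocol in summary (explanatory only; see the prose above):
//   _thread_in_native -> _thread_in_native_trans   (store, then membar or
//                                                   serialization-page write)
//   re-check the safepoint state and the suspend flags
//   if either demands it: spill any result, leaf-call
//     check_special_condition_for_native_trans, reload the result
//   _thread_in_native_trans -> _thread_in_Java     (store just below)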
2360 // Thread state is _thread_in_native_trans. Any safepoint blocking has already
2361 // happened, so we can now change state to _thread_in_Java.
2364 __ set(_thread_in_Java, G3_scratch);
2365 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
2368 Label no_reguard;
2369 __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch);
2370 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2371 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2372 __ delayed()->nop();
2374 save_native_result(masm, ret_type, stack_slots);
2375 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2376 __ delayed()->nop();
2378 __ restore_thread(L7_thread_cache); // restore G2_thread
2379 restore_native_result(masm, ret_type, stack_slots);
2381 __ bind(no_reguard);
2383 // Handle possible exception (will unlock if necessary)
2385 // The native result, if any, is live in an freg or I0 (and I1 if long on a 32-bit VM)
2387 // Unlock
2388 if (method->is_synchronized()) {
2389 Label done;
2390 Register I2_ex_oop = I2;
2391 const Register L3_box = L3;
2392 // Get locked oop from the handle we passed to jni
2393 __ ld_ptr(L6_handle, 0, L4);
2394 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2395 // Must save pending exception around the slow-path VM call. Since it's a
2396 // leaf call, the pending exception (if any) can be kept in a register.
2397 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2398 // Now unlock
2399 // (Roop, Rmark, Rbox, Rscratch)
2400 __ compiler_unlock_object(L4, L1, L3_box, L2);
2401 __ br(Assembler::equal, false, Assembler::pt, done);
2402 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2404 // save and restore any potential method result value around the unlocking
2405 // operation. Will save in I0 (or stack for FP returns).
2406 save_native_result(masm, ret_type, stack_slots);
2408 // Must clear pending-exception before re-entering the VM. Since this is
2409 // a leaf call, pending-exception-oop can be safely kept in a register.
2410 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2412 // Slow case of monitor exit. Inline a special case of call_VM that
2413 // disallows any pending_exception.
2414 __ mov(L3_box, O1);
2416 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2417 __ delayed()->mov(L4, O0); // Need oop in O0
2419 __ restore_thread(L7_thread_cache); // restore G2_thread
2421 #ifdef ASSERT
2422 { Label L;
2423 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2424 __ br_null(O0, false, Assembler::pt, L);
2425 __ delayed()->nop();
2426 __ stop("no pending exception allowed on exit from IR::monitorexit");
2427 __ bind(L);
2428 }
2429 #endif
2430 restore_native_result(masm, ret_type, stack_slots);
2431 // check_forward_pending_exception jumps to forward_exception if any pending
2432 // exception is set. The forward_exception routine expects to see the
2433 // exception in pending_exception and not in a register. Kind of clumsy,
2434 // since all folks who branch to forward_exception must have tested
2435 // pending_exception first and hence have it in a register already.
2436 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2437 __ bind(done);
2438 }
2440 // Tell dtrace about this method exit
2441 {
2442 SkipIfEqual skip_if(
2443 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2444 save_native_result(masm, ret_type, stack_slots);
2445 __ set_oop_constant(JNIHandles::make_local(method()), O1);
2446 __ call_VM_leaf(L7_thread_cache,
2447 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2448 G2_thread, O1);
2449 restore_native_result(masm, ret_type, stack_slots);
2450 }
2452 // Clear "last Java frame" SP and PC.
2453 __ verify_thread(); // G2_thread must be correct
2454 __ reset_last_Java_frame();
2456 // Unpack oop result
2457 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2458 Label L;
2459 __ addcc(G0, I0, G0);
2460 __ brx(Assembler::notZero, true, Assembler::pt, L);
2461 __ delayed()->ld_ptr(I0, 0, I0);
2462 __ mov(G0, I0);
2463 __ bind(L);
2464 __ verify_oop(I0);
2465 }
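// Note the annulled delay slot above (explanatory only): brx(notZero, true, ...)
// annuls the delayed ld_ptr when the branch is not taken, so a NULL handle
// skips the dereference entirely and I0 is zeroed by the fall-through
// mov(G0, I0).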
2467 // reset handle block
2468 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2469 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2471 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2472 check_forward_pending_exception(masm, G3_scratch);
2475 // Return
2477 #ifndef _LP64
2478 if (ret_type == T_LONG) {
2480 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2481 __ sllx(I0, 32, G1); // Shift bits into high G1
2482 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
2483 __ or3 (I1, G1, G1); // OR 64 bits into G1
2484 }
2485 #endif
2487 __ ret();
2488 __ delayed()->restore();
2490 __ flush();
2492 nmethod *nm = nmethod::new_native_nmethod(method,
2493 masm->code(),
2494 vep_offset,
2495 frame_complete,
2496 stack_slots / VMRegImpl::slots_per_word,
2497 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2498 in_ByteSize(lock_offset),
2499 oop_maps);
2500 return nm;
2502 }
2504 // This function returns the adjustment (in number of words) to a c2i adapter
2505 // activation for use during deoptimization.
2506 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2507 assert(callee_locals >= callee_parameters,
2508 "test and remove; got more parms than locals");
2509 if (callee_locals < callee_parameters)
2510 return 0; // No adjustment for negative locals
2511 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
2512 return round_to(diff, WordsPerLong);
2513 }
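// Worked example (assumed values): callee_parameters == 2 and
// callee_locals == 5 gives diff == 3 * Interpreter::stackElementWords();
// with one word per element that is 3 words, which round_to(diff, WordsPerLong)
// pads (to 4 if WordsPerLong is 2) so longs in the extra locals stay aligned.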
2515 // "Top of Stack" slots that may be unused by the calling convention but must
2516 // otherwise be preserved.
2517 // On Intel these are not necessary and the value can be zero.
2518 // On Sparc this describes the words reserved for storing a register window
2519 // when an interrupt occurs.
2520 uint SharedRuntime::out_preserve_stack_slots() {
2521 return frame::register_save_words * VMRegImpl::slots_per_word;
2522 }
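// On SPARC, frame::register_save_words presumably covers the 16-word window
// save area (the 8 %i plus 8 %l registers), so this comes to
// 16 * slots_per_word 32-bit slots.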
2524 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
2525 //
2526 // Common out the new frame generation for deopt and uncommon trap
2527 //
2528 Register G3pcs = G3_scratch; // Array of new pcs (input)
2529 Register Oreturn0 = O0;
2530 Register Oreturn1 = O1;
2531 Register O2UnrollBlock = O2;
2532 Register O3array = O3; // Array of frame sizes (input)
2533 Register O4array_size = O4; // number of frames (input)
2534 Register O7frame_size = O7; // size of current frame (loaded below)
2536 __ ld_ptr(O3array, 0, O7frame_size);
2537 __ sub(G0, O7frame_size, O7frame_size);
2538 __ save(SP, O7frame_size, SP);
2539 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
2541 #ifdef ASSERT
2542 // make sure that the frames are aligned properly
2543 #ifndef _LP64
2544 __ btst(wordSize*2-1, SP);
2545 __ breakpoint_trap(Assembler::notZero);
2546 #endif
2547 #endif
2549 // Deopt needs to pass some extra live values from frame to frame
2551 if (deopt) {
2552 __ mov(Oreturn0->after_save(), Oreturn0);
2553 __ mov(Oreturn1->after_save(), Oreturn1);
2554 }
2556 __ mov(O4array_size->after_save(), O4array_size);
2557 __ sub(O4array_size, 1, O4array_size);
2558 __ mov(O3array->after_save(), O3array);
2559 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
2560 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
2562 #ifdef ASSERT
2563 // trash registers to show a clear pattern in backtraces
2564 __ set(0xDEAD0000, I0);
2565 __ add(I0, 2, I1);
2566 __ add(I0, 4, I2);
2567 __ add(I0, 6, I3);
2568 __ add(I0, 8, I4);
2569 // Don't touch I5; it could have valuable savedSP
2570 __ set(0xDEADBEEF, L0);
2571 __ mov(L0, L1);
2572 __ mov(L0, L2);
2573 __ mov(L0, L3);
2574 __ mov(L0, L4);
2575 __ mov(L0, L5);
2577 // trash the return value as there is nothing to return yet
2578 __ set(0xDEAD0001, O7);
2579 #endif
2581 __ mov(SP, O5_savedSP);
2582 }
2585 static void make_new_frames(MacroAssembler* masm, bool deopt) {
2586 //
2587 // loop through the UnrollBlock info and create new frames
2588 //
2589 Register G3pcs = G3_scratch;
2590 Register Oreturn0 = O0;
2591 Register Oreturn1 = O1;
2592 Register O2UnrollBlock = O2;
2593 Register O3array = O3;
2594 Register O4array_size = O4;
2595 Label loop;
2597 // Before we make new frames, check to see if stack is available.
2598 // Do this after the caller's return address is on top of stack
2599 if (UseStackBanging) {
2600 // Get total frame size for interpreted frames
2601 __ ld(Address(O2UnrollBlock, 0,
2602 Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
2603 __ bang_stack_size(O4, O3, G3_scratch);
2604 }
2606 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
2607 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);
2609 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);
2611 // Adjust old interpreter frame to make space for new frame's extra java locals
2612 //
2613 // We capture the original sp for the transition frame only because it is needed in
2614 // order to properly calculate interpreter_sp_adjustment. Even though in real life
2615 // every interpreter frame captures a savedSP it is only needed at the transition
2616 // (fortunately). If we had to have it correct everywhere then we would need to
2617 // be told the sp_adjustment for each frame we create. If the frame size array
2618 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
2619 // for each frame we create and keep up the illusion everywhere.
2620 //
2622 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
2623 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
2624 __ sub(SP, O7, SP);
2626 #ifdef ASSERT
2627 // make sure that there is at least one entry in the array
2628 __ tst(O4array_size);
2629 __ breakpoint_trap(Assembler::zero);
2630 #endif
2632 // Now push the new interpreter frames
2633 __ bind(loop);
2635 // allocate a new frame, filling the registers
2637 gen_new_frame(masm, deopt); // allocate an interpreter frame
2639 __ tst(O4array_size);
2640 __ br(Assembler::notZero, false, Assembler::pn, loop);
2641 __ delayed()->add(O3array, wordSize, O3array);
2642 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
2644 }
2646 //------------------------------generate_deopt_blob----------------------------
2647 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
2648 // instead.
2649 void SharedRuntime::generate_deopt_blob() {
2650 // allocate space for the code
2651 ResourceMark rm;
2652 // setup code generation tools
2653 int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
2654 #ifdef _LP64
2655 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
2656 #else
2657 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
2658 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
2659 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
2660 #endif /* _LP64 */
2661 MacroAssembler* masm = new MacroAssembler(&buffer);
2662 FloatRegister Freturn0 = F0;
2663 Register Greturn1 = G1;
2664 Register Oreturn0 = O0;
2665 Register Oreturn1 = O1;
2666 Register O2UnrollBlock = O2;
2667 Register O3tmp = O3;
2668 Register I5exception_tmp = I5;
2669 Register G4exception_tmp = G4_scratch;
2670 int frame_size_words;
2671 Address saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS);
2672 #if !defined(_LP64) && defined(COMPILER2)
2673 Address saved_Greturn1_addr(FP, 0, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
2674 #endif
2675 Label cont;
2677 OopMapSet *oop_maps = new OopMapSet();
2679 //
2680 // This is the entry point for code which is returning to a de-optimized
2681 // frame.
2682 // The steps taken by this frame are as follows:
2683 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
2684 // and all potentially live registers (at a pollpoint many registers can be live).
2685 //
2686 // - call the C routine: Deoptimization::fetch_unroll_info (this function
2687 // returns information about the number and size of interpreter frames
2688 // which are equivalent to the frame which is being deoptimized)
2689 // - deallocate the unpack frame, restoring only result values. Other
2690 // volatile registers will now be captured in the vframeArray as needed.
2691 // - deallocate the deoptimization frame
2692 // - in a loop using the information returned in the previous step
2693 // push new interpreter frames (take care to propagate the return
2694 // values through each new frame pushed)
2695 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
2696 // - call the C routine: Deoptimization::unpack_frames (this function
2697 // lays out values on the interpreter frame which was just created)
2698 // - deallocate the dummy unpack_frame
2699 // - ensure that all the return values are correctly set and then do
2700 // a return to the interpreter entry point
2701 //
2702 // Refer to the following methods for more information:
2703 // - Deoptimization::fetch_unroll_info
2704 // - Deoptimization::unpack_frames
2706 OopMap* map = NULL;
2708 int start = __ offset();
2710 // restore G2, the trampoline destroyed it
2711 __ get_thread();
2713 // On entry we have been called by the deoptimized nmethod with a call that
2714 // replaced the original call (or safepoint polling location) so the deoptimizing
2715 // pc is now in O7. Return values are still in the expected places
2717 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
2718 __ ba(false, cont);
2719 __ delayed()->mov(Deoptimization::Unpack_deopt, I5exception_tmp);
2721 int exception_offset = __ offset() - start;
2723 // restore G2, the trampoline destroyed it
2724 __ get_thread();
2726 // On entry we have been jumped to by the exception handler (or exception_blob
2727 // for server). O0 contains the exception oop and O7 contains the original
2728 // exception pc. So if we push a frame here it will look to the
2729 // stack walking code (fetch_unroll_info) just like a normal call so
2730 // state will be extracted normally.
2732 // save exception oop in JavaThread and fall through into the
2733 // exception_in_tls case since they are handled in same way except
2734 // for where the pending exception is kept.
2735 __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
2737 //
2738 // Vanilla deoptimization with an exception pending in exception_oop
2739 //
2740 int exception_in_tls_offset = __ offset() - start;
2742 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
2743 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
2745 // Restore G2_thread
2746 __ get_thread();
2748 #ifdef ASSERT
2749 {
2750 // verify that there is really an exception oop in exception_oop
2751 Label has_exception;
2752 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
2753 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
2754 __ delayed()-> nop();
2755 __ stop("no exception in thread");
2756 __ bind(has_exception);
2758 // verify that there is no pending exception
2759 Label no_pending_exception;
2760 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
2761 __ ld_ptr(exception_addr, Oexception);
2762 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
2763 __ delayed()->nop();
2764 __ stop("must not have pending exception here");
2765 __ bind(no_pending_exception);
2766 }
2767 #endif
2769 __ ba(false, cont);
2770 __ delayed()->mov(Deoptimization::Unpack_exception, I5exception_tmp);
2772 //
2773 // Reexecute entry, similar to c2 uncommon trap
2774 //
2775 int reexecute_offset = __ offset() - start;
2777 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
2778 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
2780 __ mov(Deoptimization::Unpack_reexecute, I5exception_tmp);
2782 __ bind(cont);
2784 __ set_last_Java_frame(SP, noreg);
2786 // do the call by hand so we can get the oopmap
2788 __ mov(G2_thread, L7_thread_cache);
2789 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
2790 __ delayed()->mov(G2_thread, O0);
2792 // Set an oopmap for the call site this describes all our saved volatile registers
2794 oop_maps->add_gc_map( __ offset()-start, map);
2796 __ mov(L7_thread_cache, G2_thread);
2798 __ reset_last_Java_frame();
2800 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
2801 // so this move will survive
2803 __ mov(I5exception_tmp, G4exception_tmp);
2805 __ mov(O0, O2UnrollBlock->after_save());
2807 RegisterSaver::restore_result_registers(masm);
2809 Label noException;
2810 __ cmp(G4exception_tmp, Deoptimization::Unpack_exception); // Was exception pending?
2811 __ br(Assembler::notEqual, false, Assembler::pt, noException);
2812 __ delayed()->nop();
2814 // Move the pending exception from exception_oop to Oexception so
2815 // the pending exception will be picked up by the interpreter.
2816 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
2817 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
2818 __ bind(noException);
2820 // deallocate the deoptimization frame taking care to preserve the return values
2821 __ mov(Oreturn0, Oreturn0->after_save());
2822 __ mov(Oreturn1, Oreturn1->after_save());
2823 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
2824 __ restore();
2826 // Allocate new interpreter frame(s) and possible c2i adapter frame
2828 make_new_frames(masm, true);
2830 // push a dummy "unpack_frame" taking care of float return values and
2831 // call Deoptimization::unpack_frames to have the unpacker layout
2832 // information in the interpreter frames just created and then return
2833 // to the interpreter entry point
2834 __ save(SP, -frame_size_words*wordSize, SP);
2835 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
2836 #if !defined(_LP64)
2837 #if defined(COMPILER2)
2838 if (!TieredCompilation) {
2839 // 32-bit 1-register longs return longs in G1
2840 __ stx(Greturn1, saved_Greturn1_addr);
2841 }
2842 #endif
2843 __ set_last_Java_frame(SP, noreg);
2844 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4exception_tmp);
2845 #else
2846 // LP64 uses g4 in set_last_Java_frame
2847 __ mov(G4exception_tmp, O1);
2848 __ set_last_Java_frame(SP, G0);
2849 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
2850 #endif
2851 __ reset_last_Java_frame();
2852 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
2854 // In tiered we never use C2 to compile methods returning longs so
2855 // the result is where we expect it already.
2857 #if !defined(_LP64) && defined(COMPILER2)
2858 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
2859 // I0/I1 if the return value is long. In the tiered world there is
2860 // a mismatch between how C1- and C2-compiled methods return longs, and so
2861 // currently compilation of methods which return longs is disabled
2862 // for C2 and so is this code. Eventually C1 and C2 will do the
2863 // same thing for longs in the tiered world.
2864 if (!TieredCompilation) {
2865 Label not_long;
2866 __ cmp(O0,T_LONG);
2867 __ br(Assembler::notEqual, false, Assembler::pt, not_long);
2868 __ delayed()->nop();
2869 __ ldd(saved_Greturn1_addr,I0);
2870 __ bind(not_long);
2871 }
2872 #endif
2873 __ ret();
2874 __ delayed()->restore();
2876 masm->flush();
2877 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
2878 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2879 }
2881 #ifdef COMPILER2
2883 //------------------------------generate_uncommon_trap_blob--------------------
2884 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
2885 // instead.
2886 void SharedRuntime::generate_uncommon_trap_blob() {
2887 // allocate space for the code
2888 ResourceMark rm;
2889 // setup code generation tools
2890 int pad = VerifyThread ? 512 : 0;
2891 #ifdef _LP64
2892 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
2893 #else
2894 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
2895 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
2896 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
2897 #endif
2898 MacroAssembler* masm = new MacroAssembler(&buffer);
2899 Register O2UnrollBlock = O2;
2900 Register O3tmp = O3;
2901 Register O2klass_index = O2;
2903 //
2904 // This is the entry point for all traps the compiler takes when it thinks
2905 // it cannot handle further execution of compilation code. The frame is
2906 // deoptimized in these cases and converted into interpreter frames for
2907 // execution
2908 // The steps taken by this frame are as follows:
2909 // - push a fake "unpack_frame"
2910 // - call the C routine Deoptimization::uncommon_trap (this function
2911 // packs the current compiled frame into vframe arrays and returns
2912 // information about the number and size of interpreter frames which
2913 // are equivalent to the frame which is being deoptimized)
2914 // - deallocate the "unpack_frame"
2915 // - deallocate the deoptimization frame
2916 // - in a loop using the information returned in the previous step
2917 // push interpreter frames;
2918 // - create a dummy "unpack_frame"
2919 // - call the C routine: Deoptimization::unpack_frames (this function
2920 // lays out values on the interpreter frame which was just created)
2921 // - deallocate the dummy unpack_frame
2922 // - return to the interpreter entry point
2923 //
2924 // Refer to the following methods for more information:
2925 // - Deoptimization::uncommon_trap
2926 // - Deoptimization::unpack_frames
2928 // the unloaded class index is in O0 (first parameter to this blob)
2930 // push a dummy "unpack_frame"
2931 // and call Deoptimization::uncommon_trap to pack the compiled frame into
2932 // vframe array and return the UnrollBlock information
2933 __ save_frame(0);
2934 __ set_last_Java_frame(SP, noreg);
2935 __ mov(I0, O2klass_index);
2936 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
2937 __ reset_last_Java_frame();
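// uncommon_trap returns the UnrollBlock* in O0; move it into I2 (which
// becomes O2 at the "restore" below) so it survives popping the dummy frame.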
2938 __ mov(O0, O2UnrollBlock->after_save());
2939 __ restore();
2941 // deallocate the deoptimized frame taking care to preserve the return values
2942 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
2943 __ restore();
2945 // Allocate new interpreter frame(s) and possibly a c2i adapter frame
2947 make_new_frames(masm, false);
2949 // push a dummy "unpack_frame", taking care of float return values, and
2950 // call Deoptimization::unpack_frames to have the unpacker lay out
2951 // information in the interpreter frames just created, and then return
2952 // to the interpreter entry point
2953 __ save_frame(0);
2954 __ set_last_Java_frame(SP, noreg);
2955 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
2956 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
2957 __ reset_last_Java_frame();
2958 __ ret();
2959 __ delayed()->restore();
2961 masm->flush();
2962 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
2963 }
2965 #endif // COMPILER2
2967 //------------------------------generate_handler_blob-------------------
2968 //
2969 // Generate a special Compile2Runtime blob that saves all registers, and sets
2970 // up an OopMap.
2971 //
2972 // This blob is jumped to (via a breakpoint and the signal handler) from a
2973 // safepoint in compiled code. On entry to this blob, O7 contains the
2974 // address in the original nmethod at which we should resume normal execution.
2975 // Thus, this blob looks like a subroutine which must preserve lots of
2976 // registers and return normally. Note that O7 is never register-allocated,
2977 // so it is guaranteed to be free here.
2978 //
2980 // The hardest part of what this blob must do is to save the 64-bit %o
2981 // registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
2982 // an interrupt will chop off their heads. Making space in the caller's frame
2983 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
2984 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
2985 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
2986 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
2987 // Tricky, tricky, tricky...
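// In outline, the dance (performed by RegisterSaver::save_live_registers
// below; see that code for the authoritative sequence) is:
//   1. drop the caller's SP to carve out scratch space,
//   2. stx the 64-bit %o registers into that space,
//   3. 'save' into this blob's own register window,
//   4. restore the caller's SP (now our FP) to its original value.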
2989 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
2990 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2992 // allocate space for the code
2993 ResourceMark rm;
2994 // setup code generation tools
2995 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
2996 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
2997 // even larger with TraceJumps
2998 int pad = TraceJumps ? 512 : 0;
2999 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3000 MacroAssembler* masm = new MacroAssembler(&buffer);
3001 int frame_size_words;
3002 OopMapSet *oop_maps = new OopMapSet();
3003 OopMap* map = NULL;
3005 int start = __ offset();
3007 // If the safepoint was taken at a return (cause_return is set), do a "restore" first
3008 if (cause_return) {
3009 __ restore();
3010 } else {
3011 // Make it look like we were called via the poll
3012 // so that frame constructor always sees a valid return address
3013 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3014 __ sub(O7, frame::pc_return_offset, O7);
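// (We subtract pc_return_offset because the stack walker treats O7 as a
// return address and adds that offset back when computing the pc, so it
// recovers exactly the trapping pc the signal handler saved.)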
3015 }
3017 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3019 // setup last_Java_sp (blows G4)
3020 __ set_last_Java_frame(SP, noreg);
3022 // call into the runtime to handle the safepoint polling page exception
3023 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3024 __ mov(G2_thread, O0);
3025 __ save_thread(L7_thread_cache);
3026 __ call(call_ptr);
3027 __ delayed()->nop();
3029 // Set an oopmap for the call site.
3030 // We need this not only for callee-saved registers, but also for volatile
3031 // registers that the compiler might be keeping live across a safepoint.
3033 oop_maps->add_gc_map( __ offset() - start, map);
3035 __ restore_thread(L7_thread_cache);
3036 // clear last_Java_sp
3037 __ reset_last_Java_frame();
3039 // Check for exceptions
3040 Label pending;
3042 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3043 __ tst(O1);
3044 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3045 __ delayed()->nop();
3047 RegisterSaver::restore_live_registers(masm);
3049 // We are back to the original state on entry and ready to go.
3051 __ retl();
3052 __ delayed()->nop();
3054 // Pending exception after the safepoint
3056 __ bind(pending);
3058 RegisterSaver::restore_live_registers(masm);
3060 // We are back to the original state on entry.
3062 // Tail-call forward_exception_entry, with the issuing PC in O7,
3063 // so it looks like the original nmethod called forward_exception_entry.
3064 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3065 __ JMP(O0, 0);
3066 __ delayed()->nop();
3068 // -------------
3069 // make sure all code is generated
3070 masm->flush();
3072 // return exception blob
3073 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3074 }
3076 //
3077 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3078 //
3079 // Generate a stub that calls into the VM to find out the proper destination
3080 // of a Java call. All the argument registers are live at this point
3081 // but since this is generic code we don't know what they are and the caller
3082 // must do any GC of the args.
3083 //
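// (This is why save_live_registers and the oop map below cover every
// register: the resolver must keep any argument oops alive and visible
// to the GC across the VM call.)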
3084 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
3085 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3087 // allocate space for the code
3088 ResourceMark rm;
3089 // setup code generation tools
3090 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3091 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3092 // even larger with TraceJumps
3093 int pad = TraceJumps ? 512 : 0;
3094 CodeBuffer buffer(name, 1600 + pad, 512);
3095 MacroAssembler* masm = new MacroAssembler(&buffer);
3096 int frame_size_words;
3097 OopMapSet *oop_maps = new OopMapSet();
3098 OopMap* map = NULL;
3100 int start = __ offset();
3102 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3104 int frame_complete = __ offset();
3106 // setup last_Java_sp (blows G4)
3107 __ set_last_Java_frame(SP, noreg);
3109 // call into the runtime to resolve the destination of this Java call
3110 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3111 __ mov(G2_thread, O0);
3112 __ save_thread(L7_thread_cache);
3113 __ call(destination, relocInfo::runtime_call_type);
3114 __ delayed()->nop();
3116 // O0 contains the address we are going to jump to, assuming no exception was installed
3118 // Set an oopmap for the call site.
3119 // We need this not only for callee-saved registers, but also for volatile
3120 // registers that the compiler might be keeping live across a safepoint.
3122 oop_maps->add_gc_map( __ offset() - start, map);
3124 __ restore_thread(L7_thread_cache);
3125 // clear last_Java_sp
3126 __ reset_last_Java_frame();
3128 // Check for exceptions
3129 Label pending;
3131 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3132 __ tst(O1);
3133 __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3134 __ delayed()->nop();
3136 // get the returned methodOop
3138 __ get_vm_result(G5_method);
3139 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
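// (written into G5's save slot so that restore_live_registers below
// reloads the methodOop into G5)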
3141 // O0 is where we want to jump; overwrite the saved G3 slot (G3 is scratch) with it
3143 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3145 RegisterSaver::restore_live_registers(masm);
3147 // We are back to the original state on entry and ready to go.
3149 __ JMP(G3, 0);
3150 __ delayed()->nop();
3152 // Pending exception after the safepoint
3154 __ bind(pending);
3156 RegisterSaver::restore_live_registers(masm);
3158 // We are back to the original state on entry.
3160 // Tail-call forward_exception_entry, with the issuing PC in O7,
3161 // so it looks like the original nmethod called forward_exception_entry.
3162 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3163 __ JMP(O0, 0);
3164 __ delayed()->nop();
3166 // -------------
3167 // make sure all code is generated
3168 masm->flush();
3170 // return the blob
3171 // frame_size_words or bytes??
3172 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3173 }
3175 void SharedRuntime::generate_stubs() {
3177 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
3178 "wrong_method_stub");
3180 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
3181 "ic_miss_stub");
3183 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
3184 "resolve_opt_virtual_call");
3186 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
3187 "resolve_virtual_call");
3189 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
3190 "resolve_static_call");
3192 _polling_page_safepoint_handler_blob =
3193 generate_handler_blob(CAST_FROM_FN_PTR(address,
3194 SafepointSynchronize::handle_polling_page_exception), false);
3196 _polling_page_return_handler_blob =
3197 generate_handler_blob(CAST_FROM_FN_PTR(address,
3198 SafepointSynchronize::handle_polling_page_exception), true);
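// Both polling page handlers share the same C entry point; the
// cause_return flag distinguishes a poll taken at a return from an
// ordinary poll.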
3200 generate_deopt_blob();
3202 #ifdef COMPILER2
3203 generate_uncommon_trap_blob();
3204 #endif // COMPILER2
3205 }