src/cpu/sparc/vm/sharedRuntime_sparc.cpp

author:      coleenp
date:        Tue, 06 Nov 2012 15:09:37 -0500
changeset:   4251:18fb7da42534
parent:      4103:137868b7aa6f
child:       4323:f0c2369fda5a
permissions: -rw-r--r--

8000725: NPG: method_holder() and pool_holder() and pool_holder field should be InstanceKlass
Summary: Change types of above methods and field to InstanceKlass and remove unneeded casts from the source files.
Reviewed-by: dholmes, coleenp, zgu
Contributed-by: harold.seigel@oracle.com

duke@435 1 /*
never@3500 2 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "asm/assembler.hpp"
stefank@2314 27 #include "assembler_sparc.inline.hpp"
stefank@2314 28 #include "code/debugInfoRec.hpp"
stefank@2314 29 #include "code/icBuffer.hpp"
stefank@2314 30 #include "code/vtableStubs.hpp"
stefank@2314 31 #include "interpreter/interpreter.hpp"
coleenp@4037 32 #include "oops/compiledICHolder.hpp"
stefank@2314 33 #include "prims/jvmtiRedefineClassesTrace.hpp"
stefank@2314 34 #include "runtime/sharedRuntime.hpp"
stefank@2314 35 #include "runtime/vframeArray.hpp"
stefank@2314 36 #include "vmreg_sparc.inline.hpp"
stefank@2314 37 #ifdef COMPILER1
stefank@2314 38 #include "c1/c1_Runtime1.hpp"
stefank@2314 39 #endif
stefank@2314 40 #ifdef COMPILER2
stefank@2314 41 #include "opto/runtime.hpp"
stefank@2314 42 #endif
stefank@2314 43 #ifdef SHARK
stefank@2314 44 #include "compiler/compileBroker.hpp"
stefank@2314 45 #include "shark/sharkCompiler.hpp"
stefank@2314 46 #endif
duke@435 47
duke@435 48 #define __ masm->
duke@435 49
duke@435 50
duke@435 51 class RegisterSaver {
duke@435 52
duke@435 53 // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
duke@435 54 // The Oregs are problematic. In the 32bit build the compiler can
duke@435 55 // have O registers live with 64 bit quantities. A window save will
duke@435 56 // cut the heads off of the registers. We have to do a very extensive
duke@435 57 // stack dance to save and restore these properly.
duke@435 58
duke@435 59 // Note that the Oregs problem only exists if we block at either a polling
duke@435 60 // page exception or a compiled-code safepoint that was not originally a call,
duke@435 61 // or if we deoptimize following one of these kinds of safepoints.
duke@435 62
duke@435 63 // Lots of registers to save. For all builds, a window save will preserve
duke@435 64 // the %i and %l registers. For the 32-bit build (longs held in two entries)
duke@435 65 // and the 64-bit build, a window-save will preserve the %o registers. In the
duke@435 66 // LION build we need to save the 64-bit %o registers, which requires that we
duke@435 67 // save them before the window-save (as then they become %i registers and get
duke@435 68 // their heads chopped off on interrupt). We have to save some %g registers
duke@435 69 // here as well.
duke@435 70 enum {
duke@435 71 // This frame's save area. Includes extra space for the native call:
duke@435 72 // vararg's layout space and the like. Briefly holds the caller's
duke@435 73 // register save area.
duke@435 74 call_args_area = frame::register_save_words_sp_offset +
duke@435 75 frame::memory_parameter_word_sp_offset*wordSize,
duke@435 76 // Make sure save locations are always 8 byte aligned.
duke@435 77 // We can't use round_to because it doesn't produce a compile-time constant.
duke@435 78 start_of_extra_save_area = ((call_args_area + 7) & ~7),
duke@435 79 g1_offset = start_of_extra_save_area, // g-regs needing saving
duke@435 80 g3_offset = g1_offset+8,
duke@435 81 g4_offset = g3_offset+8,
duke@435 82 g5_offset = g4_offset+8,
duke@435 83 o0_offset = g5_offset+8,
duke@435 84 o1_offset = o0_offset+8,
duke@435 85 o2_offset = o1_offset+8,
duke@435 86 o3_offset = o2_offset+8,
duke@435 87 o4_offset = o3_offset+8,
duke@435 88 o5_offset = o4_offset+8,
duke@435 89 start_of_flags_save_area = o5_offset+8,
duke@435 90 ccr_offset = start_of_flags_save_area,
duke@435 91 fsr_offset = ccr_offset + 8,
duke@435 92 d00_offset = fsr_offset+8, // Start of float save area
duke@435 93 register_save_size = d00_offset+8*32
duke@435 94 };
duke@435 95
duke@435 96
duke@435 97 public:
duke@435 98
duke@435 99 static int Oexception_offset() { return o0_offset; };
duke@435 100 static int G3_offset() { return g3_offset; };
duke@435 101 static int G5_offset() { return g5_offset; };
duke@435 102 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
duke@435 103 static void restore_live_registers(MacroAssembler* masm);
duke@435 104
duke@435 105 // During deoptimization only the result registers need to be restored;
duke@435 106 // all the other values have already been extracted.
duke@435 107
duke@435 108 static void restore_result_registers(MacroAssembler* masm);
duke@435 109 };
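
// A minimal usage sketch (illustrative only, not from the original file): the
// real call sites are the handler-blob and deopt stubs later in this file, and
// the local names below are our own.
#if 0
  int frame_size_words;
  OopMapSet* oop_maps = new OopMapSet();
  // Push a frame and record every volatile register's save slot in the map.
  OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  // ... emit the runtime call here, registering `map` against its return pc ...
  RegisterSaver::restore_live_registers(masm);
#endif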
duke@435 110
duke@435 111 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
duke@435 112 // Record volatile registers as callee-save values in an OopMap so their save locations will be
duke@435 113 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
duke@435 114 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
duke@435 115 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
duke@435 116 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
duke@435 117 int i;
kvn@1442 118 // Always make the frame size 16 byte aligned.
duke@435 119 int frame_size = round_to(additional_frame_words + register_save_size, 16);
duke@435 120 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
duke@435 121 int frame_size_in_slots = frame_size / sizeof(jint);
duke@435 122 // CodeBlob frame size is in words.
duke@435 123 *total_frame_words = frame_size / wordSize;
duke@435 124 // OopMap* map = new OopMap(*total_frame_words, 0);
duke@435 125 OopMap* map = new OopMap(frame_size_in_slots, 0);
duke@435 126
duke@435 127 #if !defined(_LP64)
duke@435 128
duke@435 129 // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
duke@435 130 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@435 131 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@435 132 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
duke@435 133 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
duke@435 134 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
duke@435 135 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
duke@435 136 #endif /* _LP64 */
duke@435 137
duke@435 138 __ save(SP, -frame_size, SP);
duke@435 139
duke@435 140 #ifndef _LP64
duke@435 141 // Reload the 64 bit Oregs. Although they are now Iregs we load them
duke@435 142 // to Oregs here to avoid interrupts cutting off their heads
duke@435 143
duke@435 144 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@435 145 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@435 146 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
duke@435 147 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
duke@435 148 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
duke@435 149 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
duke@435 150
duke@435 151 __ stx(O0, SP, o0_offset+STACK_BIAS);
duke@435 152 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
duke@435 153
duke@435 154 __ stx(O1, SP, o1_offset+STACK_BIAS);
duke@435 155
duke@435 156 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
duke@435 157
duke@435 158 __ stx(O2, SP, o2_offset+STACK_BIAS);
duke@435 159 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
duke@435 160
duke@435 161 __ stx(O3, SP, o3_offset+STACK_BIAS);
duke@435 162 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
duke@435 163
duke@435 164 __ stx(O4, SP, o4_offset+STACK_BIAS);
duke@435 165 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
duke@435 166
duke@435 167 __ stx(O5, SP, o5_offset+STACK_BIAS);
duke@435 168 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
duke@435 169 #endif /* _LP64 */
duke@435 170
coleenp@548 171
coleenp@548 172 #ifdef _LP64
coleenp@548 173 int debug_offset = 0;
coleenp@548 174 #else
coleenp@548 175 int debug_offset = 4;
coleenp@548 176 #endif
duke@435 177 // Save the G's
duke@435 178 __ stx(G1, SP, g1_offset+STACK_BIAS);
coleenp@548 179 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
duke@435 180
duke@435 181 __ stx(G3, SP, g3_offset+STACK_BIAS);
coleenp@548 182 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
duke@435 183
duke@435 184 __ stx(G4, SP, g4_offset+STACK_BIAS);
coleenp@548 185 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
duke@435 186
duke@435 187 __ stx(G5, SP, g5_offset+STACK_BIAS);
coleenp@548 188 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
duke@435 189
duke@435 190 // This is really a waste but we'll keep things as they were for now
duke@435 191 if (true) {
duke@435 192 #ifndef _LP64
duke@435 193 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
duke@435 194 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
duke@435 195 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
duke@435 196 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
duke@435 197 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
duke@435 198 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
duke@435 199 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
duke@435 200 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
duke@435 201 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
duke@435 202 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
coleenp@548 203 #endif /* _LP64 */
duke@435 204 }
duke@435 205
duke@435 206
duke@435 207 // Save the flags
duke@435 208 __ rdccr( G5 );
duke@435 209 __ stx(G5, SP, ccr_offset+STACK_BIAS);
duke@435 210 __ stxfsr(SP, fsr_offset+STACK_BIAS);
duke@435 211
kvn@1442 212 // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
duke@435 213 int offset = d00_offset;
kvn@1442 214 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
duke@435 215 FloatRegister f = as_FloatRegister(i);
duke@435 216 __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
kvn@1442 217 // Record as callee saved both halves of double registers (2 float registers).
duke@435 218 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
kvn@1442 219 map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
duke@435 220 offset += sizeof(double);
duke@435 221 }
duke@435 222
duke@435 223 // And we're done.
duke@435 224
duke@435 225 return map;
duke@435 226 }
duke@435 227
duke@435 228
duke@435 229 // Pop the current frame and restore all the registers that we
duke@435 230 // saved.
duke@435 231 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
duke@435 232
duke@435 233 // Restore all the FP registers
kvn@1442 234 for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
duke@435 235 __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
duke@435 236 }
duke@435 237
duke@435 238 __ ldx(SP, ccr_offset+STACK_BIAS, G1);
duke@435 239 __ wrccr (G1) ;
duke@435 240
duke@435 241 // Restore the G's
duke@435 242 // Note that G2 (AKA GThread) must be saved and restored separately.
duke@435 243 // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
duke@435 244
duke@435 245 __ ldx(SP, g1_offset+STACK_BIAS, G1);
duke@435 246 __ ldx(SP, g3_offset+STACK_BIAS, G3);
duke@435 247 __ ldx(SP, g4_offset+STACK_BIAS, G4);
duke@435 248 __ ldx(SP, g5_offset+STACK_BIAS, G5);
duke@435 249
duke@435 250
duke@435 251 #if !defined(_LP64)
duke@435 252 // Restore the 64-bit O's.
duke@435 253 __ ldx(SP, o0_offset+STACK_BIAS, O0);
duke@435 254 __ ldx(SP, o1_offset+STACK_BIAS, O1);
duke@435 255 __ ldx(SP, o2_offset+STACK_BIAS, O2);
duke@435 256 __ ldx(SP, o3_offset+STACK_BIAS, O3);
duke@435 257 __ ldx(SP, o4_offset+STACK_BIAS, O4);
duke@435 258 __ ldx(SP, o5_offset+STACK_BIAS, O5);
duke@435 259
duke@435 260 // And temporarily place them in TLS
duke@435 261
duke@435 262 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@435 263 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@435 264 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
duke@435 265 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
duke@435 266 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
duke@435 267 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
duke@435 268 #endif /* _LP64 */
duke@435 269
duke@435 270 // Restore flags
duke@435 271
duke@435 272 __ ldxfsr(SP, fsr_offset+STACK_BIAS);
duke@435 273
duke@435 274 __ restore();
duke@435 275
duke@435 276 #if !defined(_LP64)
duke@435 277 // Now reload the 64bit Oregs after we've restored the window.
duke@435 278 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@435 279 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@435 280 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
duke@435 281 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
duke@435 282 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
duke@435 283 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
duke@435 284 #endif /* _LP64 */
duke@435 285
duke@435 286 }
duke@435 287
duke@435 288 // Pop the current frame and restore the registers that might be holding
duke@435 289 // a result.
duke@435 290 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
duke@435 291
duke@435 292 #if !defined(_LP64)
duke@435 293 // 32bit build returns longs in G1
duke@435 294 __ ldx(SP, g1_offset+STACK_BIAS, G1);
duke@435 295
duke@435 296 // Retrieve the 64-bit O's.
duke@435 297 __ ldx(SP, o0_offset+STACK_BIAS, O0);
duke@435 298 __ ldx(SP, o1_offset+STACK_BIAS, O1);
duke@435 299 // and save to TLS
duke@435 300 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@435 301 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@435 302 #endif /* _LP64 */
duke@435 303
duke@435 304 __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
duke@435 305
duke@435 306 __ restore();
duke@435 307
duke@435 308 #if !defined(_LP64)
duke@435 309 // Now reload the 64bit Oregs after we've restored the window.
duke@435 310 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@435 311 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@435 312 #endif /* _LP64 */
duke@435 313
duke@435 314 }
duke@435 315
kvn@4103 316 // Is the vector's size (in bytes) bigger than the size saved by default?
kvn@4103 317 // 8-byte FP registers are saved by default on SPARC.
kvn@4103 318 bool SharedRuntime::is_wide_vector(int size) {
kvn@4103 319 // Note, MaxVectorSize == 8 on SPARC.
kvn@4103 320 assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
kvn@4103 321 return size > 8;
kvn@4103 322 }
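
// Note: the assert above caps size at 8, and MaxVectorSize == 8 on SPARC, so
// this predicate effectively always answers false here.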
kvn@4103 323
duke@435 324 // The java_calling_convention describes stack locations as ideal slots on
duke@435 325 // a frame with no abi restrictions. Since we must observe abi restrictions
duke@435 326 // (like the placement of the register window) the slots must be biased by
duke@435 327 // the following value.
duke@435 328 static int reg2offset(VMReg r) {
duke@435 329 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
duke@435 330 }
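
// For example: a VMReg denoting abstract stack slot k maps to byte offset
// (k + out_preserve_stack_slots()) * VMRegImpl::stack_slot_size, i.e.
// (k + out_preserve_stack_slots()) * 4 with 4-byte stack slots.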
duke@435 331
never@3500 332 static VMRegPair reg64_to_VMRegPair(Register r) {
never@3500 333 VMRegPair ret;
never@3500 334 if (wordSize == 8) {
never@3500 335 ret.set2(r->as_VMReg());
never@3500 336 } else {
never@3500 337 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
never@3500 338 }
never@3500 339 return ret;
never@3500 340 }
never@3500 341
duke@435 342 // ---------------------------------------------------------------------------
duke@435 343 // Read the array of BasicTypes from a signature, and compute where the
duke@435 344 // arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
duke@435 345 // quantities. Values less than VMRegImpl::stack0 are registers, those above
duke@435 346 // refer to 4-byte stack slots. All stack slots are based off of the window
duke@435 347 // top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
duke@435 348 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
duke@435 349 // values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
duke@435 350 // integer registers. Values 64-95 are the (32-bit only) float registers.
duke@435 351 // Each 32-bit quantity is given its own number, so the integer registers
duke@435 352 // (in either 32- or 64-bit builds) use 2 numbers. For example, there is
duke@435 353 // an O0-low and an O0-high. Essentially, all int register numbers are doubled.
duke@435 354
duke@435 355 // Register results are passed in O0-O5, for outgoing call arguments. To
duke@435 356 // convert to incoming arguments, convert all O's to I's. The regs array
duke@435 357 // refers to the low and hi 32-bit words of 64-bit registers or stack slots.
duke@435 358 // If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
duke@435 359 // 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
duke@435 360 // passed (used as a placeholder for the other half of longs and doubles in
duke@435 361 // the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
duke@435 362 // regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
duke@435 363 // Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
duke@435 364 // == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
duke@435 365 // same VMRegPair.
duke@435 366
duke@435 367 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
duke@435 368 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
duke@435 369 // units regardless of build.
duke@435 370
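// Restating the invariants above in code form (a sketch; `pair` and the
// branch bodies are our own, not part of this file):
#if 0
  VMRegPair pair = regs[i];
  if (!pair.first()->is_valid()) {
    // Both halves Bad: placeholder for the other half of a long/double.
    assert(!pair.second()->is_valid(), "SPARC never sets second() alone");
  } else if (!pair.second()->is_valid()) {
    // Only first() is set: a 32-bit value.
  } else {
    // Both set: a 64-bit value, with second() == first()+1.
  }
#endif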
duke@435 371
duke@435 372 // ---------------------------------------------------------------------------
duke@435 373 // The compiled Java calling convention. The Java convention always passes
duke@435 374 // 64-bit values in adjacent aligned locations (either registers or stack),
twisti@4101 375 // floats in float registers and doubles in aligned float pairs. There is
twisti@4101 376 // no backing varargs store for values in registers.
twisti@4101 377 // In the 32-bit build, longs are passed on the stack (cannot be
duke@435 378 // passed in I's, because longs in I's get their heads chopped off at
duke@435 379 // interrupt).
duke@435 380 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
duke@435 381 VMRegPair *regs,
duke@435 382 int total_args_passed,
duke@435 383 int is_outgoing) {
duke@435 384 assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
duke@435 385
duke@435 386 const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
duke@435 387 const int flt_reg_max = 8;
twisti@4101 388
duke@435 389 int int_reg = 0;
duke@435 390 int flt_reg = 0;
twisti@4101 391 int slot = 0;
twisti@4101 392
duke@435 393 for (int i = 0; i < total_args_passed; i++) {
duke@435 394 switch (sig_bt[i]) {
duke@435 395 case T_INT:
duke@435 396 case T_SHORT:
duke@435 397 case T_CHAR:
duke@435 398 case T_BYTE:
duke@435 399 case T_BOOLEAN:
duke@435 400 #ifndef _LP64
duke@435 401 case T_OBJECT:
duke@435 402 case T_ARRAY:
duke@435 403 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
duke@435 404 #endif // _LP64
duke@435 405 if (int_reg < int_reg_max) {
duke@435 406 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
duke@435 407 regs[i].set1(r->as_VMReg());
duke@435 408 } else {
twisti@4101 409 regs[i].set1(VMRegImpl::stack2reg(slot++));
duke@435 410 }
duke@435 411 break;
duke@435 412
duke@435 413 #ifdef _LP64
twisti@4101 414 case T_LONG:
twisti@4101 415 assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
twisti@4101 416 // fall-through
duke@435 417 case T_OBJECT:
duke@435 418 case T_ARRAY:
duke@435 419 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
duke@435 420 if (int_reg < int_reg_max) {
duke@435 421 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
duke@435 422 regs[i].set2(r->as_VMReg());
duke@435 423 } else {
twisti@4101 424 slot = round_to(slot, 2); // align
twisti@4101 425 regs[i].set2(VMRegImpl::stack2reg(slot));
twisti@4101 426 slot += 2;
duke@435 427 }
duke@435 428 break;
twisti@4101 429 #else
duke@435 430 case T_LONG:
duke@435 431 assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
twisti@4101 432 // On 32-bit SPARC put longs always on the stack to keep the pressure off
twisti@4101 433 // integer argument registers. They should be used for oops.
twisti@4101 434 slot = round_to(slot, 2); // align
twisti@4101 435 regs[i].set2(VMRegImpl::stack2reg(slot));
twisti@4101 436 slot += 2;
twisti@4101 437 #endif
duke@435 438 break;
duke@435 439
duke@435 440 case T_FLOAT:
twisti@4101 441 if (flt_reg < flt_reg_max) {
twisti@4101 442 FloatRegister r = as_FloatRegister(flt_reg++);
twisti@4101 443 regs[i].set1(r->as_VMReg());
twisti@4101 444 } else {
twisti@4101 445 regs[i].set1(VMRegImpl::stack2reg(slot++));
twisti@4101 446 }
duke@435 447 break;
twisti@4101 448
duke@435 449 case T_DOUBLE:
duke@435 450 assert(sig_bt[i+1] == T_VOID, "expecting half");
twisti@4101 451 if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
twisti@4101 452 flt_reg = round_to(flt_reg, 2); // align
twisti@4101 453 FloatRegister r = as_FloatRegister(flt_reg);
twisti@4101 454 regs[i].set2(r->as_VMReg());
twisti@4101 455 flt_reg += 2;
duke@435 456 } else {
twisti@4101 457 slot = round_to(slot, 2); // align
twisti@4101 458 regs[i].set2(VMRegImpl::stack2reg(slot));
twisti@4101 459 slot += 2;
duke@435 460 }
duke@435 461 break;
twisti@4101 462
twisti@4101 463 case T_VOID:
twisti@4101 464 regs[i].set_bad(); // Halves of longs & doubles
twisti@4101 465 break;
twisti@4101 466
duke@435 467 default:
twisti@4101 468 fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
twisti@4101 469 break;
duke@435 470 }
duke@435 471 }
duke@435 472
duke@435 473 // Return the amount of stack space these arguments will need.
twisti@4101 474 return slot;
duke@435 475 }
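
// Worked example (a sketch): on a 64-bit build, outgoing, for the hypothetical
// signature (int, long), sig_bt = { T_INT, T_LONG, T_VOID } and the loop above
// yields regs[0].set1(O0), regs[1].set2(O1) and regs[2] Bad/Bad for the T_VOID
// half; the returned slot count is 0 because nothing spilled to the stack.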
duke@435 476
twisti@1441 477 // Helper class mostly to avoid passing masm everywhere, and handle
twisti@1441 478 // store displacement overflow logic.
duke@435 479 class AdapterGenerator {
duke@435 480 MacroAssembler *masm;
duke@435 481 Register Rdisp;
duke@435 482 void set_Rdisp(Register r) { Rdisp = r; }
duke@435 483
duke@435 484 void patch_callers_callsite();
duke@435 485
duke@435 486 // base+st_off points to top of argument
twisti@1861 487 int arg_offset(const int st_off) { return st_off; }
duke@435 488 int next_arg_offset(const int st_off) {
twisti@1861 489 return st_off - Interpreter::stackElementSize;
twisti@1441 490 }
twisti@1441 491
twisti@1441 492 // Argument slot values may be loaded first into a register because
twisti@1441 493 // they might not fit into displacement.
twisti@1441 494 RegisterOrConstant arg_slot(const int st_off);
twisti@1441 495 RegisterOrConstant next_arg_slot(const int st_off);
twisti@1441 496
duke@435 497 // Stores long into offset pointed to by base
duke@435 498 void store_c2i_long(Register r, Register base,
duke@435 499 const int st_off, bool is_stack);
duke@435 500 void store_c2i_object(Register r, Register base,
duke@435 501 const int st_off);
duke@435 502 void store_c2i_int(Register r, Register base,
duke@435 503 const int st_off);
duke@435 504 void store_c2i_double(VMReg r_2,
duke@435 505 VMReg r_1, Register base, const int st_off);
duke@435 506 void store_c2i_float(FloatRegister f, Register base,
duke@435 507 const int st_off);
duke@435 508
duke@435 509 public:
duke@435 510 void gen_c2i_adapter(int total_args_passed,
duke@435 511 // VMReg max_arg,
duke@435 512 int comp_args_on_stack, // VMRegStackSlots
duke@435 513 const BasicType *sig_bt,
duke@435 514 const VMRegPair *regs,
duke@435 515 Label& skip_fixup);
duke@435 516 void gen_i2c_adapter(int total_args_passed,
duke@435 517 // VMReg max_arg,
duke@435 518 int comp_args_on_stack, // VMRegStackSlots
duke@435 519 const BasicType *sig_bt,
duke@435 520 const VMRegPair *regs);
duke@435 521
duke@435 522 AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
duke@435 523 };
duke@435 524
duke@435 525
duke@435 526 // Patch the callers callsite with entry to compiled code if it exists.
duke@435 527 void AdapterGenerator::patch_callers_callsite() {
duke@435 528 Label L;
coleenp@4037 529 __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
kvn@3037 530 __ br_null(G3_scratch, false, Assembler::pt, L);
twisti@4101 531 __ delayed()->nop();
duke@435 532 // Call into the VM to patch the caller, then jump to compiled callee
duke@435 533 __ save_frame(4); // Args in compiled layout; do not blow them
duke@435 534
duke@435 535 // Must save all the live Gregs the list is:
duke@435 536 // G1: 1st Long arg (32bit build)
duke@435 537 // G2: global allocated to TLS
duke@435 538 // G3: used in inline cache check (scratch)
duke@435 539 // G4: 2nd Long arg (32bit build);
coleenp@4037 540 // G5: used in inline cache check (Method*)
duke@435 541
duke@435 542 // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
duke@435 543
duke@435 544 #ifdef _LP64
duke@435 545 // mov(s,d)
duke@435 546 __ mov(G1, L1);
duke@435 547 __ mov(G4, L4);
duke@435 548 __ mov(G5_method, L5);
duke@435 549 __ mov(G5_method, O0); // VM needs target method
duke@435 550 __ mov(I7, O1); // VM needs caller's callsite
duke@435 551 // Must be a leaf call...
duke@435 552 // can be very far once the blob has been relocated
twisti@1162 553 AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
duke@435 554 __ relocate(relocInfo::runtime_call_type);
twisti@1162 555 __ jumpl_to(dest, O7, O7);
duke@435 556 __ delayed()->mov(G2_thread, L7_thread_cache);
duke@435 557 __ mov(L7_thread_cache, G2_thread);
duke@435 558 __ mov(L1, G1);
duke@435 559 __ mov(L4, G4);
duke@435 560 __ mov(L5, G5_method);
duke@435 561 #else
duke@435 562 __ stx(G1, FP, -8 + STACK_BIAS);
duke@435 563 __ stx(G4, FP, -16 + STACK_BIAS);
duke@435 564 __ mov(G5_method, L5);
duke@435 565 __ mov(G5_method, O0); // VM needs target method
duke@435 566 __ mov(I7, O1); // VM needs caller's callsite
duke@435 567 // Must be a leaf call...
duke@435 568 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
duke@435 569 __ delayed()->mov(G2_thread, L7_thread_cache);
duke@435 570 __ mov(L7_thread_cache, G2_thread);
duke@435 571 __ ldx(FP, -8 + STACK_BIAS, G1);
duke@435 572 __ ldx(FP, -16 + STACK_BIAS, G4);
duke@435 573 __ mov(L5, G5_method);
duke@435 574 #endif /* _LP64 */
duke@435 575
duke@435 576 __ restore(); // Restore args
duke@435 577 __ bind(L);
duke@435 578 }
duke@435 579
twisti@1441 580
twisti@1441 581 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
twisti@1441 582 RegisterOrConstant roc(arg_offset(st_off));
twisti@1441 583 return __ ensure_simm13_or_reg(roc, Rdisp);
duke@435 584 }
duke@435 585
twisti@1441 586 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
twisti@1441 587 RegisterOrConstant roc(next_arg_offset(st_off));
twisti@1441 588 return __ ensure_simm13_or_reg(roc, Rdisp);
duke@435 589 }
twisti@1441 590
twisti@1441 591
duke@435 592 // Stores long into offset pointed to by base
duke@435 593 void AdapterGenerator::store_c2i_long(Register r, Register base,
duke@435 594 const int st_off, bool is_stack) {
duke@435 595 #ifdef _LP64
duke@435 596 // In V9, longs are given 2 64-bit slots in the interpreter, but the
duke@435 597 // data is passed in only 1 slot.
duke@435 598 __ stx(r, base, next_arg_slot(st_off));
duke@435 599 #else
ysr@777 600 #ifdef COMPILER2
duke@435 601 // Misaligned store of 64-bit data
duke@435 602 __ stw(r, base, arg_slot(st_off)); // lo bits
duke@435 603 __ srlx(r, 32, r);
duke@435 604 __ stw(r, base, next_arg_slot(st_off)); // hi bits
duke@435 605 #else
duke@435 606 if (is_stack) {
duke@435 607 // Misaligned store of 64-bit data
duke@435 608 __ stw(r, base, arg_slot(st_off)); // lo bits
duke@435 609 __ srlx(r, 32, r);
duke@435 610 __ stw(r, base, next_arg_slot(st_off)); // hi bits
duke@435 611 } else {
duke@435 612 __ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
duke@435 613 __ stw(r , base, next_arg_slot(st_off)); // hi bits
duke@435 614 }
duke@435 615 #endif // COMPILER2
ysr@777 616 #endif // _LP64
duke@435 617 }
duke@435 618
duke@435 619 void AdapterGenerator::store_c2i_object(Register r, Register base,
duke@435 620 const int st_off) {
duke@435 621 __ st_ptr (r, base, arg_slot(st_off));
duke@435 622 }
duke@435 623
duke@435 624 void AdapterGenerator::store_c2i_int(Register r, Register base,
duke@435 625 const int st_off) {
duke@435 626 __ st (r, base, arg_slot(st_off));
duke@435 627 }
duke@435 628
duke@435 629 // Stores into offset pointed to by base
duke@435 630 void AdapterGenerator::store_c2i_double(VMReg r_2,
duke@435 631 VMReg r_1, Register base, const int st_off) {
duke@435 632 #ifdef _LP64
duke@435 633 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
duke@435 634 // data is passed in only 1 slot.
duke@435 635 __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
duke@435 636 #else
duke@435 637 // Need to marshal 64-bit value from misaligned Lesp loads
duke@435 638 __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
duke@435 639 __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
duke@435 640 #endif
duke@435 641 }
duke@435 642
duke@435 643 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
duke@435 644 const int st_off) {
duke@435 645 __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
duke@435 646 }
duke@435 647
duke@435 648 void AdapterGenerator::gen_c2i_adapter(
duke@435 649 int total_args_passed,
duke@435 650 // VMReg max_arg,
duke@435 651 int comp_args_on_stack, // VMRegStackSlots
duke@435 652 const BasicType *sig_bt,
duke@435 653 const VMRegPair *regs,
twisti@4101 654 Label& L_skip_fixup) {
duke@435 655
duke@435 656 // Before we get into the guts of the C2I adapter, see if we should be here
duke@435 657 // at all. We've come from compiled code and are attempting to jump to the
duke@435 658 // interpreter, which means the caller made a static call to get here
duke@435 659 // (vcalls always get a compiled target if there is one). Check for a
duke@435 660 // compiled target. If there is one, we need to patch the caller's call.
duke@435 661 // However we will run interpreted if we come thru here. The next pass
duke@435 662 // thru the call site will run compiled. If we ran compiled here then
duke@435 663 // we can (theoretically) do endless i2c->c2i->i2c transitions during
duke@435 664 // deopt/uncommon trap cycles. If we always go interpreted here then
duke@435 665 // we can have at most one and don't need to play any tricks to keep
duke@435 666 // from endlessly growing the stack.
duke@435 667 //
duke@435 668 // Actually if we detected that we had an i2c->c2i transition here we
duke@435 669 // ought to be able to reset the world back to the state of the interpreted
duke@435 670 // call and not bother building another interpreter arg area. We don't
duke@435 671 // do that at this point.
duke@435 672
duke@435 673 patch_callers_callsite();
duke@435 674
twisti@4101 675 __ bind(L_skip_fixup);
duke@435 676
duke@435 677 // Since all args are passed on the stack, total_args_passed*wordSize is the
duke@435 678 // space we need. Add in varargs area needed by the interpreter. Round up
duke@435 679 // to stack alignment.
twisti@1861 680 const int arg_size = total_args_passed * Interpreter::stackElementSize;
duke@435 681 const int varargs_area =
duke@435 682 (frame::varargs_offset - frame::register_save_words)*wordSize;
duke@435 683 const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
duke@435 684
twisti@4101 685 const int bias = STACK_BIAS;
duke@435 686 const int interp_arg_offset = frame::varargs_offset*wordSize +
twisti@1861 687 (total_args_passed-1)*Interpreter::stackElementSize;
duke@435 688
twisti@4101 689 const Register base = SP;
twisti@4101 690
twisti@4101 691 // Make some extra space on the stack.
twisti@4101 692 __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
duke@435 693 set_Rdisp(G3_scratch);
twisti@4101 694
twisti@4101 695 // Write the args into the outgoing interpreter space.
twisti@4101 696 for (int i = 0; i < total_args_passed; i++) {
twisti@1861 697 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
duke@435 698 VMReg r_1 = regs[i].first();
duke@435 699 VMReg r_2 = regs[i].second();
duke@435 700 if (!r_1->is_valid()) {
duke@435 701 assert(!r_2->is_valid(), "");
duke@435 702 continue;
duke@435 703 }
duke@435 704 if (r_1->is_stack()) { // Pretend stack targets are loaded into G1
twisti@4101 705 RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
twisti@4101 706 ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
duke@435 707 r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
duke@435 708 if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
duke@435 709 else __ ldx(base, ld_off, G1_scratch);
duke@435 710 }
duke@435 711
duke@435 712 if (r_1->is_Register()) {
duke@435 713 Register r = r_1->as_Register()->after_restore();
duke@435 714 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
duke@435 715 store_c2i_object(r, base, st_off);
duke@435 716 } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
duke@435 717 store_c2i_long(r, base, st_off, r_2->is_stack());
duke@435 718 } else {
duke@435 719 store_c2i_int(r, base, st_off);
duke@435 720 }
duke@435 721 } else {
duke@435 722 assert(r_1->is_FloatRegister(), "");
duke@435 723 if (sig_bt[i] == T_FLOAT) {
duke@435 724 store_c2i_float(r_1->as_FloatRegister(), base, st_off);
duke@435 725 } else {
duke@435 726 assert(sig_bt[i] == T_DOUBLE, "wrong type");
duke@435 727 store_c2i_double(r_2, r_1, base, st_off);
duke@435 728 }
duke@435 729 }
duke@435 730 }
duke@435 731
twisti@4101 732 // Load the interpreter entry point.
coleenp@4037 733 __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
duke@435 734
duke@435 735 // Pass O5_savedSP as an argument to the interpreter.
duke@435 736 // The interpreter will restore SP to this value before returning.
twisti@4101 737 __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);
duke@435 738
duke@435 739 __ mov((frame::varargs_offset)*wordSize -
twisti@1861 740 1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
duke@435 741 // Jump to the interpreter just as if interpreter was doing it.
duke@435 742 __ jmpl(G3_scratch, 0, G0);
duke@435 743 // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
duke@435 744 // (really L0) is in use by the compiled frame as a generic temp. However,
duke@435 745 // the interpreter does not know where its args are without some kind of
duke@435 746 // arg pointer being passed in. Pass it in Gargs.
duke@435 747 __ delayed()->add(SP, G1, Gargs);
duke@435 748 }
duke@435 749
twisti@3969 750 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
twisti@3969 751 address code_start, address code_end,
twisti@3969 752 Label& L_ok) {
twisti@3969 753 Label L_fail;
twisti@3969 754 __ set(ExternalAddress(code_start), temp_reg);
twisti@3969 755 __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
twisti@3969 756 __ cmp(pc_reg, temp_reg);
twisti@3969 757 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
twisti@3969 758 __ delayed()->add(temp_reg, temp2_reg, temp_reg);
twisti@3969 759 __ cmp(pc_reg, temp_reg);
twisti@3969 760 __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
twisti@3969 761 __ bind(L_fail);
twisti@3969 762 }
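
// Note: range_check branches to L_ok iff code_start < pc_reg < code_start+size
// and otherwise falls through past L_fail, so callers place their failure
// handling (e.g. the __ stop in gen_i2c_adapter below) right after the call.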
twisti@3969 763
duke@435 764 void AdapterGenerator::gen_i2c_adapter(
duke@435 765 int total_args_passed,
duke@435 766 // VMReg max_arg,
duke@435 767 int comp_args_on_stack, // VMRegStackSlots
duke@435 768 const BasicType *sig_bt,
duke@435 769 const VMRegPair *regs) {
duke@435 770
duke@435 771 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
duke@435 772 // layout. Lesp was saved by the calling I-frame and will be restored on
duke@435 773 // return. Meanwhile, outgoing arg space is all owned by the callee
duke@435 774 // C-frame, so we can mangle it at will. After adjusting the frame size,
duke@435 775 // hoist register arguments and repack other args according to the compiled
duke@435 776 // code convention. Finally, end in a jump to the compiled code. The entry
duke@435 777 // point address is the start of the buffer.
duke@435 778
duke@435 779 // We will only enter here from an interpreted frame and never from after
duke@435 780 // passing thru a c2i. Azul allowed this but we do not. If we lose the
duke@435 781 // race and use a c2i we will remain interpreted for the race loser(s).
duke@435 782 // This removes all sorts of headaches on the x86 side and also eliminates
duke@435 783 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
duke@435 784
twisti@3969 785 // More detail:
twisti@3969 786 // Adapters can be frameless because they do not require the caller
twisti@3969 787 // to perform additional cleanup work, such as correcting the stack pointer.
twisti@3969 788 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
twisti@3969 789 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
twisti@3969 790 // even if a callee has modified the stack pointer.
twisti@3969 791 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
twisti@3969 792 // routinely repairs its caller's stack pointer (from sender_sp, which is set
twisti@3969 793 // up via the senderSP register).
twisti@3969 794 // In other words, if *either* the caller or callee is interpreted, we can
twisti@3969 795 // get the stack pointer repaired after a call.
twisti@3969 796 // This is why c2i and i2c adapters cannot be indefinitely composed.
twisti@3969 797 // In particular, if a c2i adapter were to somehow call an i2c adapter,
twisti@3969 798 // both caller and callee would be compiled methods, and neither would
twisti@3969 799 // clean up the stack pointer changes performed by the two adapters.
twisti@3969 800 // If this happens, control eventually transfers back to the compiled
twisti@3969 801 // caller, but with an uncorrected stack, causing delayed havoc.
twisti@3969 802
twisti@3969 803 if (VerifyAdapterCalls &&
twisti@3969 804 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
twisti@3969 805 // So, let's test for cascading c2i/i2c adapters right now.
twisti@3969 806 // assert(Interpreter::contains($return_addr) ||
twisti@3969 807 // StubRoutines::contains($return_addr),
twisti@3969 808 // "i2c adapter must return to an interpreter frame");
twisti@3969 809 __ block_comment("verify_i2c { ");
twisti@3969 810 Label L_ok;
twisti@3969 811 if (Interpreter::code() != NULL)
twisti@3969 812 range_check(masm, O7, O0, O1,
twisti@3969 813 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
twisti@3969 814 L_ok);
twisti@3969 815 if (StubRoutines::code1() != NULL)
twisti@3969 816 range_check(masm, O7, O0, O1,
twisti@3969 817 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
twisti@3969 818 L_ok);
twisti@3969 819 if (StubRoutines::code2() != NULL)
twisti@3969 820 range_check(masm, O7, O0, O1,
twisti@3969 821 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
twisti@3969 822 L_ok);
twisti@3969 823 const char* msg = "i2c adapter must return to an interpreter frame";
twisti@3969 824 __ block_comment(msg);
twisti@3969 825 __ stop(msg);
twisti@3969 826 __ bind(L_ok);
twisti@3969 827 __ block_comment("} verify_i2ce ");
twisti@3969 828 }
twisti@3969 829
duke@435 830 // As you can see from the list of inputs & outputs there are not a lot
duke@435 831 // of temp registers to work with: mostly G1, G3 & G4.
duke@435 832
duke@435 833 // Inputs:
duke@435 834 // G2_thread - TLS
duke@435 835 // G5_method - Method*
jrose@1145 836 // G4 (Gargs) - Pointer to interpreter's args
jrose@1145 837 // O0..O4 - free for scratch
jrose@1145 838 // O5_savedSP - Caller's saved SP, to be restored if needed
duke@435 839 // O6 - Current SP!
duke@435 840 // O7 - Valid return address
jrose@1145 841 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
duke@435 842
duke@435 843 // Outputs:
duke@435 844 // G2_thread - TLS
duke@435 845 // O0-O5 - Outgoing args in compiled layout
duke@435 846 // O6 - Adjusted or restored SP
duke@435 847 // O7 - Valid return address
twisti@1919 848 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
duke@435 849 // F0-F7 - more outgoing args
duke@435 850
duke@435 851
jrose@1145 852 // Gargs is the incoming argument base, and also an outgoing argument.
duke@435 853 __ sub(Gargs, BytesPerWord, Gargs);
duke@435 854
duke@435 855 // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
duke@435 856 // WITH O7 HOLDING A VALID RETURN PC
duke@435 857 //
duke@435 858 // | |
duke@435 859 // : java stack :
duke@435 860 // | |
duke@435 861 // +--------------+ <--- start of outgoing args
duke@435 862 // | receiver | |
duke@435 863 // : rest of args : |---size is java-arg-words
duke@435 864 // | | |
duke@435 865 // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
duke@435 866 // | | |
duke@435 867 // : unused : |---Space for max Java stack, plus stack alignment
duke@435 868 // | | |
duke@435 869 // +--------------+ <--- SP + 16*wordsize
duke@435 870 // | |
duke@435 871 // : window :
duke@435 872 // | |
duke@435 873 // +--------------+ <--- SP
duke@435 874
duke@435 875 // WE REPACK THE STACK. We use the common calling convention layout as
duke@435 876 // discovered by calling SharedRuntime::calling_convention. We assume it
duke@435 877 // causes an arbitrary shuffle of memory, which may require some register
duke@435 878 // temps to do the shuffle. We hope for (and optimize for) the case where
duke@435 879 // temps are not needed. We may have to resize the stack slightly, in case
duke@435 880 // we need alignment padding (32-bit interpreter can pass longs & doubles
duke@435 881 // misaligned, but the compilers expect them aligned).
duke@435 882 //
duke@435 883 // | |
duke@435 884 // : java stack :
duke@435 885 // | |
duke@435 886 // +--------------+ <--- start of outgoing args
duke@435 887 // | pad, align | |
duke@435 888 // +--------------+ |
twisti@4101 889 // | ints, longs, | |
twisti@4101 890 // | floats, | |---Outgoing stack args.
twisti@4101 891 // : doubles : | First few args in registers.
twisti@4101 892 // | | |
duke@435 893 // +--------------+ <--- SP' + 16*wordsize
duke@435 894 // | |
duke@435 895 // : window :
duke@435 896 // | |
duke@435 897 // +--------------+ <--- SP'
duke@435 898
duke@435 899 // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
duke@435 900 // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
duke@435 901 // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
duke@435 902
duke@435 903 // Cut-out for having no stack args. Since up to 6 args are passed
duke@435 904 // in registers, we will commonly have no stack args.
duke@435 905 if (comp_args_on_stack > 0) {
duke@435 906 // Convert VMReg stack slots to words.
duke@435 907 int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
duke@435 908 // Round up to minimum stack alignment, in wordSize
duke@435 909 comp_words_on_stack = round_to(comp_words_on_stack, 2);
duke@435 910 // Now compute the distance from Lesp to SP. This calculation does not
duke@435 911 // include the space for total_args_passed because Lesp has not yet popped
duke@435 912 // the arguments.
duke@435 913 __ sub(SP, (comp_words_on_stack)*wordSize, SP);
duke@435 914 }
duke@435 915
duke@435 916 // Now generate the shuffle code. Pick up all register args and move the
duke@435 917 // rest through G1_scratch.
twisti@4101 918 for (int i = 0; i < total_args_passed; i++) {
duke@435 919 if (sig_bt[i] == T_VOID) {
duke@435 920 // Longs and doubles are passed in native word order, but misaligned
duke@435 921 // in the 32-bit build.
duke@435 922 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
duke@435 923 continue;
duke@435 924 }
duke@435 925
duke@435 926 // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
duke@435 927 // 32-bit build and aligned in the 64-bit build. Look for the obvious
duke@435 928 // ldx/lddf optimizations.
duke@435 929
duke@435 930 // Load in argument order going down.
twisti@1861 931 const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
duke@435 932 set_Rdisp(G1_scratch);
duke@435 933
duke@435 934 VMReg r_1 = regs[i].first();
duke@435 935 VMReg r_2 = regs[i].second();
duke@435 936 if (!r_1->is_valid()) {
duke@435 937 assert(!r_2->is_valid(), "");
duke@435 938 continue;
duke@435 939 }
duke@435 940 if (r_1->is_stack()) { // Pretend stack targets are loaded into F8/F9
duke@435 941 r_1 = F8->as_VMReg(); // as part of the load/store shuffle
duke@435 942 if (r_2->is_valid()) r_2 = r_1->next();
duke@435 943 }
duke@435 944 if (r_1->is_Register()) { // Register argument
duke@435 945 Register r = r_1->as_Register()->after_restore();
duke@435 946 if (!r_2->is_valid()) {
duke@435 947 __ ld(Gargs, arg_slot(ld_off), r);
duke@435 948 } else {
duke@435 949 #ifdef _LP64
duke@435 950 // In V9, longs are given 2 64-bit slots in the interpreter, but the
duke@435 951 // data is passed in only 1 slot.
twisti@1441 952 RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
duke@435 953 next_arg_slot(ld_off) : arg_slot(ld_off);
duke@435 954 __ ldx(Gargs, slot, r);
duke@435 955 #else
twisti@4101 956 fatal("longs should be on stack");
duke@435 957 #endif
duke@435 958 }
duke@435 959 } else {
duke@435 960 assert(r_1->is_FloatRegister(), "");
duke@435 961 if (!r_2->is_valid()) {
twisti@4101 962 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
duke@435 963 } else {
duke@435 964 #ifdef _LP64
duke@435 965 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
duke@435 966 // data is passed in only 1 slot. This code also handles longs that
duke@435 967 // are passed on the stack, but need a stack-to-stack move through a
duke@435 968 // spare float register.
twisti@1441 969 RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
duke@435 970 next_arg_slot(ld_off) : arg_slot(ld_off);
twisti@4101 971 __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
duke@435 972 #else
duke@435 973 // Need to marshal 64-bit value from misaligned Lesp loads
duke@435 974 __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
twisti@4101 975 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
duke@435 976 #endif
duke@435 977 }
duke@435 978 }
duke@435 979 // Was the argument really intended to be on the stack, but was loaded
duke@435 980 // into F8/F9?
duke@435 981 if (regs[i].first()->is_stack()) {
duke@435 982 assert(r_1->as_FloatRegister() == F8, "fix this code");
duke@435 983 // Convert stack slot to an SP offset
duke@435 984 int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
duke@435 985 // Store down the shuffled stack word. Target address _is_ aligned.
twisti@1441 986 RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
twisti@1441 987 if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
twisti@1441 988 else __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
duke@435 989 }
duke@435 990 }
twisti@4101 991
twisti@4101 992 // Jump to the compiled code just as if compiled code was doing it.
twisti@4101 993 __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
twisti@4101 994
twisti@4101 995 // 6243940 We might end up in handle_wrong_method if
twisti@4101 996 // the callee is deoptimized as we race thru here. If that
twisti@4101 997 // happens we don't want to take a safepoint because the
twisti@4101 998 // caller frame will look interpreted and arguments are now
twisti@4101 999 // "compiled" so it is much better to make this transition
twisti@4101 1000 // invisible to the stack walking code. Unfortunately if
twisti@4101 1001 // we try and find the callee by normal means a safepoint
twisti@4101 1002 // is possible. So we stash the desired callee in the thread
twisti@4101 1003 // and the VM will find it there should this case occur.
twisti@4101 1004 Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
twisti@4101 1005 __ st_ptr(G5_method, callee_target_addr);
twisti@4101 1006
twisti@4101 1007 if (StressNonEntrant) {
twisti@4101 1008 // Open a big window for deopt failure
twisti@4101 1009 __ save_frame(0);
twisti@4101 1010 __ mov(G0, L0);
twisti@4101 1011 Label loop;
twisti@4101 1012 __ bind(loop);
twisti@4101 1013 __ sub(L0, 1, L0);
twisti@4101 1014 __ br_null_short(L0, Assembler::pt, loop);
twisti@4101 1015 __ restore();
duke@435 1016 }
twisti@4101 1017
twisti@4101 1018 __ jmpl(G3, 0, G0);
twisti@4101 1019 __ delayed()->nop();
duke@435 1020 }
duke@435 1021
duke@435 1022 // ---------------------------------------------------------------
duke@435 1023 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
duke@435 1024 int total_args_passed,
duke@435 1025 // VMReg max_arg,
duke@435 1026 int comp_args_on_stack, // VMRegStackSlots
duke@435 1027 const BasicType *sig_bt,
never@1622 1028 const VMRegPair *regs,
never@1622 1029 AdapterFingerPrint* fingerprint) {
duke@435 1030 address i2c_entry = __ pc();
duke@435 1031
duke@435 1032 AdapterGenerator agen(masm);
duke@435 1033
duke@435 1034 agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
duke@435 1035
duke@435 1036
duke@435 1037 // -------------------------------------------------------------------------
coleenp@4037 1038 // Generate a C2I adapter. On entry we know G5 holds the Method*. The
duke@435 1039 // args start out packed in the compiled layout. They need to be unpacked
duke@435 1040 // into the interpreter layout. This will almost always require some stack
duke@435 1041 // space. We grow the current (compiled) stack, then repack the args. We
duke@435 1042 // finally end in a jump to the generic interpreter entry point. On exit
duke@435 1043 // from the interpreter, the interpreter will restore our SP (lest the
duke@435 1044 // compiled code, which relies solely on SP and not FP, get sick).
duke@435 1045
duke@435 1046 address c2i_unverified_entry = __ pc();
twisti@4101 1047 Label L_skip_fixup;
duke@435 1048 {
twisti@4101 1049 Register R_temp = G1; // another scratch register
duke@435 1050
twisti@1162 1051 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
duke@435 1052
duke@435 1053 __ verify_oop(O0);
coleenp@548 1054 __ load_klass(O0, G3_scratch);
duke@435 1055
coleenp@4037 1056 __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
duke@435 1057 __ cmp(G3_scratch, R_temp);
duke@435 1058
duke@435 1059 Label ok, ok2;
duke@435 1060 __ brx(Assembler::equal, false, Assembler::pt, ok);
coleenp@4037 1061 __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
twisti@1162 1062 __ jump_to(ic_miss, G3_scratch);
duke@435 1063 __ delayed()->nop();
duke@435 1064
duke@435 1065 __ bind(ok);
duke@435 1066 // The method might have been compiled since the call site was patched to
duke@435 1067 // interpreted; if that is the case, treat it as a miss so we can get
duke@435 1068 // the call site corrected.
coleenp@4037 1069 __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
duke@435 1070 __ bind(ok2);
twisti@4101 1071 __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
twisti@4101 1072 __ delayed()->nop();
twisti@1162 1073 __ jump_to(ic_miss, G3_scratch);
duke@435 1074 __ delayed()->nop();
duke@435 1075
duke@435 1076 }
duke@435 1077
duke@435 1078 address c2i_entry = __ pc();
duke@435 1079
twisti@4101 1080 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);
duke@435 1081
duke@435 1082 __ flush();
never@1622 1083 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
duke@435 1084
duke@435 1085 }
duke@435 1086
duke@435 1087 // Helper function for native calling conventions
duke@435 1088 static VMReg int_stk_helper( int i ) {
duke@435 1089 // Bias any stack based VMReg we get by ignoring the window area
duke@435 1090 // but not the register parameter save area.
duke@435 1091 //
duke@435 1092 // This is strange for the following reasons. We'd normally expect
duke@435 1093 // the calling convention to return a VMReg for a stack slot
duke@435 1094 // completely ignoring any abi reserved area. C2 thinks of that
duke@435 1095 // abi area as only out_preserve_stack_slots. This does not include
duke@435 1096 // the area allocated by the C abi to store down integer arguments
duke@435 1097 // because the java calling convention does not use it. So
duke@435 1098 // since c2 assumes that there are only out_preserve_stack_slots
duke@435 1099 // to bias the optoregs (which impacts VMRegs), the C calling convention
duke@435 1100 // must add in this bias amount when referencing any actual stack location,
duke@435 1101 // to make up for the fact that out_preserve_stack_slots is
duke@435 1102 // insufficient for C calls. What a mess. I sure hope those 6
duke@435 1103 // stack words were worth it on every java call!
duke@435 1104
duke@435 1105 // Another way of cleaning this up would be for out_preserve_stack_slots
duke@435 1106 // to take a parameter to say whether it was C or java calling conventions.
duke@435 1107 // Then things might look a little better (but not much).
duke@435 1108
duke@435 1109 int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
duke@435 1110 if( mem_parm_offset < 0 ) {
duke@435 1111 return as_oRegister(i)->as_VMReg();
duke@435 1112 } else {
duke@435 1113 int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
duke@435 1114 // Now return a biased offset that will be correct when out_preserve_slots is added back in
duke@435 1115 return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
duke@435 1116 }
duke@435 1117 }
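// An illustrative walk-through of the bias above (a sketch only; the real
// constants come from frame.hpp and sharedRuntime):
//   i = 0..5 : below SPARC_ARGS_IN_REGS_NUM, so the arg lands in O0..O5.
//   i = 6    : mem_parm_offset == 0, so we return
//              stack2reg(frame::memory_parameter_word_sp_offset
//                        * VMRegImpl::slots_per_word
//                        - SharedRuntime::out_preserve_stack_slots());
//              adding out_preserve_stack_slots back in later recovers the
//              true SP-relative slot, as described above.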
duke@435 1118
duke@435 1119
duke@435 1120 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
duke@435 1121 VMRegPair *regs,
duke@435 1122 int total_args_passed) {
duke@435 1123
duke@435 1124 // Return the number of VMReg stack_slots needed for the args.
duke@435 1125 // This value does not include an abi space (like register window
duke@435 1126 // save area).
duke@435 1127
duke@435 1128 // The native convention is V8 if !LP64
duke@435 1129 // The LP64 convention is the V9 convention, which is slightly more sane.
duke@435 1130
duke@435 1131 // We return the amount of VMReg stack slots we need to reserve for all
duke@435 1132 // the arguments NOT counting out_preserve_stack_slots. Since we always
duke@435 1133 // have space for storing at least 6 registers to memory we start with that.
duke@435 1134 // See int_stk_helper for a further discussion.
duke@435 1135 int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
duke@435 1136
duke@435 1137 #ifdef _LP64
duke@435 1138 // V9 convention: All things "as-if" on double-wide stack slots.
duke@435 1139 // Hoist any int/ptr/long's in the first 6 to int regs.
duke@435 1140 // Hoist any flt/dbl's in the first 16 dbl regs.
duke@435 1141 int j = 0; // Count of actual args, not HALVES
duke@435 1142 for( int i=0; i<total_args_passed; i++, j++ ) {
duke@435 1143 switch( sig_bt[i] ) {
duke@435 1144 case T_BOOLEAN:
duke@435 1145 case T_BYTE:
duke@435 1146 case T_CHAR:
duke@435 1147 case T_INT:
duke@435 1148 case T_SHORT:
duke@435 1149 regs[i].set1( int_stk_helper( j ) ); break;
duke@435 1150 case T_LONG:
duke@435 1151 assert( sig_bt[i+1] == T_VOID, "expecting half" );
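// fall through: longs use the same 64-bit slot assignment as pointers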
duke@435 1152 case T_ADDRESS: // raw pointers, like current thread, for VM calls
duke@435 1153 case T_ARRAY:
duke@435 1154 case T_OBJECT:
roland@4051 1155 case T_METADATA:
duke@435 1156 regs[i].set2( int_stk_helper( j ) );
duke@435 1157 break;
duke@435 1158 case T_FLOAT:
duke@435 1159 if ( j < 16 ) {
duke@435 1160 // V9ism: floats go in ODD registers
duke@435 1161 regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
duke@435 1162 } else {
duke@435 1163 // V9ism: floats go in ODD stack slot
duke@435 1164 regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
duke@435 1165 }
duke@435 1166 break;
duke@435 1167 case T_DOUBLE:
duke@435 1168 assert( sig_bt[i+1] == T_VOID, "expecting half" );
duke@435 1169 if ( j < 16 ) {
duke@435 1170 // V9ism: doubles go in EVEN/ODD regs
duke@435 1171 regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
duke@435 1172 } else {
duke@435 1173 // V9ism: doubles go in EVEN/ODD stack slots
duke@435 1174 regs[i].set2(VMRegImpl::stack2reg(j<<1));
duke@435 1175 }
duke@435 1176 break;
duke@435 1177 case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
duke@435 1178 default:
duke@435 1179 ShouldNotReachHere();
duke@435 1180 }
duke@435 1181 if (regs[i].first()->is_stack()) {
duke@435 1182 int off = regs[i].first()->reg2stack();
duke@435 1183 if (off > max_stack_slots) max_stack_slots = off;
duke@435 1184 }
duke@435 1185 if (regs[i].second()->is_stack()) {
duke@435 1186 int off = regs[i].second()->reg2stack();
duke@435 1187 if (off > max_stack_slots) max_stack_slots = off;
duke@435 1188 }
duke@435 1189 }
duke@435 1190
duke@435 1191 #else // _LP64
duke@435 1192 // V8 convention: first 6 things in O-regs, rest on stack.
duke@435 1193 // Alignment is willy-nilly.
duke@435 1194 for( int i=0; i<total_args_passed; i++ ) {
duke@435 1195 switch( sig_bt[i] ) {
duke@435 1196 case T_ADDRESS: // raw pointers, like current thread, for VM calls
duke@435 1197 case T_ARRAY:
duke@435 1198 case T_BOOLEAN:
duke@435 1199 case T_BYTE:
duke@435 1200 case T_CHAR:
duke@435 1201 case T_FLOAT:
duke@435 1202 case T_INT:
duke@435 1203 case T_OBJECT:
roland@4051 1204 case T_METADATA:
duke@435 1205 case T_SHORT:
duke@435 1206 regs[i].set1( int_stk_helper( i ) );
duke@435 1207 break;
duke@435 1208 case T_DOUBLE:
duke@435 1209 case T_LONG:
duke@435 1210 assert( sig_bt[i+1] == T_VOID, "expecting half" );
duke@435 1211 regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
duke@435 1212 break;
duke@435 1213 case T_VOID: regs[i].set_bad(); break;
duke@435 1214 default:
duke@435 1215 ShouldNotReachHere();
duke@435 1216 }
duke@435 1217 if (regs[i].first()->is_stack()) {
duke@435 1218 int off = regs[i].first()->reg2stack();
duke@435 1219 if (off > max_stack_slots) max_stack_slots = off;
duke@435 1220 }
duke@435 1221 if (regs[i].second()->is_stack()) {
duke@435 1222 int off = regs[i].second()->reg2stack();
duke@435 1223 if (off > max_stack_slots) max_stack_slots = off;
duke@435 1224 }
duke@435 1225 }
duke@435 1226 #endif // _LP64
duke@435 1227
duke@435 1228 return round_to(max_stack_slots + 1, 2);
duke@435 1229
duke@435 1230 }
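// An illustrative example (LP64 path, hand-computed from the loop above):
// for a C signature (int, double, long), i.e.
// sig_bt = { T_INT, T_DOUBLE, T_VOID, T_LONG, T_VOID }:
//   i=0, j=0, T_INT    -> int_stk_helper(0) = O0
//   i=1, j=1, T_DOUBLE -> F2/F3 (doubles go in EVEN/ODD regs)
//   i=3, j=2, T_LONG   -> int_stk_helper(2) = O2
// The T_VOID halves are marked bad and do not advance j.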
duke@435 1231
duke@435 1232
duke@435 1233 // ---------------------------------------------------------------------------
duke@435 1234 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
duke@435 1235 switch (ret_type) {
duke@435 1236 case T_FLOAT:
duke@435 1237 __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
duke@435 1238 break;
duke@435 1239 case T_DOUBLE:
duke@435 1240 __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
duke@435 1241 break;
duke@435 1242 }
duke@435 1243 }
duke@435 1244
duke@435 1245 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
duke@435 1246 switch (ret_type) {
duke@435 1247 case T_FLOAT:
duke@435 1248 __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
duke@435 1249 break;
duke@435 1250 case T_DOUBLE:
duke@435 1251 __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
duke@435 1252 break;
duke@435 1253 }
duke@435 1254 }
duke@435 1255
duke@435 1256 // Check and forward any pending exception. The thread is stored in
duke@435 1257 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
duke@435 1258 // is no exception handler. We merely pop this frame off and throw the
duke@435 1259 // exception in the caller's frame.
duke@435 1260 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
duke@435 1261 Label L;
duke@435 1262 __ br_null(Rex_oop, false, Assembler::pt, L);
duke@435 1263 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
duke@435 1264 // Since this is a native call, we *know* the proper exception handler
duke@435 1265 // without calling into the VM: it's the empty function. Just pop this
duke@435 1266 // frame and then jump to forward_exception_entry; O7 will contain the
duke@435 1267 // native caller's return PC.
twisti@1162 1268 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
twisti@1162 1269 __ jump_to(exception_entry, G3_scratch);
duke@435 1270 __ delayed()->restore(); // Pop this frame off.
duke@435 1271 __ bind(L);
duke@435 1272 }
duke@435 1273
duke@435 1274 // A simple move of an integer-like type
duke@435 1275 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1276 if (src.first()->is_stack()) {
duke@435 1277 if (dst.first()->is_stack()) {
duke@435 1278 // stack to stack
duke@435 1279 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@435 1280 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1281 } else {
duke@435 1282 // stack to reg
duke@435 1283 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1284 }
duke@435 1285 } else if (dst.first()->is_stack()) {
duke@435 1286 // reg to stack
duke@435 1287 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1288 } else {
duke@435 1289 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@435 1290 }
duke@435 1291 }
duke@435 1292
duke@435 1293 // On 64 bit we will store integer-like items to the stack as
duke@435 1294 // 64-bit items (sparc abi) even though java would only store
duke@435 1295 // 32 bits for a parameter. On 32 bit it will simply be 32 bits.
duke@435 1296 // So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
duke@435 1297 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1298 if (src.first()->is_stack()) {
duke@435 1299 if (dst.first()->is_stack()) {
duke@435 1300 // stack to stack
duke@435 1301 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@435 1302 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1303 } else {
duke@435 1304 // stack to reg
duke@435 1305 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1306 }
duke@435 1307 } else if (dst.first()->is_stack()) {
duke@435 1308 // reg to stack
duke@435 1309 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1310 } else {
duke@435 1311 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@435 1312 }
duke@435 1313 }
duke@435 1314
duke@435 1315
never@3500 1316 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
never@3500 1317 if (src.first()->is_stack()) {
never@3500 1318 if (dst.first()->is_stack()) {
never@3500 1319 // stack to stack
never@3500 1320 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
never@3500 1321 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
never@3500 1322 } else {
never@3500 1323 // stack to reg
never@3500 1324 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
never@3500 1325 }
never@3500 1326 } else if (dst.first()->is_stack()) {
never@3500 1327 // reg to stack
never@3500 1328 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
never@3500 1329 } else {
never@3500 1330 __ mov(src.first()->as_Register(), dst.first()->as_Register());
never@3500 1331 }
never@3500 1332 }
never@3500 1333
never@3500 1334
duke@435 1335 // An oop arg. Must pass a handle, not the oop itself
duke@435 1336 static void object_move(MacroAssembler* masm,
duke@435 1337 OopMap* map,
duke@435 1338 int oop_handle_offset,
duke@435 1339 int framesize_in_slots,
duke@435 1340 VMRegPair src,
duke@435 1341 VMRegPair dst,
duke@435 1342 bool is_receiver,
duke@435 1343 int* receiver_offset) {
duke@435 1344
duke@435 1345 // must pass a handle. First figure out the location we use as a handle
duke@435 1346
duke@435 1347 if (src.first()->is_stack()) {
duke@435 1348 // Oop is already on the stack
duke@435 1349 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
duke@435 1350 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
duke@435 1351 __ ld_ptr(rHandle, 0, L4);
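    // Conditionally clear the handle: JNI expects a NULL oop to be passed
    // as a NULL handle, not as a pointer to a stack slot holding NULL.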
duke@435 1352 #ifdef _LP64
duke@435 1353 __ movr( Assembler::rc_z, L4, G0, rHandle );
duke@435 1354 #else
duke@435 1355 __ tst( L4 );
duke@435 1356 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
duke@435 1357 #endif
duke@435 1358 if (dst.first()->is_stack()) {
duke@435 1359 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1360 }
duke@435 1361 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
duke@435 1362 if (is_receiver) {
duke@435 1363 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
duke@435 1364 }
duke@435 1365 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
duke@435 1366 } else {
duke@435 1367 // Oop is in an input register; we must flush it to the stack
duke@435 1368 const Register rOop = src.first()->as_Register();
duke@435 1369 const Register rHandle = L5;
duke@435 1370 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
duke@435 1371 int offset = oop_slot*VMRegImpl::stack_slot_size;
duke@435 1372 Label skip;
duke@435 1373 __ st_ptr(rOop, SP, offset + STACK_BIAS);
duke@435 1374 if (is_receiver) {
duke@435 1375 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
duke@435 1376 }
duke@435 1377 map->set_oop(VMRegImpl::stack2reg(oop_slot));
duke@435 1378 __ add(SP, offset + STACK_BIAS, rHandle);
duke@435 1379 #ifdef _LP64
duke@435 1380 __ movr( Assembler::rc_z, rOop, G0, rHandle );
duke@435 1381 #else
duke@435 1382 __ tst( rOop );
duke@435 1383 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
duke@435 1384 #endif
duke@435 1385
duke@435 1386 if (dst.first()->is_stack()) {
duke@435 1387 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1388 } else {
duke@435 1389 __ mov(rHandle, dst.first()->as_Register());
duke@435 1390 }
duke@435 1391 }
duke@435 1392 }
duke@435 1393
duke@435 1394 // A float arg may have to do a float reg to int reg conversion
duke@435 1395 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1396 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
duke@435 1397
duke@435 1398 if (src.first()->is_stack()) {
duke@435 1399 if (dst.first()->is_stack()) {
duke@435 1400 // stack to stack the easiest of the bunch
duke@435 1401 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@435 1402 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1403 } else {
duke@435 1404 // stack to reg
duke@435 1405 if (dst.first()->is_Register()) {
duke@435 1406 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1407 } else {
duke@435 1408 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@435 1409 }
duke@435 1410 }
duke@435 1411 } else if (dst.first()->is_stack()) {
duke@435 1412 // reg to stack
duke@435 1413 if (src.first()->is_Register()) {
duke@435 1414 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1415 } else {
duke@435 1416 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1417 }
duke@435 1418 } else {
duke@435 1419 // reg to reg
duke@435 1420 if (src.first()->is_Register()) {
duke@435 1421 if (dst.first()->is_Register()) {
duke@435 1422 // gpr -> gpr
duke@435 1423 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@435 1424 } else {
duke@435 1425 // gpr -> fpr
duke@435 1426 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
duke@435 1427 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
duke@435 1428 }
duke@435 1429 } else if (dst.first()->is_Register()) {
duke@435 1430 // fpr -> gpr
duke@435 1431 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
duke@435 1432 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
duke@435 1433 } else {
duke@435 1434 // fpr -> fpr
duke@435 1435 // In theory these overlap but the ordering is such that this is likely a nop
duke@435 1436 if ( src.first() != dst.first()) {
duke@435 1437 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
duke@435 1438 }
duke@435 1439 }
duke@435 1440 }
duke@435 1441 }
duke@435 1442
duke@435 1443 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1444 VMRegPair src_lo(src.first());
duke@435 1445 VMRegPair src_hi(src.second());
duke@435 1446 VMRegPair dst_lo(dst.first());
duke@435 1447 VMRegPair dst_hi(dst.second());
duke@435 1448 simple_move32(masm, src_lo, dst_lo);
duke@435 1449 simple_move32(masm, src_hi, dst_hi);
duke@435 1450 }
duke@435 1451
duke@435 1452 // A long move
duke@435 1453 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1454
duke@435 1455 // Do the simple ones here else do two int moves
duke@435 1456 if (src.is_single_phys_reg() ) {
duke@435 1457 if (dst.is_single_phys_reg()) {
duke@435 1458 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@435 1459 } else {
duke@435 1460 // split src into two separate registers
duke@435 1461 // Remember hi means hi address or lsw on sparc
duke@435 1462 // Move msw to lsw
duke@435 1463 if (dst.second()->is_reg()) {
duke@435 1464 // MSW -> MSW
duke@435 1465 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
duke@435 1466 // Now LSW -> LSW
duke@435 1467 // this will only move lo -> lo and ignore hi
duke@435 1468 VMRegPair split(dst.second());
duke@435 1469 simple_move32(masm, src, split);
duke@435 1470 } else {
duke@435 1471 VMRegPair split(src.first(), L4->as_VMReg());
duke@435 1472 // MSW -> MSW (lo, i.e. first word)
duke@435 1473 __ srax(src.first()->as_Register(), 32, L4);
duke@435 1474 split_long_move(masm, split, dst);
duke@435 1475 }
duke@435 1476 }
duke@435 1477 } else if (dst.is_single_phys_reg()) {
duke@435 1478 if (src.is_adjacent_aligned_on_stack(2)) {
never@739 1479 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1480 } else {
duke@435 1481 // dst is a single reg.
duke@435 1482 // Remember lo is low address not msb for stack slots
duke@435 1483 // and lo is the "real" register for registers
duke@435 1484 // src is either a reg/stack pair or two stack slots:
duke@435 1485
duke@435 1486 VMRegPair split;
duke@435 1487
duke@435 1488 if (src.first()->is_reg()) {
duke@435 1489 // src.lo (msw) is a reg, src.hi is stk/reg
duke@435 1490 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
duke@435 1491 split.set_pair(dst.first(), src.first());
duke@435 1492 } else {
duke@435 1493 // msw is stack move to L5
duke@435 1494 // lsw is stack move to dst.lo (real reg)
duke@435 1495 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
duke@435 1496 split.set_pair(dst.first(), L5->as_VMReg());
duke@435 1497 }
duke@435 1498
duke@435 1499 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
duke@435 1500 // msw -> src.lo/L5, lsw -> dst.lo
duke@435 1501 split_long_move(masm, src, split);
duke@435 1502
duke@435 1503 // So dst now has the low order correct; position the
duke@435 1504 // msw half
duke@435 1505 __ sllx(split.first()->as_Register(), 32, L5);
duke@435 1506
duke@435 1507 const Register d = dst.first()->as_Register();
duke@435 1508 __ or3(L5, d, d);
duke@435 1509 }
duke@435 1510 } else {
duke@435 1511 // For LP64 we can probably do better.
duke@435 1512 split_long_move(masm, src, dst);
duke@435 1513 }
duke@435 1514 }
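// Illustrative case (32-bit): a long held in one 64-bit register and
// destined for two 32-bit registers takes the first branch above: srax
// drops the MSW into dst.first() and simple_move32 copies the LSW into
// dst.second().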
duke@435 1515
duke@435 1516 // A double move
duke@435 1517 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@435 1518
duke@435 1519 // The painful thing here is that like long_move a VMRegPair might be
duke@435 1520 // 1: a single physical register
duke@435 1521 // 2: two physical registers (v8)
duke@435 1522 // 3: a physical reg [lo] and a stack slot [hi] (v8)
duke@435 1523 // 4: two stack slots
duke@435 1524
duke@435 1525 // Since src is always a java calling convention we know that the src pair
duke@435 1526 // is always either all registers or all stack (and aligned?)
duke@435 1527
duke@435 1528 // (the mixed case, a register [lo] and a stack slot [hi], only occurs on the dst side)
duke@435 1529 if (src.first()->is_stack()) {
duke@435 1530 if (dst.first()->is_stack()) {
duke@435 1531 // stack to stack the easiest of the bunch
duke@435 1532 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
duke@435 1533 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@435 1534 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@435 1535 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1536 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1537 } else {
duke@435 1538 // stack to reg
duke@435 1539 if (dst.second()->is_stack()) {
duke@435 1540 // stack -> reg, stack -> stack
duke@435 1541 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@435 1542 if (dst.first()->is_Register()) {
duke@435 1543 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1544 } else {
duke@435 1545 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@435 1546 }
duke@435 1547 // This was missing. (very rare case)
duke@435 1548 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1549 } else {
duke@435 1550 // stack -> reg
duke@435 1551 // Eventually optimize for alignment QQQ
duke@435 1552 if (dst.first()->is_Register()) {
duke@435 1553 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@435 1554 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
duke@435 1555 } else {
duke@435 1556 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@435 1557 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
duke@435 1558 }
duke@435 1559 }
duke@435 1560 }
duke@435 1561 } else if (dst.first()->is_stack()) {
duke@435 1562 // reg to stack
duke@435 1563 if (src.first()->is_Register()) {
duke@435 1564 // Eventually optimize for alignment QQQ
duke@435 1565 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1566 if (src.second()->is_stack()) {
duke@435 1567 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@435 1568 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1569 } else {
duke@435 1570 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1571 }
duke@435 1572 } else {
duke@435 1573 // fpr to stack
duke@435 1574 if (src.second()->is_stack()) {
duke@435 1575 ShouldNotReachHere();
duke@435 1576 } else {
duke@435 1577 // Is the stack aligned?
duke@435 1578 if (reg2offset(dst.first()) & 0x7) {
duke@435 1579 // No, do as pairs
duke@435 1580 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1581 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1582 } else {
duke@435 1583 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@435 1584 }
duke@435 1585 }
duke@435 1586 }
duke@435 1587 } else {
duke@435 1588 // reg to reg
duke@435 1589 if (src.first()->is_Register()) {
duke@435 1590 if (dst.first()->is_Register()) {
duke@435 1591 // gpr -> gpr
duke@435 1592 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@435 1593 __ mov(src.second()->as_Register(), dst.second()->as_Register());
duke@435 1594 } else {
duke@435 1595 // gpr -> fpr
duke@435 1596 // ought to be able to do a single store
duke@435 1597 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
duke@435 1598 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
duke@435 1599 // ought to be able to do a single load
duke@435 1600 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
duke@435 1601 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
duke@435 1602 }
duke@435 1603 } else if (dst.first()->is_Register()) {
duke@435 1604 // fpr -> gpr
duke@435 1605 // ought to be able to do a single store
duke@435 1606 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
duke@435 1607 // ought to be able to do a single load
duke@435 1608 // REMEMBER first() is low address not LSB
duke@435 1609 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
duke@435 1610 if (dst.second()->is_Register()) {
duke@435 1611 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
duke@435 1612 } else {
duke@435 1613 __ ld(FP, -4 + STACK_BIAS, L4);
duke@435 1614 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@435 1615 }
duke@435 1616 } else {
duke@435 1617 // fpr -> fpr
duke@435 1618 // In theory these overlap but the ordering is such that this is likely a nop
duke@435 1619 if ( src.first() != dst.first()) {
duke@435 1620 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
duke@435 1621 }
duke@435 1622 }
duke@435 1623 }
duke@435 1624 }
duke@435 1625
duke@435 1626 // Creates an inner frame if one hasn't already been created, and
duke@435 1627 // saves a copy of the thread in L7_thread_cache
duke@435 1628 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
duke@435 1629 if (!*already_created) {
duke@435 1630 __ save_frame(0);
duke@435 1631 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
duke@435 1632 // Don't use save_thread because it smashes G2 and we merely want to save a
duke@435 1633 // copy
duke@435 1634 __ mov(G2_thread, L7_thread_cache);
duke@435 1635 *already_created = true;
duke@435 1636 }
duke@435 1637 }
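// Sketch of the intended use (this is the flag pattern used by the
// native wrapper below):
//   bool inner_frame_created = false;
//   create_inner_frame(masm, &inner_frame_created); // saves a frame
//   create_inner_frame(masm, &inner_frame_created); // no-op on repeat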
duke@435 1638
never@3500 1639
never@3500 1640 static void save_or_restore_arguments(MacroAssembler* masm,
never@3500 1641 const int stack_slots,
never@3500 1642 const int total_in_args,
never@3500 1643 const int arg_save_area,
never@3500 1644 OopMap* map,
never@3500 1645 VMRegPair* in_regs,
never@3500 1646 BasicType* in_sig_bt) {
never@3500 1647 // if map is non-NULL then the code should store the values,
never@3500 1648 // otherwise it should load them.
never@3500 1649 if (map != NULL) {
never@3500 1650 // Fill in the map
never@3500 1651 for (int i = 0; i < total_in_args; i++) {
never@3500 1652 if (in_sig_bt[i] == T_ARRAY) {
never@3500 1653 if (in_regs[i].first()->is_stack()) {
never@3500 1654 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
never@3500 1655 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
never@3500 1656 } else if (in_regs[i].first()->is_Register()) {
never@3500 1657 map->set_oop(in_regs[i].first());
never@3500 1658 } else {
never@3500 1659 ShouldNotReachHere();
never@3500 1660 }
never@3500 1661 }
never@3500 1662 }
never@3500 1663 }
never@3500 1664
never@3500 1665 // Save or restore double word values
never@3500 1666 int handle_index = 0;
never@3500 1667 for (int i = 0; i < total_in_args; i++) {
never@3500 1668 int slot = handle_index + arg_save_area;
never@3500 1669 int offset = slot * VMRegImpl::stack_slot_size;
never@3500 1670 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
never@3500 1671 const Register reg = in_regs[i].first()->as_Register();
never@3500 1672 if (reg->is_global()) {
never@3500 1673 handle_index += 2;
never@3500 1674 assert(handle_index <= stack_slots, "overflow");
never@3500 1675 if (map != NULL) {
never@3500 1676 __ stx(reg, SP, offset + STACK_BIAS);
never@3500 1677 } else {
never@3500 1678 __ ldx(SP, offset + STACK_BIAS, reg);
never@3500 1679 }
never@3500 1680 }
never@3500 1681 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
never@3500 1682 handle_index += 2;
never@3500 1683 assert(handle_index <= stack_slots, "overflow");
never@3500 1684 if (map != NULL) {
never@3500 1685 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
never@3500 1686 } else {
never@3500 1687 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
never@3500 1688 }
never@3500 1689 }
never@3500 1690 }
never@3500 1691 // Save floats
never@3500 1692 for (int i = 0; i < total_in_args; i++) {
never@3500 1693 int slot = handle_index + arg_save_area;
never@3500 1694 int offset = slot * VMRegImpl::stack_slot_size;
never@3500 1695 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
never@3500 1696 handle_index++;
never@3500 1697 assert(handle_index <= stack_slots, "overflow");
never@3500 1698 if (map != NULL) {
never@3500 1699 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
never@3500 1700 } else {
never@3500 1701 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
never@3500 1702 }
never@3500 1703 }
never@3500 1704 }
never@3500 1705
never@3500 1706 }
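// This helper is invoked in pairs around a blocking call: first with a
// fresh OopMap to store the live argument registers, then with a NULL map
// to reload them (see check_needs_gc_for_critical_native below).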
never@3500 1707
never@3500 1708
never@3500 1709 // Check GC_locker::needs_gc and enter the runtime if it's true. This
never@3500 1710 // keeps a new JNI critical region from starting until a GC has been
never@3500 1711 // forced. Save down any oops in registers and describe them in an
never@3500 1712 // OopMap.
never@3500 1713 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
never@3500 1714 const int stack_slots,
never@3500 1715 const int total_in_args,
never@3500 1716 const int arg_save_area,
never@3500 1717 OopMapSet* oop_maps,
never@3500 1718 VMRegPair* in_regs,
never@3500 1719 BasicType* in_sig_bt) {
never@3500 1720 __ block_comment("check GC_locker::needs_gc");
never@3500 1721 Label cont;
never@3500 1722 AddressLiteral sync_state(GC_locker::needs_gc_address());
never@3500 1723 __ load_bool_contents(sync_state, G3_scratch);
never@3500 1724 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
never@3500 1725 __ delayed()->nop();
never@3500 1726
never@3500 1727 // Save down any values that are live in registers and call into the
never@3500 1728 // runtime to halt for a GC
never@3500 1729 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
never@3500 1730 save_or_restore_arguments(masm, stack_slots, total_in_args,
never@3500 1731 arg_save_area, map, in_regs, in_sig_bt);
never@3500 1732
never@3500 1733 __ mov(G2_thread, L7_thread_cache);
never@3500 1734
never@3500 1735 __ set_last_Java_frame(SP, noreg);
never@3500 1736
never@3500 1737 __ block_comment("block_for_jni_critical");
never@3500 1738 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
never@3500 1739 __ delayed()->mov(L7_thread_cache, O0);
never@3500 1740 oop_maps->add_gc_map( __ offset(), map);
never@3500 1741
never@3500 1742 __ restore_thread(L7_thread_cache); // restore G2_thread
never@3500 1743 __ reset_last_Java_frame();
never@3500 1744
never@3500 1745 // Reload all the register arguments
never@3500 1746 save_or_restore_arguments(masm, stack_slots, total_in_args,
never@3500 1747 arg_save_area, NULL, in_regs, in_sig_bt);
never@3500 1748
never@3500 1749 __ bind(cont);
never@3500 1750 #ifdef ASSERT
never@3500 1751 if (StressCriticalJNINatives) {
never@3500 1752 // Stress register saving
never@3500 1753 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
never@3500 1754 save_or_restore_arguments(masm, stack_slots, total_in_args,
never@3500 1755 arg_save_area, map, in_regs, in_sig_bt);
never@3500 1756 // Destroy argument registers
never@3500 1757 for (int i = 0; i < total_in_args; i++) {
never@3500 1758 if (in_regs[i].first()->is_Register()) {
never@3500 1759 const Register reg = in_regs[i].first()->as_Register();
never@3500 1760 if (reg->is_global()) {
never@3500 1761 __ mov(G0, reg);
never@3500 1762 }
never@3500 1763 } else if (in_regs[i].first()->is_FloatRegister()) {
never@3500 1764 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
never@3500 1765 }
never@3500 1766 }
never@3500 1767
never@3500 1768 save_or_restore_arguments(masm, stack_slots, total_in_args,
never@3500 1769 arg_save_area, NULL, in_regs, in_sig_bt);
never@3500 1770 }
never@3500 1771 #endif
never@3500 1772 }
never@3500 1773
never@3500 1774 // Unpack an array argument into a pointer to the body and the length
never@3500 1775 // if the array is non-null, otherwise pass 0 for both.
never@3500 1776 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
never@3500 1777 // Pass the length, ptr pair
never@3500 1778 Label is_null, done;
never@3500 1779 if (reg.first()->is_stack()) {
never@3500 1780 VMRegPair tmp = reg64_to_VMRegPair(L2);
never@3500 1781 // Load the arg up from the stack
never@3500 1782 move_ptr(masm, reg, tmp);
never@3500 1783 reg = tmp;
never@3500 1784 }
never@3500 1785 __ cmp(reg.first()->as_Register(), G0);
never@3500 1786 __ brx(Assembler::equal, false, Assembler::pt, is_null);
never@3500 1787 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
never@3500 1788 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
never@3500 1789 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
never@3500 1790 move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
never@3500 1791 __ ba_short(done);
never@3500 1792 __ bind(is_null);
never@3500 1793 // Pass zeros
never@3500 1794 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
never@3500 1795 move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
never@3500 1796 __ bind(done);
never@3500 1797 }
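// Illustrative effect: a non-null jbyteArray argument is rewritten into a
// (length, body) pair where
//   length = the jint at arrayOopDesc::length_offset_in_bytes()
//   body   = array + arrayOopDesc::base_offset_in_bytes(T_BYTE)
// and a NULL array passes (0, NULL) for both.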
never@3500 1798
twisti@3969 1799 static void verify_oop_args(MacroAssembler* masm,
twisti@4101 1800 methodHandle method,
twisti@3969 1801 const BasicType* sig_bt,
twisti@3969 1802 const VMRegPair* regs) {
twisti@3969 1803 Register temp_reg = G5_method; // not part of any compiled calling seq
twisti@3969 1804 if (VerifyOops) {
twisti@4101 1805 for (int i = 0; i < method->size_of_parameters(); i++) {
twisti@3969 1806 if (sig_bt[i] == T_OBJECT ||
twisti@3969 1807 sig_bt[i] == T_ARRAY) {
twisti@3969 1808 VMReg r = regs[i].first();
twisti@3969 1809 assert(r->is_valid(), "bad oop arg");
twisti@3969 1810 if (r->is_stack()) {
twisti@3969 1811 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
twisti@3969 1812 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
twisti@3969 1813 __ ld_ptr(SP, ld_off, temp_reg);
twisti@3969 1814 __ verify_oop(temp_reg);
twisti@3969 1815 } else {
twisti@3969 1816 __ verify_oop(r->as_Register());
twisti@3969 1817 }
twisti@3969 1818 }
twisti@3969 1819 }
twisti@3969 1820 }
twisti@3969 1821 }
twisti@3969 1822
twisti@3969 1823 static void gen_special_dispatch(MacroAssembler* masm,
twisti@4101 1824 methodHandle method,
twisti@3969 1825 const BasicType* sig_bt,
twisti@3969 1826 const VMRegPair* regs) {
twisti@4101 1827 verify_oop_args(masm, method, sig_bt, regs);
twisti@4101 1828 vmIntrinsics::ID iid = method->intrinsic_id();
twisti@3969 1829
twisti@3969 1830 // Now write the args into the outgoing interpreter space
twisti@3969 1831 bool has_receiver = false;
twisti@3969 1832 Register receiver_reg = noreg;
twisti@3969 1833 int member_arg_pos = -1;
twisti@3969 1834 Register member_reg = noreg;
twisti@4101 1835 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
twisti@3969 1836 if (ref_kind != 0) {
twisti@4101 1837 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
twisti@3969 1838 member_reg = G5_method; // known to be free at this point
twisti@3969 1839 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
twisti@4101 1840 } else if (iid == vmIntrinsics::_invokeBasic) {
twisti@3969 1841 has_receiver = true;
twisti@3969 1842 } else {
twisti@4101 1843 fatal(err_msg_res("unexpected intrinsic id %d", iid));
twisti@3969 1844 }
twisti@3969 1845
twisti@3969 1846 if (member_reg != noreg) {
twisti@3969 1847 // Load the member_arg into register, if necessary.
twisti@4101 1848 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
twisti@3969 1849 VMReg r = regs[member_arg_pos].first();
twisti@3969 1850 if (r->is_stack()) {
twisti@3969 1851 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
twisti@3969 1852 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
twisti@3969 1853 __ ld_ptr(SP, ld_off, member_reg);
twisti@3969 1854 } else {
twisti@3969 1855 // no data motion is needed
twisti@3969 1856 member_reg = r->as_Register();
twisti@3969 1857 }
twisti@3969 1858 }
twisti@3969 1859
twisti@3969 1860 if (has_receiver) {
twisti@3969 1861 // Make sure the receiver is loaded into a register.
twisti@4101 1862 assert(method->size_of_parameters() > 0, "oob");
twisti@3969 1863 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
twisti@3969 1864 VMReg r = regs[0].first();
twisti@3969 1865 assert(r->is_valid(), "bad receiver arg");
twisti@3969 1866 if (r->is_stack()) {
twisti@3969 1867 // Porting note: This assumes that compiled calling conventions always
twisti@3969 1868 // pass the receiver oop in a register. If this is not true on some
twisti@3969 1869 // platform, pick a temp and load the receiver from stack.
twisti@4101 1870 fatal("receiver always in a register");
twisti@3969 1871 receiver_reg = G3_scratch; // known to be free at this point
twisti@3969 1872 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
twisti@3969 1873 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
twisti@3969 1874 __ ld_ptr(SP, ld_off, receiver_reg);
twisti@3969 1875 } else {
twisti@3969 1876 // no data motion is needed
twisti@3969 1877 receiver_reg = r->as_Register();
twisti@3969 1878 }
twisti@3969 1879 }
twisti@3969 1880
twisti@3969 1881 // Figure out which address we are really jumping to:
twisti@4101 1882 MethodHandles::generate_method_handle_dispatch(masm, iid,
twisti@3969 1883 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
twisti@3969 1884 }
twisti@3969 1885
duke@435 1886 // ---------------------------------------------------------------------------
duke@435 1887 // Generate a native wrapper for a given method. The method takes arguments
duke@435 1888 // in the Java compiled code convention, marshals them to the native
duke@435 1889 // convention (handlizes oops, etc), transitions to native, makes the call,
duke@435 1890 // returns to java state (possibly blocking), unhandlizes any result and
duke@435 1891 // returns.
twisti@3969 1892 //
twisti@3969 1893 // Critical native functions are a shorthand for the use of
twisti@3969 1894 // GetPrimitiveArrayCritical and disallow the use of any other JNI
twisti@3969 1895 // functions. The wrapper is expected to unpack the arguments before
twisti@3969 1896 // passing them to the callee and perform checks before and after the
twisti@3969 1897 // native call to ensure that the GC_locker
twisti@3969 1898 // lock_critical/unlock_critical semantics are followed. Some other
twisti@3969 1899 // parts of JNI setup are skipped, like the tear down of the JNI handle
twisti@3969 1900 // block and the check for pending exceptions, since it's impossible for
twisti@3969 1901 // them to be thrown.
twisti@3969 1902 //
twisti@3969 1903 // They are roughly structured like this:
twisti@3969 1904 // if (GC_locker::needs_gc())
twisti@3969 1905 // SharedRuntime::block_for_jni_critical();
twisti@3969 1906 // transition to thread_in_native
twisti@3969 1907 // unpack array arguments and call native entry point
twisti@3969 1908 // check for safepoint in progress
twisti@3969 1909 // check if any thread suspend flags are set
twisti@3969 1910 // call into the JVM and possibly unlock the JNI critical region
twisti@3969 1911 // if a GC was suppressed while in the critical native.
twisti@3969 1912 // transition back to thread_in_Java
twisti@3969 1913 // return to caller
twisti@3969 1914 //
twisti@4101 1915 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
duke@435 1916 methodHandle method,
twisti@2687 1917 int compile_id,
twisti@3969 1918 BasicType* in_sig_bt,
twisti@3969 1919 VMRegPair* in_regs,
duke@435 1920 BasicType ret_type) {
twisti@3969 1921 if (method->is_method_handle_intrinsic()) {
twisti@3969 1922 vmIntrinsics::ID iid = method->intrinsic_id();
twisti@3969 1923 intptr_t start = (intptr_t)__ pc();
twisti@3969 1924 int vep_offset = ((intptr_t)__ pc()) - start;
twisti@3969 1925 gen_special_dispatch(masm,
twisti@4101 1926 method,
twisti@3969 1927 in_sig_bt,
twisti@3969 1928 in_regs);
twisti@3969 1929 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
twisti@3969 1930 __ flush();
twisti@3969 1931 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
twisti@3969 1932 return nmethod::new_native_nmethod(method,
twisti@3969 1933 compile_id,
twisti@3969 1934 masm->code(),
twisti@3969 1935 vep_offset,
twisti@3969 1936 frame_complete,
twisti@3969 1937 stack_slots / VMRegImpl::slots_per_word,
twisti@3969 1938 in_ByteSize(-1),
twisti@3969 1939 in_ByteSize(-1),
twisti@3969 1940 (OopMapSet*)NULL);
twisti@3969 1941 }
never@3500 1942 bool is_critical_native = true;
never@3500 1943 address native_func = method->critical_native_function();
never@3500 1944 if (native_func == NULL) {
never@3500 1945 native_func = method->native_function();
never@3500 1946 is_critical_native = false;
never@3500 1947 }
never@3500 1948 assert(native_func != NULL, "must have function");
duke@435 1949
duke@435 1950 // Native nmethod wrappers never take possession of the oop arguments.
duke@435 1951 // So the caller will gc the arguments. The only thing we need an
duke@435 1952 // oopMap for is if the call is static
duke@435 1953 //
duke@435 1954 // An OopMap for lock (and class if static), and one for the VM call itself
duke@435 1955 OopMapSet *oop_maps = new OopMapSet();
duke@435 1956 intptr_t start = (intptr_t)__ pc();
duke@435 1957
duke@435 1958 // First thing make an ic check to see if we should even be here
duke@435 1959 {
duke@435 1960 Label L;
duke@435 1961 const Register temp_reg = G3_scratch;
twisti@1162 1962 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
duke@435 1963 __ verify_oop(O0);
coleenp@548 1964 __ load_klass(O0, temp_reg);
kvn@3037 1965 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
duke@435 1966
twisti@1162 1967 __ jump_to(ic_miss, temp_reg);
duke@435 1968 __ delayed()->nop();
duke@435 1969 __ align(CodeEntryAlignment);
duke@435 1970 __ bind(L);
duke@435 1971 }
duke@435 1972
duke@435 1973 int vep_offset = ((intptr_t)__ pc()) - start;
duke@435 1974
duke@435 1975 #ifdef COMPILER1
duke@435 1976 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
duke@435 1977 // Object.hashCode can pull the hashCode from the header word
duke@435 1978 // instead of doing a full VM transition once it's been computed.
duke@435 1979 // Since hashCode is usually polymorphic at call sites we can't do
duke@435 1980 // this optimization at the call site without a lot of work.
duke@435 1981 Label slowCase;
duke@435 1982 Register receiver = O0;
duke@435 1983 Register result = O0;
duke@435 1984 Register header = G3_scratch;
duke@435 1985 Register hash = G3_scratch; // overwrite header value with hash value
duke@435 1986 Register mask = G1; // to get hash field from header
duke@435 1987
duke@435 1988 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
duke@435 1989 // We depend on hash_mask being at most 32 bits and avoid the use of
duke@435 1990 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
duke@435 1991 // vm: see markOop.hpp.
duke@435 1992 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
duke@435 1993 __ sethi(markOopDesc::hash_mask, mask);
duke@435 1994 __ btst(markOopDesc::unlocked_value, header);
duke@435 1995 __ br(Assembler::zero, false, Assembler::pn, slowCase);
duke@435 1996 if (UseBiasedLocking) {
duke@435 1997 // Check if biased and fall through to runtime if so
duke@435 1998 __ delayed()->nop();
duke@435 1999 __ btst(markOopDesc::biased_lock_bit_in_place, header);
duke@435 2000 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
duke@435 2001 }
duke@435 2002 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
duke@435 2003
duke@435 2004 // Check for a valid (non-zero) hash code and get its value.
duke@435 2005 #ifdef _LP64
duke@435 2006 __ srlx(header, markOopDesc::hash_shift, hash);
duke@435 2007 #else
duke@435 2008 __ srl(header, markOopDesc::hash_shift, hash);
duke@435 2009 #endif
duke@435 2010 __ andcc(hash, mask, hash);
duke@435 2011 __ br(Assembler::equal, false, Assembler::pn, slowCase);
duke@435 2012 __ delayed()->nop();
duke@435 2013
duke@435 2014 // leaf return.
duke@435 2015 __ retl();
duke@435 2016 __ delayed()->mov(hash, result);
duke@435 2017 __ bind(slowCase);
duke@435 2018 }
duke@435 2019 #endif // COMPILER1
duke@435 2020
duke@435 2021
duke@435 2022 // We have received a description of where all the java args are located
duke@435 2023 // on entry to the wrapper. We need to convert these args to where
duke@435 2024 // the jni function will expect them. To figure out where they go
duke@435 2025 // we convert the java signature to a C signature by inserting
duke@435 2026 // the hidden arguments as arg[0] and possibly arg[1] (static method)
duke@435 2027
twisti@4101 2028 const int total_in_args = method->size_of_parameters();
never@3500 2029 int total_c_args = total_in_args;
never@3500 2030 int total_save_slots = 6 * VMRegImpl::slots_per_word;
never@3500 2031 if (!is_critical_native) {
never@3500 2032 total_c_args += 1;
never@3500 2033 if (method->is_static()) {
never@3500 2034 total_c_args++;
never@3500 2035 }
never@3500 2036 } else {
never@3500 2037 for (int i = 0; i < total_in_args; i++) {
never@3500 2038 if (in_sig_bt[i] == T_ARRAY) {
never@3500 2039 // These have to be saved and restored across the safepoint
never@3500 2040 total_c_args++;
never@3500 2041 }
never@3500 2042 }
duke@435 2043 }
duke@435 2044
duke@435 2045 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
never@3500 2046 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
never@3500 2047 BasicType* in_elem_bt = NULL;
duke@435 2048
duke@435 2049 int argc = 0;
never@3500 2050 if (!is_critical_native) {
never@3500 2051 out_sig_bt[argc++] = T_ADDRESS;
never@3500 2052 if (method->is_static()) {
never@3500 2053 out_sig_bt[argc++] = T_OBJECT;
never@3500 2054 }
never@3500 2055
never@3500 2056 for (int i = 0; i < total_in_args ; i++ ) {
never@3500 2057 out_sig_bt[argc++] = in_sig_bt[i];
never@3500 2058 }
never@3500 2059 } else {
never@3500 2060 Thread* THREAD = Thread::current();
never@3500 2061 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
never@3500 2062 SignatureStream ss(method->signature());
never@3500 2063 for (int i = 0; i < total_in_args ; i++ ) {
never@3500 2064 if (in_sig_bt[i] == T_ARRAY) {
never@3500 2065 // Arrays are passed as int, elem* pair
never@3500 2066 out_sig_bt[argc++] = T_INT;
never@3500 2067 out_sig_bt[argc++] = T_ADDRESS;
never@3500 2068 Symbol* atype = ss.as_symbol(CHECK_NULL);
never@3500 2069 const char* at = atype->as_C_string();
never@3500 2070 if (strlen(at) == 2) {
never@3500 2071 assert(at[0] == '[', "must be");
never@3500 2072 switch (at[1]) {
never@3500 2073 case 'B': in_elem_bt[i] = T_BYTE; break;
never@3500 2074 case 'C': in_elem_bt[i] = T_CHAR; break;
never@3500 2075 case 'D': in_elem_bt[i] = T_DOUBLE; break;
never@3500 2076 case 'F': in_elem_bt[i] = T_FLOAT; break;
never@3500 2077 case 'I': in_elem_bt[i] = T_INT; break;
never@3500 2078 case 'J': in_elem_bt[i] = T_LONG; break;
never@3500 2079 case 'S': in_elem_bt[i] = T_SHORT; break;
never@3500 2080 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
never@3500 2081 default: ShouldNotReachHere();
never@3500 2082 }
never@3500 2083 }
never@3500 2084 } else {
never@3500 2085 out_sig_bt[argc++] = in_sig_bt[i];
never@3500 2086 in_elem_bt[i] = T_VOID;
never@3500 2087 }
never@3500 2088 if (in_sig_bt[i] != T_VOID) {
never@3500 2089 assert(in_sig_bt[i] == ss.type(), "must match");
never@3500 2090 ss.next();
never@3500 2091 }
never@3500 2092 }
duke@435 2093 }
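  // Example of the expansion above: a critical native taking
  // (jint, jbyteArray) has in_sig_bt = { T_INT, T_ARRAY }; this becomes
  // out_sig_bt = { T_INT, T_INT, T_ADDRESS } (value, length, body) with
  // in_elem_bt = { T_VOID, T_BYTE } recovered from the "[B" signature.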
duke@435 2094
duke@435 2095 // Now figure out where the args must be stored and how much stack space
duke@435 2096 // they require (neglecting out_preserve_stack_slots but including space
duke@435 2097 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
duke@435 2098 //
duke@435 2099 int out_arg_slots;
duke@435 2100 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
duke@435 2101
never@3500 2102 if (is_critical_native) {
never@3500 2103 // Critical natives may have to call out so they need a save area
never@3500 2104 // for register arguments.
never@3500 2105 int double_slots = 0;
never@3500 2106 int single_slots = 0;
never@3500 2107 for ( int i = 0; i < total_in_args; i++) {
never@3500 2108 if (in_regs[i].first()->is_Register()) {
never@3500 2109 const Register reg = in_regs[i].first()->as_Register();
never@3500 2110 switch (in_sig_bt[i]) {
never@3500 2111 case T_ARRAY:
never@3500 2112 case T_BOOLEAN:
never@3500 2113 case T_BYTE:
never@3500 2114 case T_SHORT:
never@3500 2115 case T_CHAR:
never@3500 2116 case T_INT: assert(reg->is_in(), "don't need to save these"); break;
never@3500 2117 case T_LONG: if (reg->is_global()) double_slots++; break;
never@3500 2118 default: ShouldNotReachHere();
never@3500 2119 }
never@3500 2120 } else if (in_regs[i].first()->is_FloatRegister()) {
never@3500 2121 switch (in_sig_bt[i]) {
never@3500 2122 case T_FLOAT: single_slots++; break;
never@3500 2123 case T_DOUBLE: double_slots++; break;
never@3500 2124 default: ShouldNotReachHere();
never@3500 2125 }
never@3500 2126 }
never@3500 2127 }
never@3500 2128 total_save_slots = double_slots * 2 + single_slots;
never@3500 2129 }
never@3500 2130
duke@435 2131 // Compute framesize for the wrapper. We need to handlize all oops in
duke@435 2132 // registers. We must create space for them here that is disjoint from
duke@435 2133 // the windowed save area because we have no control over when we might
duke@435 2134 // flush the window again and overwrite values that gc has since modified.
duke@435 2135 // (The live window race)
duke@435 2136 //
duke@435 2137 // We always just allocate 6 words for storing down these objects. This allows
duke@435 2138 // us to simply record the base and use the Ireg number to decide which
duke@435 2139 // slot to use. (Note that the reg number is the inbound number not the
duke@435 2140 // outbound number).
duke@435 2141 // We must shuffle args to match the native convention, and include var-args space.
duke@435 2142
duke@435 2143 // Calculate the total number of stack slots we will need.
duke@435 2144
duke@435 2145 // First count the abi requirement plus all of the outgoing args
duke@435 2146 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
duke@435 2147
duke@435 2148 // Now the space for the inbound oop handle area
duke@435 2149
never@3500 2150 int oop_handle_offset = round_to(stack_slots, 2);
never@3500 2151 stack_slots += total_save_slots;
duke@435 2152
duke@435 2153 // Now any space we need for handlizing a klass if static method
duke@435 2154
duke@435 2155 int klass_slot_offset = 0;
duke@435 2156 int klass_offset = -1;
duke@435 2157 int lock_slot_offset = 0;
duke@435 2158 bool is_static = false;
duke@435 2159
duke@435 2160 if (method->is_static()) {
duke@435 2161 klass_slot_offset = stack_slots;
duke@435 2162 stack_slots += VMRegImpl::slots_per_word;
duke@435 2163 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
duke@435 2164 is_static = true;
duke@435 2165 }
duke@435 2166
duke@435 2167 // Plus a lock if needed
duke@435 2168
duke@435 2169 if (method->is_synchronized()) {
duke@435 2170 lock_slot_offset = stack_slots;
duke@435 2171 stack_slots += VMRegImpl::slots_per_word;
duke@435 2172 }
duke@435 2173
duke@435 2174 // Now a place to save return value or as a temporary for any gpr -> fpr moves
duke@435 2175 stack_slots += 2;
duke@435 2176
duke@435 2177 // Ok The space we have allocated will look like:
duke@435 2178 //
duke@435 2179 //
duke@435 2180 // FP-> | |
duke@435 2181 // |---------------------|
duke@435 2182 // | 2 slots for moves |
duke@435 2183 // |---------------------|
duke@435 2184 // | lock box (if sync) |
duke@435 2185 // |---------------------| <- lock_slot_offset
duke@435 2186 // | klass (if static) |
duke@435 2187 // |---------------------| <- klass_slot_offset
duke@435 2188 // | oopHandle area |
duke@435 2189 // |---------------------| <- oop_handle_offset
duke@435 2190 // | outbound memory |
duke@435 2191 // | based arguments |
duke@435 2192 // | |
duke@435 2193 // |---------------------|
duke@435 2194 // | vararg area |
duke@435 2195 // |---------------------|
duke@435 2196 // | |
duke@435 2197 // SP-> | out_preserved_slots |
duke@435 2198 //
duke@435 2199 //
duke@435 2200
duke@435 2201
duke@435 2202 // Now compute actual number of stack words we need rounding to make
duke@435 2203 // stack properly aligned.
duke@435 2204 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
duke@435 2205
duke@435 2206 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
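  // A sketch of the alignment arithmetic (assuming VMRegImpl::slots_per_word
  // is 2 on LP64 and 1 on 32-bit, with 4-byte stack slots): rounding
  // stack_slots to 2 * slots_per_word aligns the frame to 16 bytes on
  // 64-bit and 8 bytes on 32-bit.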
duke@435 2207
duke@435 2208 // Generate stack overflow check before creating frame
duke@435 2209 __ generate_stack_overflow_check(stack_size);
duke@435 2210
duke@435 2211 // Generate a new frame for the wrapper.
duke@435 2212 __ save(SP, -stack_size, SP);
duke@435 2213
duke@435 2214 int frame_complete = ((intptr_t)__ pc()) - start;
duke@435 2215
duke@435 2216 __ verify_thread();
duke@435 2217
never@3500 2218 if (is_critical_native) {
never@3500 2219 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
never@3500 2220 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
never@3500 2221 }
duke@435 2222
duke@435 2223 //
duke@435 2224 // We immediately shuffle the arguments so that any vm call we have to
duke@435 2225 // make from here on out (sync slow path, jvmti, etc.) we will have
duke@435 2226 // captured the oops from our caller and have a valid oopMap for
duke@435 2227 // them.
duke@435 2228
duke@435 2229 // -----------------
duke@435 2230 // The Grand Shuffle
duke@435 2231 //
duke@435 2232 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
duke@435 2233 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
duke@435 2234 // the class mirror instead of a receiver. This pretty much guarantees that
duke@435 2235 // register layout will not match. We ignore these extra arguments during
duke@435 2236 // the shuffle. The shuffle is described by the two calling convention
duke@435 2237 // vectors we have in our possession. We simply walk the java vector to
duke@435 2238 // get the source locations and the c vector to get the destinations.
duke@435 2239 // Because we have a new window and the argument registers are completely
duke@435 2240 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
duke@435 2241 // here.
duke@435 2242
duke@435 2243 // This is a trick. We double the stack slots so we can claim
duke@435 2244 // the oops in the caller's frame. Since we are sure to have
duke@435 2245 // more args than the caller, doubling is enough to make
duke@435 2246 // sure we can capture all the incoming oop args from the
duke@435 2247 // caller.
duke@435 2248 //
duke@435 2249 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
duke@435 2250 // Record sp-based slot for receiver on stack for non-static methods
duke@435 2251 int receiver_offset = -1;
duke@435 2252
duke@435 2253 // We move the arguments backward because the floating point registers
duke@435 2254 // destination will always be to a register with a greater or equal register
duke@435 2255 // number or the stack.
duke@435 2256
duke@435 2257 #ifdef ASSERT
duke@435 2258 bool reg_destroyed[RegisterImpl::number_of_registers];
duke@435 2259 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
duke@435 2260 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
duke@435 2261 reg_destroyed[r] = false;
duke@435 2262 }
duke@435 2263 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
duke@435 2264 freg_destroyed[f] = false;
duke@435 2265 }
duke@435 2266
duke@435 2267 #endif /* ASSERT */
duke@435 2268
never@3500 2269 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
duke@435 2270
duke@435 2271 #ifdef ASSERT
duke@435 2272 if (in_regs[i].first()->is_Register()) {
duke@435 2273 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
duke@435 2274 } else if (in_regs[i].first()->is_FloatRegister()) {
duke@435 2275 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
duke@435 2276 }
duke@435 2277 if (out_regs[c_arg].first()->is_Register()) {
duke@435 2278 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
duke@435 2279 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
duke@435 2280 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
duke@435 2281 }
duke@435 2282 #endif /* ASSERT */
duke@435 2283
duke@435 2284 switch (in_sig_bt[i]) {
duke@435 2285 case T_ARRAY:
never@3500 2286 if (is_critical_native) {
never@3500 2287 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
never@3500 2288 c_arg--;
never@3500 2289 break;
never@3500 2290 }
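      // fall through: for regular (non-critical) natives an array is
      // passed like any other oop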
duke@435 2291 case T_OBJECT:
never@3500 2292 assert(!is_critical_native, "no oop arguments");
duke@435 2293 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
duke@435 2294 ((i == 0) && (!is_static)),
duke@435 2295 &receiver_offset);
duke@435 2296 break;
duke@435 2297 case T_VOID:
duke@435 2298 break;
duke@435 2299
duke@435 2300 case T_FLOAT:
duke@435 2301 float_move(masm, in_regs[i], out_regs[c_arg]);
never@3500 2302 break;
duke@435 2303
duke@435 2304 case T_DOUBLE:
duke@435 2305 assert( i + 1 < total_in_args &&
duke@435 2306 in_sig_bt[i + 1] == T_VOID &&
duke@435 2307 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
duke@435 2308 double_move(masm, in_regs[i], out_regs[c_arg]);
duke@435 2309 break;
duke@435 2310
duke@435 2311 case T_LONG :
duke@435 2312 long_move(masm, in_regs[i], out_regs[c_arg]);
duke@435 2313 break;
duke@435 2314
duke@435 2315 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
duke@435 2316
duke@435 2317 default:
duke@435 2318 move32_64(masm, in_regs[i], out_regs[c_arg]);
duke@435 2319 }
duke@435 2320 }
duke@435 2321
duke@435 2322 // Pre-load a static method's oop into O1. Used both by locking code and
duke@435 2323 // the normal JNI call code.
never@3500 2324 if (method->is_static() && !is_critical_native) {
coleenp@4251 2325 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
duke@435 2326
duke@435 2327 // Now handlize the static class mirror in O1. It's known not-null.
duke@435 2328 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
duke@435 2329 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
duke@435 2330 __ add(SP, klass_offset + STACK_BIAS, O1);
duke@435 2331 }
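// In effect the code above builds the jclass argument a static native
// expects; roughly, as a C-style sketch:
//
//   *(oop*)(SP + klass_offset + STACK_BIAS) = mirror; // store oop so GC can find/update it
//   O1 = SP + klass_offset + STACK_BIAS;              // pass a handle: the address of the slot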
duke@435 2332
duke@435 2333
duke@435 2334 const Register L6_handle = L6;
duke@435 2335
duke@435 2336 if (method->is_synchronized()) {
never@3500 2337 assert(!is_critical_native, "unhandled");
duke@435 2338 __ mov(O1, L6_handle);
duke@435 2339 }
duke@435 2340
duke@435 2341 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
duke@435 2342 // except O6/O7. So if we must call out we must push a new frame. We immediately
duke@435 2343 // push a new frame and flush the windows.
duke@435 2344 #ifdef _LP64
duke@435 2345 intptr_t thepc = (intptr_t) __ pc();
duke@435 2346 {
duke@435 2347 address here = __ pc();
duke@435 2348 // Call the next instruction
duke@435 2349 __ call(here + 8, relocInfo::none);
duke@435 2350 __ delayed()->nop();
duke@435 2351 }
duke@435 2352 #else
duke@435 2353 intptr_t thepc = __ load_pc_address(O7, 0);
duke@435 2354 #endif /* _LP64 */
duke@435 2355
duke@435 2356 // We use the same pc/oopMap repeatedly when we call out
duke@435 2357 oop_maps->add_gc_map(thepc - start, map);
duke@435 2358
duke@435 2359 // O7 now has the pc loaded that we will use when we finally call to native.
duke@435 2360
duke@435 2361 // Save thread in L7; it crosses a bunch of VM calls below
duke@435 2362 // Don't use save_thread because it smashes G2 and we merely
duke@435 2363 // want to save a copy
duke@435 2364 __ mov(G2_thread, L7_thread_cache);
duke@435 2365
duke@435 2366
duke@435 2367 // If we create an inner frame, once is plenty;
duke@435 2368 // when we create it we must also save G2_thread.
duke@435 2369 bool inner_frame_created = false;
duke@435 2370
duke@435 2371 // dtrace method entry support
duke@435 2372 {
duke@435 2373 SkipIfEqual skip_if(
duke@435 2374 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
duke@435 2375 // create inner frame
duke@435 2376 __ save_frame(0);
duke@435 2377 __ mov(G2_thread, L7_thread_cache);
coleenp@4037 2378 __ set_metadata_constant(method(), O1);
duke@435 2379 __ call_VM_leaf(L7_thread_cache,
duke@435 2380 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
duke@435 2381 G2_thread, O1);
duke@435 2382 __ restore();
duke@435 2383 }
duke@435 2384
dcubed@1045 2385 // RedefineClasses() tracing support for obsolete method entry
dcubed@1045 2386 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
dcubed@1045 2387 // create inner frame
dcubed@1045 2388 __ save_frame(0);
dcubed@1045 2389 __ mov(G2_thread, L7_thread_cache);
coleenp@4037 2390 __ set_metadata_constant(method(), O1);
dcubed@1045 2391 __ call_VM_leaf(L7_thread_cache,
dcubed@1045 2392 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
dcubed@1045 2393 G2_thread, O1);
dcubed@1045 2394 __ restore();
dcubed@1045 2395 }
dcubed@1045 2396
duke@435 2397 // We are in the jni frame unless saved_frame is true, in which case
duke@435 2398 // we are one frame deeper (the "inner" frame). If we are in the
duke@435 2399 // "inner" frame the args are in the Iregs; if in the jni frame, then
duke@435 2400 // they are in the Oregs.
duke@435 2401 // If we ever need to go to the VM (for locking, jvmti) then
duke@435 2402 // we will always be in the "inner" frame.
duke@435 2403
duke@435 2404 // Lock a synchronized method
duke@435 2405 int lock_offset = -1; // Set if locked
duke@435 2406 if (method->is_synchronized()) {
duke@435 2407 Register Roop = O1;
duke@435 2408 const Register L3_box = L3;
duke@435 2409
duke@435 2410 create_inner_frame(masm, &inner_frame_created);
duke@435 2411
duke@435 2412 __ ld_ptr(I1, 0, O1);
duke@435 2413 Label done;
duke@435 2414
duke@435 2415 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
duke@435 2416 __ add(FP, lock_offset+STACK_BIAS, L3_box);
duke@435 2417 #ifdef ASSERT
duke@435 2418 if (UseBiasedLocking) {
duke@435 2419 // making the box point to itself will make it clear it went unused
duke@435 2420 // but also be obviously invalid
duke@435 2421 __ st_ptr(L3_box, L3_box, 0);
duke@435 2422 }
duke@435 2423 #endif // ASSERT
duke@435 2424 //
duke@435 2425 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
duke@435 2426 //
duke@435 2427 __ compiler_lock_object(Roop, L1, L3_box, L2);
duke@435 2428 __ br(Assembler::equal, false, Assembler::pt, done);
duke@435 2429 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
duke@435 2430
duke@435 2431
duke@435 2432 // None of the above fast optimizations worked so we have to get into the
duke@435 2433 // slow case of monitor enter. Inline a special case of call_VM that
duke@435 2434 // disallows any pending_exception.
duke@435 2435 __ mov(Roop, O0); // Need oop in O0
duke@435 2436 __ mov(L3_box, O1);
duke@435 2437
duke@435 2438 // Record last_Java_sp, in case the VM code releases the JVM lock.
duke@435 2439
duke@435 2440 __ set_last_Java_frame(FP, I7);
duke@435 2441
duke@435 2442 // do the call
duke@435 2443 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
duke@435 2444 __ delayed()->mov(L7_thread_cache, O2);
duke@435 2445
duke@435 2446 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@435 2447 __ reset_last_Java_frame();
duke@435 2448
duke@435 2449 #ifdef ASSERT
duke@435 2450 { Label L;
duke@435 2451 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
kvn@3037 2452 __ br_null_short(O0, Assembler::pt, L);
duke@435 2453 __ stop("no pending exception allowed on exit from IR::monitorenter");
duke@435 2454 __ bind(L);
duke@435 2455 }
duke@435 2456 #endif
duke@435 2457 __ bind(done);
duke@435 2458 }
duke@435 2459
duke@435 2460
duke@435 2461 // Finally just about ready to make the JNI call
duke@435 2462
duke@435 2463 __ flush_windows();
duke@435 2464 if (inner_frame_created) {
duke@435 2465 __ restore();
duke@435 2466 } else {
duke@435 2467 // Store only what we need from this frame
duke@435 2468 // QQQ I think that on non-v9 (as if we care) we don't need these saves
duke@435 2469 // either, as the flush traps and the current window goes too.
duke@435 2470 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
duke@435 2471 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
duke@435 2472 }
duke@435 2473
duke@435 2474 // get JNIEnv* which is first argument to native
never@3500 2475 if (!is_critical_native) {
never@3500 2476 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
never@3500 2477 }
duke@435 2478
duke@435 2479 // Use that pc we placed in O7 a while back as the current frame anchor
duke@435 2480 __ set_last_Java_frame(SP, O7);
duke@435 2481
never@3500 2482 // We flushed the windows ages ago; now mark them as flushed before transitioning.
never@3500 2483 __ set(JavaFrameAnchor::flushed, G3_scratch);
never@3500 2484 __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
never@3500 2485
duke@435 2486 // Transition from _thread_in_Java to _thread_in_native.
duke@435 2487 __ set(_thread_in_native, G3_scratch);
duke@435 2488
duke@435 2489 #ifdef _LP64
never@3500 2490 AddressLiteral dest(native_func);
duke@435 2491 __ relocate(relocInfo::runtime_call_type);
twisti@1162 2492 __ jumpl_to(dest, O7, O7);
duke@435 2493 #else
never@3500 2494 __ call(native_func, relocInfo::runtime_call_type);
duke@435 2495 #endif
never@3500 2496 __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
duke@435 2497
duke@435 2498 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@435 2499
duke@435 2500 // Unpack native results. For int-types, we do any needed sign-extension
duke@435 2501 // and move things into I0. The return value there will survive any VM
duke@435 2502 // calls for blocking or unlocking. An FP or OOP result (handle) is done
duke@435 2503 // specially in the slow-path code.
duke@435 2504 switch (ret_type) {
duke@435 2505 case T_VOID: break; // Nothing to do!
duke@435 2506 case T_FLOAT: break; // Got it where we want it (unless slow-path)
duke@435 2507 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
duke@435 2508 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0, O1
duke@435 2509 case T_LONG:
duke@435 2510 #ifndef _LP64
duke@435 2511 __ mov(O1, I1);
duke@435 2512 #endif
duke@435 2513 // Fall thru
duke@435 2514 case T_OBJECT: // Really a handle
duke@435 2515 case T_ARRAY:
duke@435 2516 case T_INT:
duke@435 2517 __ mov(O0, I0);
duke@435 2518 break;
duke@435 2519 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
duke@435 2520 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
duke@435 2521 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
duke@435 2522 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
duke@435 2523 break; // Cannot de-handlize until after reclaiming jvm_lock
duke@435 2524 default:
duke@435 2525 ShouldNotReachHere();
duke@435 2526 }
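// The narrow-int cases above are shift-pair sign/zero extensions,
// roughly equivalent to this C:
//
//   case T_BOOLEAN: I0 = (O0 != 0);                  // !0 => true; 0 => false
//   case T_BYTE:    I0 = (int)(signed char)O0;
//   case T_CHAR:    I0 = (int)(unsigned short)O0;
//   case T_SHORT:   I0 = (int)(short)O0;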
duke@435 2527
never@3500 2528 Label after_transition;
duke@435 2529 // must we block?
duke@435 2530
duke@435 2531 // Block, if necessary, before resuming in _thread_in_Java state.
duke@435 2532 // In order for GC to work, don't clear the last_Java_sp until after blocking.
duke@435 2533 { Label no_block;
twisti@1162 2534 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
duke@435 2535
duke@435 2536 // Switch thread to "native transition" state before reading the synchronization state.
duke@435 2537 // This additional state is necessary because reading and testing the synchronization
duke@435 2538 // state is not atomic w.r.t. GC, as this scenario demonstrates:
duke@435 2539 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
duke@435 2540 // VM thread changes sync state to synchronizing and suspends threads for GC.
duke@435 2541 // Thread A is resumed to finish this native method, but doesn't block here since it
duke@435 2542 // didn't see any synchronization in progress, and escapes.
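// The whole transition protocol below, as C-like pseudo code (a sketch
// only; the real code uses the assembler sequences that follow):
//
//   thread->state = _thread_in_native_trans;   // store
//   membar(StoreLoad);                          // or write the serialization page
//   if (SafepointSynchronize::state != _not_synchronized ||
//       thread->suspend_flags != 0) {
//     block();                                  // leaf call into the VM, below
//   }
//   thread->state = _thread_in_Java;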
duke@435 2543 __ set(_thread_in_native_trans, G3_scratch);
twisti@1162 2544 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
duke@435 2545 if(os::is_MP()) {
duke@435 2546 if (UseMembar) {
duke@435 2547 // Force this write out before the read below
duke@435 2548 __ membar(Assembler::StoreLoad);
duke@435 2549 } else {
duke@435 2550 // Write serialization page so VM thread can do a pseudo remote membar.
duke@435 2551 // We use the current thread pointer to calculate a thread specific
duke@435 2552 // offset to write to within the page. This minimizes bus traffic
duke@435 2553 // due to cache line collision.
duke@435 2554 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
duke@435 2555 }
duke@435 2556 }
duke@435 2557 __ load_contents(sync_state, G3_scratch);
duke@435 2558 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
duke@435 2559
duke@435 2560 Label L;
twisti@1162 2561 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
duke@435 2562 __ br(Assembler::notEqual, false, Assembler::pn, L);
twisti@1162 2563 __ delayed()->ld(suspend_state, G3_scratch);
kvn@3037 2564 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
duke@435 2565 __ bind(L);
duke@435 2566
duke@435 2567 // Block. Save any potential method result value before the operation and
duke@435 2568 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
duke@435 2569 // lets us share the oopMap we used when we went native rather than create
duke@435 2570 // a distinct one for this pc
duke@435 2571 //
duke@435 2572 save_native_result(masm, ret_type, stack_slots);
never@3500 2573 if (!is_critical_native) {
never@3500 2574 __ call_VM_leaf(L7_thread_cache,
never@3500 2575 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
never@3500 2576 G2_thread);
never@3500 2577 } else {
never@3500 2578 __ call_VM_leaf(L7_thread_cache,
never@3500 2579 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
never@3500 2580 G2_thread);
never@3500 2581 }
duke@435 2582
duke@435 2583 // Restore any method result value
duke@435 2584 restore_native_result(masm, ret_type, stack_slots);
never@3500 2585
never@3500 2586 if (is_critical_native) {
never@3500 2587 // The call above performed the transition to thread_in_Java so
never@3500 2588 // skip the transition logic below.
never@3500 2589 __ ba(after_transition);
never@3500 2590 __ delayed()->nop();
never@3500 2591 }
never@3500 2592
duke@435 2593 __ bind(no_block);
duke@435 2594 }
duke@435 2595
duke@435 2596 // Thread state is _thread_in_native_trans. Any safepoint blocking has already
duke@435 2597 // happened, so we can now change state to _thread_in_Java.
duke@435 2598 __ set(_thread_in_Java, G3_scratch);
twisti@1162 2599 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
never@3500 2600 __ bind(after_transition);
duke@435 2601
duke@435 2602 Label no_reguard;
twisti@1162 2603 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
kvn@3037 2604 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
duke@435 2605
duke@435 2606 save_native_result(masm, ret_type, stack_slots);
duke@435 2607 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
duke@435 2608 __ delayed()->nop();
duke@435 2609
duke@435 2610 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@435 2611 restore_native_result(masm, ret_type, stack_slots);
duke@435 2612
duke@435 2613 __ bind(no_reguard);
duke@435 2614
duke@435 2615 // Handle possible exception (will unlock if necessary)
duke@435 2616
duke@435 2617 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
duke@435 2618
duke@435 2619 // Unlock
duke@435 2620 if (method->is_synchronized()) {
duke@435 2621 Label done;
duke@435 2622 Register I2_ex_oop = I2;
duke@435 2623 const Register L3_box = L3;
duke@435 2624 // Get locked oop from the handle we passed to jni
duke@435 2625 __ ld_ptr(L6_handle, 0, L4);
duke@435 2626 __ add(SP, lock_offset+STACK_BIAS, L3_box);
duke@435 2627 // Must save pending exception around the slow-path VM call. Since it's a
duke@435 2628 // leaf call, the pending exception (if any) can be kept in a register.
duke@435 2629 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
duke@435 2630 // Now unlock
duke@435 2631 // (Roop, Rmark, Rbox, Rscratch)
duke@435 2632 __ compiler_unlock_object(L4, L1, L3_box, L2);
duke@435 2633 __ br(Assembler::equal, false, Assembler::pt, done);
duke@435 2634 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
duke@435 2635
duke@435 2636 // save and restore any potential method result value around the unlocking
duke@435 2637 // operation. Will save in I0 (or stack for FP returns).
duke@435 2638 save_native_result(masm, ret_type, stack_slots);
duke@435 2639
duke@435 2640 // Must clear pending-exception before re-entering the VM. Since this is
duke@435 2641 // a leaf call, pending-exception-oop can be safely kept in a register.
duke@435 2642 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
duke@435 2643
duke@435 2644 // slow case of monitor exit. Inline a special case of call_VM that
duke@435 2645 // disallows any pending_exception.
duke@435 2646 __ mov(L3_box, O1);
duke@435 2647
duke@435 2648 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
duke@435 2649 __ delayed()->mov(L4, O0); // Need oop in O0
duke@435 2650
duke@435 2651 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@435 2652
duke@435 2653 #ifdef ASSERT
duke@435 2654 { Label L;
duke@435 2655 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
kvn@3037 2656 __ br_null_short(O0, Assembler::pt, L);
duke@435 2657 __ stop("no pending exception allowed on exit from IR::monitorexit");
duke@435 2658 __ bind(L);
duke@435 2659 }
duke@435 2660 #endif
duke@435 2661 restore_native_result(masm, ret_type, stack_slots);
duke@435 2662 // check_forward_pending_exception jumps to forward_exception if any pending
duke@435 2663 // exception is set. The forward_exception routine expects to see the
duke@435 2664 // exception in pending_exception and not in a register. Kind of clumsy,
duke@435 2665 // since all folks who branch to forward_exception must have tested
duke@435 2666 // pending_exception first and hence have it in a register already.
duke@435 2667 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
duke@435 2668 __ bind(done);
duke@435 2669 }
duke@435 2670
duke@435 2671 // Tell dtrace about this method exit
duke@435 2672 {
duke@435 2673 SkipIfEqual skip_if(
duke@435 2674 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
duke@435 2675 save_native_result(masm, ret_type, stack_slots);
coleenp@4037 2676 __ set_metadata_constant(method(), O1);
duke@435 2677 __ call_VM_leaf(L7_thread_cache,
duke@435 2678 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
duke@435 2679 G2_thread, O1);
duke@435 2680 restore_native_result(masm, ret_type, stack_slots);
duke@435 2681 }
duke@435 2682
duke@435 2683 // Clear "last Java frame" SP and PC.
duke@435 2684 __ verify_thread(); // G2_thread must be correct
duke@435 2685 __ reset_last_Java_frame();
duke@435 2686
duke@435 2687 // Unpack oop result
duke@435 2688 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
duke@435 2689 Label L;
duke@435 2690 __ addcc(G0, I0, G0);
duke@435 2691 __ brx(Assembler::notZero, true, Assembler::pt, L);
duke@435 2692 __ delayed()->ld_ptr(I0, 0, I0);
duke@435 2693 __ mov(G0, I0);
duke@435 2694 __ bind(L);
duke@435 2695 __ verify_oop(I0);
duke@435 2696 }
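// Equivalent C for the unhandlizing above (sketch): the JNI call
// returned a handle, so dereference it unless it is the NULL handle.
//
//   I0 = (I0 != 0) ? *(oop*)I0 : NULL;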
duke@435 2697
never@3500 2698 if (!is_critical_native) {
never@3500 2699 // reset handle block
never@3500 2700 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
never@3500 2701 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
never@3500 2702
never@3500 2703 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
never@3500 2704 check_forward_pending_exception(masm, G3_scratch);
never@3500 2705 }
duke@435 2706
duke@435 2707
duke@435 2708 // Return
duke@435 2709
duke@435 2710 #ifndef _LP64
duke@435 2711 if (ret_type == T_LONG) {
duke@435 2712
duke@435 2713 // Must leave proper result in O0,O1 and G1 (c2/tiered only)
duke@435 2714 __ sllx(I0, 32, G1); // Shift bits into high G1
duke@435 2715 __ srl (I1, 0, I1); // Zero extend I1 (harmless?)
duke@435 2716 __ or3 (I1, G1, G1); // OR 64 bits into G1
duke@435 2717 }
duke@435 2718 #endif
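// Equivalent C for the G1 packing above (sketch):
//
//   G1 = ((uint64_t)I0 << 32) | (uint32_t)I1;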
duke@435 2719
duke@435 2720 __ ret();
duke@435 2721 __ delayed()->restore();
duke@435 2722
duke@435 2723 __ flush();
duke@435 2724
duke@435 2725 nmethod *nm = nmethod::new_native_nmethod(method,
twisti@2687 2726 compile_id,
duke@435 2727 masm->code(),
duke@435 2728 vep_offset,
duke@435 2729 frame_complete,
duke@435 2730 stack_slots / VMRegImpl::slots_per_word,
duke@435 2731 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
duke@435 2732 in_ByteSize(lock_offset),
duke@435 2733 oop_maps);
never@3500 2734
never@3500 2735 if (is_critical_native) {
never@3500 2736 nm->set_lazy_critical_native(true);
never@3500 2737 }
duke@435 2738 return nm;
duke@435 2739
duke@435 2740 }
duke@435 2741
kamg@551 2742 #ifdef HAVE_DTRACE_H
kamg@551 2743 // ---------------------------------------------------------------------------
kamg@551 2744 // Generate a dtrace nmethod for a given signature. The method takes arguments
kamg@551 2745 // in the Java compiled code convention, marshals them to the native
kamg@551 2746 // abi and then leaves nops at the position you would expect to call a native
kamg@551 2747 // function. When the probe is enabled the nops are replaced with a trap
kamg@551 2748 // instruction that dtrace inserts and the trace will cause a notification
kamg@551 2749 // to dtrace.
kamg@551 2750 //
kamg@551 2751 // The probes are only able to take primitive types and java/lang/String as
kamg@551 2752 // arguments. No other java types are allowed. Strings are converted to utf8
kamg@551 2753 // strings so that from dtrace's point of view java strings are converted to C
kamg@551 2754 // strings. There is an arbitrary fixed limit on the total space that a method
kamg@551 2755 // can use for converting the strings (256 chars per string in the signature),
kamg@551 2756 // so any java string larger than this is truncated.
kamg@551 2757
kamg@551 2758 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
kamg@551 2759 static bool offsets_initialized = false;
kamg@551 2760
kamg@551 2761 nmethod *SharedRuntime::generate_dtrace_nmethod(
kamg@551 2762 MacroAssembler *masm, methodHandle method) {
kamg@551 2763
kamg@551 2764
kamg@551 2765 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
kamg@551 2766 // be single threaded in this method.
kamg@551 2767 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
kamg@551 2768
kamg@551 2769 // Fill in the signature array, for the calling-convention call.
kamg@551 2770 int total_args_passed = method->size_of_parameters();
kamg@551 2771
kamg@551 2772 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
kamg@551 2773 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
kamg@551 2774
kamg@551 2775 // The signature we are going to use for the trap that dtrace will see:
kamg@551 2776 // java/lang/String is converted, we drop "this", and any other object
kamg@551 2777 // is converted to NULL. (A one-slot java/lang/Long object reference
kamg@551 2778 // is converted to a two-slot long, which is why we double the allocation).
kamg@551 2779 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
kamg@551 2780 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
kamg@551 2781
kamg@551 2782 int i=0;
kamg@551 2783 int total_strings = 0;
kamg@551 2784 int first_arg_to_pass = 0;
kamg@551 2785 int total_c_args = 0;
kamg@551 2786
kamg@551 2787 // Skip the receiver as dtrace doesn't want to see it
kamg@551 2788 if( !method->is_static() ) {
kamg@551 2789 in_sig_bt[i++] = T_OBJECT;
kamg@551 2790 first_arg_to_pass = 1;
kamg@551 2791 }
kamg@551 2792
kamg@551 2793 SignatureStream ss(method->signature());
kamg@551 2794 for ( ; !ss.at_return_type(); ss.next()) {
kamg@551 2795 BasicType bt = ss.type();
kamg@551 2796 in_sig_bt[i++] = bt; // Collect remaining bits of signature
kamg@551 2797 out_sig_bt[total_c_args++] = bt;
kamg@551 2798 if( bt == T_OBJECT) {
coleenp@2497 2799 Symbol* s = ss.as_symbol_or_null();
kamg@551 2800 if (s == vmSymbols::java_lang_String()) {
kamg@551 2801 total_strings++;
kamg@551 2802 out_sig_bt[total_c_args-1] = T_ADDRESS;
kamg@551 2803 } else if (s == vmSymbols::java_lang_Boolean() ||
kamg@551 2804 s == vmSymbols::java_lang_Byte()) {
kamg@551 2805 out_sig_bt[total_c_args-1] = T_BYTE;
kamg@551 2806 } else if (s == vmSymbols::java_lang_Character() ||
kamg@551 2807 s == vmSymbols::java_lang_Short()) {
kamg@551 2808 out_sig_bt[total_c_args-1] = T_SHORT;
kamg@551 2809 } else if (s == vmSymbols::java_lang_Integer() ||
kamg@551 2810 s == vmSymbols::java_lang_Float()) {
kamg@551 2811 out_sig_bt[total_c_args-1] = T_INT;
kamg@551 2812 } else if (s == vmSymbols::java_lang_Long() ||
kamg@551 2813 s == vmSymbols::java_lang_Double()) {
kamg@551 2814 out_sig_bt[total_c_args-1] = T_LONG;
kamg@551 2815 out_sig_bt[total_c_args++] = T_VOID;
kamg@551 2816 }
kamg@551 2817 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
kamg@551 2818 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
kamg@551 2819 // We convert double to long
kamg@551 2820 out_sig_bt[total_c_args-1] = T_LONG;
kamg@551 2821 out_sig_bt[total_c_args++] = T_VOID;
kamg@551 2822 } else if ( bt == T_FLOAT) {
kamg@551 2823 // We convert float to int
kamg@551 2824 out_sig_bt[total_c_args-1] = T_INT;
kamg@551 2825 }
kamg@551 2826 }
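// Worked example (hypothetical instance method): for
//   int m(String s, long j, float f)
// the loop above produces
//   in_sig_bt  = { T_OBJECT /*this*/, T_OBJECT, T_LONG, T_VOID, T_FLOAT }
//   out_sig_bt = { T_ADDRESS, T_LONG, T_VOID, T_INT }
// i.e. the receiver is dropped, the String becomes a C string (T_ADDRESS)
// and the float is passed as an int.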
kamg@551 2827
kamg@551 2828 assert(i==total_args_passed, "validly parsed signature");
kamg@551 2829
kamg@551 2830 // Now get the compiled-Java layout as input arguments
kamg@551 2831 int comp_args_on_stack;
kamg@551 2832 comp_args_on_stack = SharedRuntime::java_calling_convention(
kamg@551 2833 in_sig_bt, in_regs, total_args_passed, false);
kamg@551 2834
kamg@551 2835 // We have received a description of where all the java args are located
kamg@551 2836 // on entry to the wrapper. We need to convert these args to where
kamg@551 2837 // a native (non-jni) function would expect them. To figure out
kamg@551 2838 // where they go we convert the java signature to a C signature and remove
kamg@551 2839 // T_VOID for any long/double we might have received.
kamg@551 2840
kamg@551 2841
kamg@551 2842 // Now figure out where the args must be stored and how much stack space
kamg@551 2843 // they require (neglecting out_preserve_stack_slots but including space for
kamg@551 2844 // storing the 1st six register arguments). It's weird; see int_stk_helper.
kamg@551 2845 //
kamg@551 2846 int out_arg_slots;
kamg@551 2847 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
kamg@551 2848
kamg@551 2849 // Calculate the total number of stack slots we will need.
kamg@551 2850
kamg@551 2851 // First count the abi requirement plus all of the outgoing args
kamg@551 2852 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
kamg@551 2853
kamg@551 2854 // Plus a temp for possible conversion of float/double/long register args
kamg@551 2855
kamg@551 2856 int conversion_temp = stack_slots;
kamg@551 2857 stack_slots += 2;
kamg@551 2858
kamg@551 2859
kamg@551 2860 // Now space for the string(s) we must convert
kamg@551 2861
kamg@551 2862 int string_locs = stack_slots;
kamg@551 2863 stack_slots += total_strings *
kamg@551 2864 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
kamg@551 2865
kamg@551 2866 // OK, the space we have allocated will look like:
kamg@551 2867 //
kamg@551 2868 //
kamg@551 2869 // FP-> | |
kamg@551 2870 // |---------------------|
kamg@551 2871 // | string[n] |
kamg@551 2872 // |---------------------| <- string_locs[n]
kamg@551 2873 // | string[n-1] |
kamg@551 2874 // |---------------------| <- string_locs[n-1]
kamg@551 2875 // | ... |
kamg@551 2876 // | ... |
kamg@551 2877 // |---------------------| <- string_locs[1]
kamg@551 2878 // | string[0] |
kamg@551 2879 // |---------------------| <- string_locs[0]
kamg@551 2880 // | temp |
kamg@551 2881 // |---------------------| <- conversion_temp
kamg@551 2882 // | outbound memory |
kamg@551 2883 // | based arguments |
kamg@551 2884 // | |
kamg@551 2885 // |---------------------|
kamg@551 2886 // | |
kamg@551 2887 // SP-> | out_preserved_slots |
kamg@551 2888 //
kamg@551 2889 //
kamg@551 2890
kamg@551 2891 // Now compute the actual number of stack words we need, rounding to keep
kamg@551 2892 // the stack properly aligned.
kamg@551 2893 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
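// Worked example (assuming max_dtrace_string_size == 256 bytes and 4-byte
// stack slots): one string adds 64 slots, the conversion temp adds 2,
// and the total is then rounded up to a multiple of
// 4 * VMRegImpl::slots_per_word so SP stays properly aligned.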
kamg@551 2894
kamg@551 2895 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
kamg@551 2896
kamg@551 2897 intptr_t start = (intptr_t)__ pc();
kamg@551 2898
kamg@551 2899 // First thing, make an ic check to see if we should even be here
kamg@551 2900
kamg@551 2901 {
kamg@551 2902 Label L;
kamg@551 2903 const Register temp_reg = G3_scratch;
twisti@1162 2904 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
kamg@551 2905 __ verify_oop(O0);
kamg@551 2906 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
kvn@3037 2907 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
kamg@551 2908
twisti@1162 2909 __ jump_to(ic_miss, temp_reg);
kamg@551 2910 __ delayed()->nop();
kamg@551 2911 __ align(CodeEntryAlignment);
kamg@551 2912 __ bind(L);
kamg@551 2913 }
kamg@551 2914
kamg@551 2915 int vep_offset = ((intptr_t)__ pc()) - start;
kamg@551 2916
kamg@551 2917
kamg@551 2918 // The instruction at the verified entry point must be 5 bytes or longer
kamg@551 2919 // because it can be patched on the fly by make_non_entrant. The stack bang
kamg@551 2920 // instruction fits that requirement.
kamg@551 2921
kamg@551 2922 // Generate stack overflow check before creating frame
kamg@551 2923 __ generate_stack_overflow_check(stack_size);
kamg@551 2924
kamg@551 2925 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
kamg@551 2926 "valid size for make_non_entrant");
kamg@551 2927
kamg@551 2928 // Generate a new frame for the wrapper.
kamg@551 2929 __ save(SP, -stack_size, SP);
kamg@551 2930
kamg@551 2931 // Frame is now completed as far as size and linkage.
kamg@551 2932
kamg@551 2933 int frame_complete = ((intptr_t)__ pc()) - start;
kamg@551 2934
kamg@551 2935 #ifdef ASSERT
kamg@551 2936 bool reg_destroyed[RegisterImpl::number_of_registers];
kamg@551 2937 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
kamg@551 2938 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
kamg@551 2939 reg_destroyed[r] = false;
kamg@551 2940 }
kamg@551 2941 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
kamg@551 2942 freg_destroyed[f] = false;
kamg@551 2943 }
kamg@551 2944
kamg@551 2945 #endif /* ASSERT */
kamg@551 2946
kamg@551 2947 VMRegPair zero;
kamg@611 2948 const Register g0 = G0; // without this we get a compiler warning (why??)
kamg@611 2949 zero.set2(g0->as_VMReg());
kamg@551 2950
kamg@551 2951 int c_arg, j_arg;
kamg@551 2952
kamg@551 2953 Register conversion_off = noreg;
kamg@551 2954
kamg@551 2955 for (j_arg = first_arg_to_pass, c_arg = 0 ;
kamg@551 2956 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
kamg@551 2957
kamg@551 2958 VMRegPair src = in_regs[j_arg];
kamg@551 2959 VMRegPair dst = out_regs[c_arg];
kamg@551 2960
kamg@551 2961 #ifdef ASSERT
kamg@551 2962 if (src.first()->is_Register()) {
kamg@551 2963 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
kamg@551 2964 } else if (src.first()->is_FloatRegister()) {
kamg@551 2965 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
kamg@551 2966 FloatRegisterImpl::S)], "ack!");
kamg@551 2967 }
kamg@551 2968 if (dst.first()->is_Register()) {
kamg@551 2969 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
kamg@551 2970 } else if (dst.first()->is_FloatRegister()) {
kamg@551 2971 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
kamg@551 2972 FloatRegisterImpl::S)] = true;
kamg@551 2973 }
kamg@551 2974 #endif /* ASSERT */
kamg@551 2975
kamg@551 2976 switch (in_sig_bt[j_arg]) {
kamg@551 2977 case T_ARRAY:
kamg@551 2978 case T_OBJECT:
kamg@551 2979 {
kamg@551 2980 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
kamg@551 2981 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
kamg@551 2982 // need to unbox a one-slot value
kamg@551 2983 Register in_reg = L0;
kamg@551 2984 Register tmp = L2;
kamg@551 2985 if ( src.first()->is_reg() ) {
kamg@551 2986 in_reg = src.first()->as_Register();
kamg@551 2987 } else {
kamg@551 2988 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
kamg@551 2989 "must be");
kamg@551 2990 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
kamg@551 2991 }
kamg@551 2992 // If the final destination is an acceptable register
kamg@551 2993 if ( dst.first()->is_reg() ) {
kamg@551 2994 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
kamg@551 2995 tmp = dst.first()->as_Register();
kamg@551 2996 }
kamg@551 2997 }
kamg@551 2998
kamg@551 2999 Label skipUnbox;
kamg@551 3000 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
kamg@551 3001 __ mov(G0, tmp->successor());
kamg@551 3002 }
kamg@551 3003 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
kamg@551 3004 __ delayed()->mov(G0, tmp);
kamg@551 3005
kvn@600 3006 BasicType bt = out_sig_bt[c_arg];
kvn@600 3007 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
kvn@600 3008 switch (bt) {
kamg@551 3009 case T_BYTE:
kamg@551 3010 __ ldub(in_reg, box_offset, tmp); break;
kamg@551 3011 case T_SHORT:
kamg@551 3012 __ lduh(in_reg, box_offset, tmp); break;
kamg@551 3013 case T_INT:
kamg@551 3014 __ ld(in_reg, box_offset, tmp); break;
kamg@551 3015 case T_LONG:
kamg@551 3016 __ ld_long(in_reg, box_offset, tmp); break;
kamg@551 3017 default: ShouldNotReachHere();
kamg@551 3018 }
kamg@551 3019
kamg@551 3020 __ bind(skipUnbox);
kamg@551 3021 // If tmp wasn't the final destination, copy to the final destination
kamg@551 3022 if (tmp == L2) {
kamg@551 3023 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
kamg@551 3024 if (out_sig_bt[c_arg] == T_LONG) {
kamg@551 3025 long_move(masm, tmp_as_VM, dst);
kamg@551 3026 } else {
kamg@551 3027 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
kamg@551 3028 }
kamg@551 3029 }
kamg@551 3030 if (out_sig_bt[c_arg] == T_LONG) {
kamg@551 3031 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
kamg@551 3032 ++c_arg; // move over the T_VOID to keep the loop indices in sync
kamg@551 3033 }
kamg@551 3034 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
kamg@551 3035 Register s =
kamg@551 3036 src.first()->is_reg() ? src.first()->as_Register() : L2;
kamg@551 3037 Register d =
kamg@551 3038 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
kamg@551 3039
kamg@551 3040 // We store the oop now so that the conversion pass can reach it
kamg@551 3041 // while in the inner frame. This will be the only store if
kamg@551 3042 // the oop is NULL.
kamg@551 3043 if (s != L2) {
kamg@551 3044 // src is register
kamg@551 3045 if (d != L2) {
kamg@551 3046 // dst is register
kamg@551 3047 __ mov(s, d);
kamg@551 3048 } else {
kamg@551 3049 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@551 3050 STACK_BIAS), "must be");
kamg@551 3051 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@551 3052 }
kamg@551 3053 } else {
kamg@551 3054 // src not a register
kamg@551 3055 assert(Assembler::is_simm13(reg2offset(src.first()) +
kamg@551 3056 STACK_BIAS), "must be");
kamg@551 3057 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
kamg@551 3058 if (d == L2) {
kamg@551 3059 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@551 3060 STACK_BIAS), "must be");
kamg@551 3061 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@551 3062 }
kamg@551 3063 }
kamg@551 3064 } else if (out_sig_bt[c_arg] != T_VOID) {
kamg@551 3065 // Convert the arg to NULL
kamg@551 3066 if (dst.first()->is_reg()) {
kamg@551 3067 __ mov(G0, dst.first()->as_Register());
kamg@551 3068 } else {
kamg@551 3069 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@551 3070 STACK_BIAS), "must be");
kamg@551 3071 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@551 3072 }
kamg@551 3073 }
kamg@551 3074 }
kamg@551 3075 break;
kamg@551 3076 case T_VOID:
kamg@551 3077 break;
kamg@551 3078
kamg@551 3079 case T_FLOAT:
kamg@551 3080 if (src.first()->is_stack()) {
kamg@551 3081 // Stack to stack/reg is simple
kamg@551 3082 move32_64(masm, src, dst);
kamg@551 3083 } else {
kamg@551 3084 if (dst.first()->is_reg()) {
kamg@551 3085 // freg -> reg
kamg@551 3086 int off =
kamg@551 3087 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@551 3088 Register d = dst.first()->as_Register();
kamg@551 3089 if (Assembler::is_simm13(off)) {
kamg@551 3090 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@551 3091 SP, off);
kamg@551 3092 __ ld(SP, off, d);
kamg@551 3093 } else {
kamg@551 3094 if (conversion_off == noreg) {
kamg@551 3095 __ set(off, L6);
kamg@551 3096 conversion_off = L6;
kamg@551 3097 }
kamg@551 3098 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@551 3099 SP, conversion_off);
kamg@551 3100 __ ld(SP, conversion_off , d);
kamg@551 3101 }
kamg@551 3102 } else {
kamg@551 3103 // freg -> mem
kamg@551 3104 int off = STACK_BIAS + reg2offset(dst.first());
kamg@551 3105 if (Assembler::is_simm13(off)) {
kamg@551 3106 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@551 3107 SP, off);
kamg@551 3108 } else {
kamg@551 3109 if (conversion_off == noreg) {
kamg@551 3110 __ set(off, L6);
kamg@551 3111 conversion_off = L6;
kamg@551 3112 }
kamg@551 3113 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@551 3114 SP, conversion_off);
kamg@551 3115 }
kamg@551 3116 }
kamg@551 3117 }
kamg@551 3118 break;
kamg@551 3119
kamg@551 3120 case T_DOUBLE:
kamg@551 3121 assert( j_arg + 1 < total_args_passed &&
kamg@551 3122 in_sig_bt[j_arg + 1] == T_VOID &&
kamg@551 3123 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
kamg@551 3124 if (src.first()->is_stack()) {
kamg@551 3125 // Stack to stack/reg is simple
kamg@551 3126 long_move(masm, src, dst);
kamg@551 3127 } else {
kamg@551 3128 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
kamg@551 3129
kamg@551 3130 // Destination could be an odd reg on 32bit in which case
kamg@551 3131 // we can't load directly to the destination.
kamg@551 3132
kamg@551 3133 if (!d->is_even() && wordSize == 4) {
kamg@551 3134 d = L2;
kamg@551 3135 }
kamg@551 3136 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@551 3137 if (Assembler::is_simm13(off)) {
kamg@551 3138 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
kamg@551 3139 SP, off);
kamg@551 3140 __ ld_long(SP, off, d);
kamg@551 3141 } else {
kamg@551 3142 if (conversion_off == noreg) {
kamg@551 3143 __ set(off, L6);
kamg@551 3144 conversion_off = L6;
kamg@551 3145 }
kamg@551 3146 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
kamg@551 3147 SP, conversion_off);
kamg@551 3148 __ ld_long(SP, conversion_off, d);
kamg@551 3149 }
kamg@551 3150 if (d == L2) {
kamg@551 3151 long_move(masm, reg64_to_VMRegPair(L2), dst);
kamg@551 3152 }
kamg@551 3153 }
kamg@551 3154 break;
kamg@551 3155
kamg@551 3156 case T_LONG :
kamg@551 3157 // 32bit can't do a split move of something like g1 -> O0, O1
kamg@551 3158 // so use a memory temp
kamg@551 3159 if (src.is_single_phys_reg() && wordSize == 4) {
kamg@551 3160 Register tmp = L2;
kamg@551 3161 if (dst.first()->is_reg() &&
kamg@551 3162 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
kamg@551 3163 tmp = dst.first()->as_Register();
kamg@551 3164 }
kamg@551 3165
kamg@551 3166 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@551 3167 if (Assembler::is_simm13(off)) {
kamg@551 3168 __ stx(src.first()->as_Register(), SP, off);
kamg@551 3169 __ ld_long(SP, off, tmp);
kamg@551 3170 } else {
kamg@551 3171 if (conversion_off == noreg) {
kamg@551 3172 __ set(off, L6);
kamg@551 3173 conversion_off = L6;
kamg@551 3174 }
kamg@551 3175 __ stx(src.first()->as_Register(), SP, conversion_off);
kamg@551 3176 __ ld_long(SP, conversion_off, tmp);
kamg@551 3177 }
kamg@551 3178
kamg@551 3179 if (tmp == L2) {
kamg@551 3180 long_move(masm, reg64_to_VMRegPair(L2), dst);
kamg@551 3181 }
kamg@551 3182 } else {
kamg@551 3183 long_move(masm, src, dst);
kamg@551 3184 }
kamg@551 3185 break;
kamg@551 3186
kamg@551 3187 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
kamg@551 3188
kamg@551 3189 default:
kamg@551 3190 move32_64(masm, src, dst);
kamg@551 3191 }
kamg@551 3192 }
kamg@551 3193
kamg@551 3194
kamg@551 3195 // If we have any strings we must store any register-based args to the stack.
kamg@551 3196 // This includes any still-live float registers too.
kamg@551 3197
kamg@551 3198 if (total_strings > 0 ) {
kamg@551 3199
kamg@551 3200 // protect all the arg registers
kamg@551 3201 __ save_frame(0);
kamg@551 3202 __ mov(G2_thread, L7_thread_cache);
kamg@551 3203 const Register L2_string_off = L2;
kamg@551 3204
kamg@551 3205 // Get first string offset
kamg@551 3206 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
kamg@551 3207
kamg@551 3208 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
kamg@551 3209 if (out_sig_bt[c_arg] == T_ADDRESS) {
kamg@551 3210
kamg@551 3211 VMRegPair dst = out_regs[c_arg];
kamg@551 3212 const Register d = dst.first()->is_reg() ?
kamg@551 3213 dst.first()->as_Register()->after_save() : noreg;
kamg@551 3214
kamg@551 3215 // It's a string; the oop was already copied to the out arg
kamg@551 3216 // position
kamg@551 3217 if (d != noreg) {
kamg@551 3218 __ mov(d, O0);
kamg@551 3219 } else {
kamg@551 3220 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
kamg@551 3221 "must be");
kamg@551 3222 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
kamg@551 3223 }
kamg@551 3224 Label skip;
kamg@551 3225
kamg@551 3226 __ br_null(O0, false, Assembler::pn, skip);
kamg@551 3227 __ delayed()->add(FP, L2_string_off, O1);
kamg@551 3228
kamg@551 3229 if (d != noreg) {
kamg@551 3230 __ mov(O1, d);
kamg@551 3231 } else {
kamg@551 3232 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
kamg@551 3233 "must be");
kamg@551 3234 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
kamg@551 3235 }
kamg@551 3236
kamg@551 3237 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
kamg@551 3238 relocInfo::runtime_call_type);
kamg@551 3239 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
kamg@551 3240
kamg@551 3241 __ bind(skip);
kamg@551 3242
kamg@551 3243 }
kamg@551 3244
kamg@551 3245 }
kamg@551 3246 __ mov(L7_thread_cache, G2_thread);
kamg@551 3247 __ restore();
kamg@551 3248
kamg@551 3249 }
kamg@551 3250
kamg@551 3251
kamg@551 3252 // OK, now we are done. We need to place the nop that dtrace wants in order to
kamg@551 3253 // patch in the trap.
kamg@551 3254
kamg@551 3255 int patch_offset = ((intptr_t)__ pc()) - start;
kamg@551 3256
kamg@551 3257 __ nop();
kamg@551 3258
kamg@551 3259
kamg@551 3260 // Return
kamg@551 3261
kamg@551 3262 __ ret();
kamg@551 3263 __ delayed()->restore();
kamg@551 3264
kamg@551 3265 __ flush();
kamg@551 3266
kamg@551 3267 nmethod *nm = nmethod::new_dtrace_nmethod(
kamg@551 3268 method, masm->code(), vep_offset, patch_offset, frame_complete,
kamg@551 3269 stack_slots / VMRegImpl::slots_per_word);
kamg@551 3270 return nm;
kamg@551 3271
kamg@551 3272 }
kamg@551 3273
kamg@551 3274 #endif // HAVE_DTRACE_H
kamg@551 3275
duke@435 3276 // This function returns the adjustment size (in number of words) to a c2i adapter
duke@435 3277 // activation for use during deoptimization.
duke@435 3278 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
duke@435 3279 assert(callee_locals >= callee_parameters,
duke@435 3280 "test and remove; got more parms than locals");
duke@435 3281 if (callee_locals < callee_parameters)
duke@435 3282 return 0; // No adjustment for negative locals
twisti@1861 3283 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
duke@435 3284 return round_to(diff, WordsPerLong);
duke@435 3285 }
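// Worked example for last_frame_adjust above (assuming
// Interpreter::stackElementWords == 1 and WordsPerLong == 2):
// callee_parameters == 2 and callee_locals == 5 give diff == 3,
// which rounds up to an adjustment of 4 words.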
duke@435 3286
duke@435 3287 // "Top of Stack" slots that may be unused by the calling convention but must
duke@435 3288 // otherwise be preserved.
duke@435 3289 // On Intel these are not necessary and the value can be zero.
duke@435 3290 // On Sparc this describes the words reserved for storing a register window
duke@435 3291 // when an interrupt occurs.
duke@435 3292 uint SharedRuntime::out_preserve_stack_slots() {
duke@435 3293 return frame::register_save_words * VMRegImpl::slots_per_word;
duke@435 3294 }
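// On SPARC this is the register window save area: assuming
// frame::register_save_words == 16 (8 locals + 8 ins) and 4-byte stack
// slots, this comes to 32 slots in a 64-bit VM (16 in a 32-bit VM).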
duke@435 3295
duke@435 3296 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
duke@435 3297 //
duke@435 3298 // Common out the new frame generation for deopt and uncommon trap
duke@435 3299 //
duke@435 3300 Register G3pcs = G3_scratch; // Array of new pcs (input)
duke@435 3301 Register Oreturn0 = O0;
duke@435 3302 Register Oreturn1 = O1;
duke@435 3303 Register O2UnrollBlock = O2;
duke@435 3304 Register O3array = O3; // Array of frame sizes (input)
duke@435 3305 Register O4array_size = O4; // number of frames (input)
duke@435 3306 Register O7frame_size = O7; // size of the current frame (input)
duke@435 3307
duke@435 3308 __ ld_ptr(O3array, 0, O7frame_size);
duke@435 3309 __ sub(G0, O7frame_size, O7frame_size);
duke@435 3310 __ save(SP, O7frame_size, SP);
duke@435 3311 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
duke@435 3312
duke@435 3313 #ifdef ASSERT
duke@435 3314 // make sure that the frames are aligned properly
duke@435 3315 #ifndef _LP64
duke@435 3316 __ btst(wordSize*2-1, SP);
coleenp@3627 3317 __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
duke@435 3318 #endif
duke@435 3319 #endif
duke@435 3320
duke@435 3321 // Deopt needs to pass some extra live values from frame to frame
duke@435 3322
duke@435 3323 if (deopt) {
duke@435 3324 __ mov(Oreturn0->after_save(), Oreturn0);
duke@435 3325 __ mov(Oreturn1->after_save(), Oreturn1);
duke@435 3326 }
duke@435 3327
duke@435 3328 __ mov(O4array_size->after_save(), O4array_size);
duke@435 3329 __ sub(O4array_size, 1, O4array_size);
duke@435 3330 __ mov(O3array->after_save(), O3array);
duke@435 3331 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
duke@435 3332 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
duke@435 3333
duke@435 3334 #ifdef ASSERT
duke@435 3335 // trash registers to show a clear pattern in backtraces
duke@435 3336 __ set(0xDEAD0000, I0);
duke@435 3337 __ add(I0, 2, I1);
duke@435 3338 __ add(I0, 4, I2);
duke@435 3339 __ add(I0, 6, I3);
duke@435 3340 __ add(I0, 8, I4);
duke@435 3341 // Don't touch I5; it could have valuable savedSP
duke@435 3342 __ set(0xDEADBEEF, L0);
duke@435 3343 __ mov(L0, L1);
duke@435 3344 __ mov(L0, L2);
duke@435 3345 __ mov(L0, L3);
duke@435 3346 __ mov(L0, L4);
duke@435 3347 __ mov(L0, L5);
duke@435 3348
duke@435 3349 // trash the return value as there is nothing to return yet
duke@435 3350 __ set(0xDEAD0001, O7);
duke@435 3351 #endif
duke@435 3352
duke@435 3353 __ mov(SP, O5_savedSP);
duke@435 3354 }
duke@435 3355
duke@435 3356
duke@435 3357 static void make_new_frames(MacroAssembler* masm, bool deopt) {
duke@435 3358 //
duke@435 3359 // loop through the UnrollBlock info and create new frames
duke@435 3360 //
duke@435 3361 Register G3pcs = G3_scratch;
duke@435 3362 Register Oreturn0 = O0;
duke@435 3363 Register Oreturn1 = O1;
duke@435 3364 Register O2UnrollBlock = O2;
duke@435 3365 Register O3array = O3;
duke@435 3366 Register O4array_size = O4;
duke@435 3367 Label loop;
duke@435 3368
duke@435 3369 // Before we make new frames, check to see if stack is available.
duke@435 3370 // Do this after the caller's return address is on top of stack
duke@435 3371 if (UseStackBanging) {
duke@435 3372 // Get total frame size for interpreted frames
twisti@1162 3373 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
duke@435 3374 __ bang_stack_size(O4, O3, G3_scratch);
duke@435 3375 }
duke@435 3376
twisti@1162 3377 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
twisti@1162 3378 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
twisti@1162 3379 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
duke@435 3380
duke@435 3381 // Adjust old interpreter frame to make space for new frame's extra java locals
duke@435 3382 //
duke@435 3383 // We capture the original sp for the transition frame only because it is needed in
duke@435 3384 // order to properly calculate interpreter_sp_adjustment. Even though in real life
duke@435 3385 // every interpreter frame captures a savedSP it is only needed at the transition
duke@435 3386 // (fortunately). If we had to have it correct everywhere then we would need to
duke@435 3387 // be told the sp_adjustment for each frame we create. If the frame size array
duke@435 3388 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
duke@435 3389 // for each frame we create and keep up the illusion everywhere.
duke@435 3390 //
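// The code below, as C-like pseudo code (a sketch only; the real loop
// keeps everything in the registers named above):
//
//   SP -= caller_adjustment;                // grow caller frame for extra java locals
//   for (int i = 0; i < number_of_frames; i++) {
//     save(SP, -frame_sizes[i], SP);        // push one interpreter frame
//     I7 = frame_pcs[i];                    // and give it its new pc
//   }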
duke@435 3391
twisti@1162 3392 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
duke@435 3393 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
duke@435 3394 __ sub(SP, O7, SP);
duke@435 3395
duke@435 3396 #ifdef ASSERT
duke@435 3397 // make sure that there is at least one entry in the array
duke@435 3398 __ tst(O4array_size);
coleenp@3627 3399 __ breakpoint_trap(Assembler::zero, Assembler::icc);
duke@435 3400 #endif
duke@435 3401
duke@435 3402 // Now push the new interpreter frames
duke@435 3403 __ bind(loop);
duke@435 3404
duke@435 3405 // allocate a new frame, filling the registers
duke@435 3406
duke@435 3407 gen_new_frame(masm, deopt); // allocate an interpreter frame
duke@435 3408
kvn@3037 3409 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
duke@435 3410 __ delayed()->add(O3array, wordSize, O3array);
duke@435 3411 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
duke@435 3412
duke@435 3413 }
duke@435 3414
duke@435 3415 //------------------------------generate_deopt_blob----------------------------
duke@435 3416 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
duke@435 3417 // instead.
duke@435 3418 void SharedRuntime::generate_deopt_blob() {
duke@435 3419 // allocate space for the code
duke@435 3420 ResourceMark rm;
duke@435 3421 // setup code generation tools
duke@435 3422 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
kvn@3582 3423 if (UseStackBanging) {
kvn@3582 3424 pad += StackShadowPages*16 + 32;
kvn@3582 3425 }
duke@435 3426 #ifdef _LP64
duke@435 3427 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
duke@435 3428 #else
duke@435 3429 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
duke@435 3430 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
duke@435 3431 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
duke@435 3432 #endif /* _LP64 */
duke@435 3433 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@435 3434 FloatRegister Freturn0 = F0;
duke@435 3435 Register Greturn1 = G1;
duke@435 3436 Register Oreturn0 = O0;
duke@435 3437 Register Oreturn1 = O1;
duke@435 3438 Register O2UnrollBlock = O2;
never@1472 3439 Register L0deopt_mode = L0;
never@1472 3440 Register G4deopt_mode = G4_scratch;
duke@435 3441 int frame_size_words;
twisti@1162 3442 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
duke@435 3443 #if !defined(_LP64) && defined(COMPILER2)
twisti@1162 3444 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
duke@435 3445 #endif
duke@435 3446 Label cont;
duke@435 3447
duke@435 3448 OopMapSet *oop_maps = new OopMapSet();
duke@435 3449
duke@435 3450 //
duke@435 3451 // This is the entry point for code which is returning to a de-optimized
duke@435 3452 // frame.
duke@435 3453 // The steps taken by this frame are as follows:
duke@435 3454 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
duke@435 3455 // and all potentially live registers (at a pollpoint many registers can be live).
duke@435 3456 //
duke@435 3457 // - call the C routine: Deoptimization::fetch_unroll_info (this function
duke@435 3458 // returns information about the number and size of interpreter frames
duke@435 3459 // which are equivalent to the frame which is being deoptimized)
duke@435 3460 // - deallocate the unpack frame, restoring only results values. Other
duke@435 3461 // volatile registers will now be captured in the vframeArray as needed.
duke@435 3462 // - deallocate the deoptimization frame
duke@435 3463 // - in a loop using the information returned in the previous step
duke@435 3464 // push new interpreter frames (take care to propagate the return
duke@435 3465 // values through each new frame pushed)
duke@435 3466 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
duke@435 3467 // - call the C routine: Deoptimization::unpack_frames (this function
duke@435 3468 // lays out values on the interpreter frame which was just created)
duke@435 3469 // - deallocate the dummy unpack_frame
duke@435 3470 // - ensure that all the return values are correctly set and then do
duke@435 3471 // a return to the interpreter entry point
duke@435 3472 //
duke@435 3473 // Refer to the following methods for more information:
duke@435 3474 // - Deoptimization::fetch_unroll_info
duke@435 3475 // - Deoptimization::unpack_frames
duke@435 3476
duke@435 3477 OopMap* map = NULL;
duke@435 3478
duke@435 3479 int start = __ offset();
duke@435 3480
duke@435 3481 // restore G2, the trampoline destroyed it
duke@435 3482 __ get_thread();
duke@435 3483
duke@435 3484 // On entry we have been called by the deoptimized nmethod with a call that
duke@435 3485 // replaced the original call (or safepoint polling location) so the deoptimizing
duke@435 3486 // pc is now in O7. Return values are still in the expected places
duke@435 3487
duke@435 3488 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
kvn@3037 3489 __ ba(cont);
never@1472 3490 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
duke@435 3491
duke@435 3492 int exception_offset = __ offset() - start;
duke@435 3493
duke@435 3494 // restore G2, the trampoline destroyed it
duke@435 3495 __ get_thread();
duke@435 3496
duke@435 3497 // On entry we have been jumped to by the exception handler (or exception_blob
duke@435 3498 // for server). O0 contains the exception oop and O7 contains the original
duke@435 3499 // exception pc. So if we push a frame here it will look to the
duke@435 3500 // stack walking code (fetch_unroll_info) just like a normal call so
duke@435 3501 // state will be extracted normally.
duke@435 3502
duke@435 3503 // save exception oop in JavaThread and fall through into the
duke@435 3504 // exception_in_tls case since they are handled in same way except
duke@435 3505 // for where the pending exception is kept.
twisti@1162 3506 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
duke@435 3507
duke@435 3508 //
duke@435 3509 // Vanilla deoptimization with an exception pending in exception_oop
duke@435 3510 //
duke@435 3511 int exception_in_tls_offset = __ offset() - start;
duke@435 3512
duke@435 3513 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
duke@435 3514 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@435 3515
duke@435 3516 // Restore G2_thread
duke@435 3517 __ get_thread();
duke@435 3518
duke@435 3519 #ifdef ASSERT
duke@435 3520 {
duke@435 3521 // verify that there is really an exception oop in exception_oop
duke@435 3522 Label has_exception;
twisti@1162 3523 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
kvn@3037 3524 __ br_notnull_short(Oexception, Assembler::pt, has_exception);
duke@435 3525 __ stop("no exception in thread");
duke@435 3526 __ bind(has_exception);
duke@435 3527
duke@435 3528 // verify that there is no pending exception
duke@435 3529 Label no_pending_exception;
twisti@1162 3530 Address exception_addr(G2_thread, Thread::pending_exception_offset());
duke@435 3531 __ ld_ptr(exception_addr, Oexception);
kvn@3037 3532 __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
duke@435 3533 __ stop("must not have pending exception here");
duke@435 3534 __ bind(no_pending_exception);
duke@435 3535 }
duke@435 3536 #endif
duke@435 3537
kvn@3037 3538 __ ba(cont);
never@1472 3539 __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
duke@435 3540
duke@435 3541 //
duke@435 3542 // Reexecute entry, similar to c2 uncommon trap
duke@435 3543 //
duke@435 3544 int reexecute_offset = __ offset() - start;
duke@435 3545
duke@435 3546 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
duke@435 3547 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@435 3548
never@1472 3549 __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
duke@435 3550
duke@435 3551 __ bind(cont);
duke@435 3552
duke@435 3553 __ set_last_Java_frame(SP, noreg);
duke@435 3554
duke@435 3555 // do the call by hand so we can get the oopmap
duke@435 3556
duke@435 3557 __ mov(G2_thread, L7_thread_cache);
duke@435 3558 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
duke@435 3559 __ delayed()->mov(G2_thread, O0);
duke@435 3560
duke@435 3561 // Set an oopmap for the call site; this describes all our saved volatile registers
duke@435 3562
duke@435 3563 oop_maps->add_gc_map( __ offset()-start, map);
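// (The offset recorded above is the return-address offset of the hand-made
// call, relative to 'start'; at a safepoint the frame's pc is matched
// against it to locate this OopMap. That is why the call is emitted by
// hand rather than via call_VM_leaf, which would not register a GC map
// at the call site.)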
duke@435 3564
duke@435 3565 __ mov(L7_thread_cache, G2_thread);
duke@435 3566
duke@435 3567 __ reset_last_Java_frame();
duke@435 3568
duke@435 3569 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers,
duke@435 3570 // so this move will survive
duke@435 3571
never@1472 3572 __ mov(L0deopt_mode, G4deopt_mode);
duke@435 3573
duke@435 3574 __ mov(O0, O2UnrollBlock->after_save());
duke@435 3575
duke@435 3576 RegisterSaver::restore_result_registers(masm);
duke@435 3577
duke@435 3578 Label noException;
kvn@3037 3579 __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
duke@435 3580
duke@435 3581 // Move the pending exception from exception_oop to Oexception so
duke@435 3582 // the pending exception will be picked up by the interpreter.
duke@435 3583 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
duke@435 3584 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
duke@435 3585 __ bind(noException);
duke@435 3586
duke@435 3587 // deallocate the deoptimization frame taking care to preserve the return values
duke@435 3588 __ mov(Oreturn0, Oreturn0->after_save());
duke@435 3589 __ mov(Oreturn1, Oreturn1->after_save());
duke@435 3590 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
duke@435 3591 __ restore();
duke@435 3592
duke@435 3593 // Allocate new interpreter frame(s) and possible c2i adapter frame
duke@435 3594
duke@435 3595 make_new_frames(masm, true);
duke@435 3596
duke@435 3597 // push a dummy "unpack_frame", taking care of float return values, and
duke@435 3598 // call Deoptimization::unpack_frames to have the unpacker lay out
duke@435 3599 // information in the interpreter frames just created; then return
duke@435 3600 // to the interpreter entry point
duke@435 3601 __ save(SP, -frame_size_words*wordSize, SP);
duke@435 3602 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
duke@435 3603 #if !defined(_LP64)
duke@435 3604 #if defined(COMPILER2)
iveresov@2138 3605 // In the 32-bit build, C2 returns longs in the single register G1
iveresov@2138 3606 __ stx(Greturn1, saved_Greturn1_addr);
duke@435 3607 #endif
duke@435 3608 __ set_last_Java_frame(SP, noreg);
never@1472 3609 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
duke@435 3610 #else
duke@435 3611 // LP64 uses g4 in set_last_Java_frame
never@1472 3612 __ mov(G4deopt_mode, O1);
duke@435 3613 __ set_last_Java_frame(SP, G0);
duke@435 3614 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
duke@435 3615 #endif
duke@435 3616 __ reset_last_Java_frame();
duke@435 3617 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
duke@435 3618
duke@435 3619 #if !defined(_LP64) && defined(COMPILER2)
duke@435 3620 // In 32 bit, C2 returns longs in G1, so restore the saved G1 into
iveresov@2138 3621 // I0/I1 if the return value is long.
iveresov@2138 3622 Label not_long;
kvn@3037 3623 __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
iveresov@2138 3624 __ ldd(saved_Greturn1_addr, I0);
iveresov@2138 3625 __ bind(not_long);
duke@435 3626 #endif
duke@435 3627 __ ret();
duke@435 3628 __ delayed()->restore();
duke@435 3629
duke@435 3630 masm->flush();
duke@435 3631 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
duke@435 3632 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
duke@435 3633 }
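// Elsewhere in the VM the offsets recorded above are turned back into entry
// points via the DeoptimizationBlob accessors. Roughly (a sketch only; see
// DeoptimizationBlob in codeBlob.hpp for the actual interface):
//
//   DeoptimizationBlob* blob = SharedRuntime::deopt_blob();
//   address deopt_entry     = blob->unpack();                       // offset 0
//   address exception_entry = blob->unpack_with_exception();        // exception_offset
//   address reexecute_entry = blob->unpack_with_reexecution();      // reexecute_offset
//   address tls_entry       = blob->unpack_with_exception_in_tls(); // set just above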
duke@435 3634
duke@435 3635 #ifdef COMPILER2
duke@435 3636
duke@435 3637 //------------------------------generate_uncommon_trap_blob--------------------
duke@435 3638 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
duke@435 3639 // instead.
duke@435 3640 void SharedRuntime::generate_uncommon_trap_blob() {
duke@435 3641 // allocate space for the code
duke@435 3642 ResourceMark rm;
duke@435 3643 // setup code generation tools
duke@435 3644 int pad = VerifyThread ? 512 : 0;
kvn@3582 3645 if (UseStackBanging) {
kvn@3582 3646 pad += StackShadowPages*16 + 32;
kvn@3582 3647 }
duke@435 3648 #ifdef _LP64
duke@435 3649 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
duke@435 3650 #else
duke@435 3651 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
duke@435 3652 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
duke@435 3653 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
duke@435 3654 #endif
duke@435 3655 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@435 3656 Register O2UnrollBlock = O2;
duke@435 3657 Register O2klass_index = O2;
duke@435 3658
duke@435 3659 //
duke@435 3660 // This is the entry point for all traps the compiler takes when it thinks
duke@435 3661 // it cannot handle further execution of compiled code. The frame is
duke@435 3662 // deoptimized in these cases and converted into interpreter frames for
duke@435 3663 // execution.
duke@435 3664 // The steps taken by this frame are as follows (see the pseudocode sketch after this list):
duke@435 3665 // - push a fake "unpack_frame"
duke@435 3666 // - call the C routine Deoptimization::uncommon_trap (this function
duke@435 3667 // packs the current compiled frame into vframe arrays and returns
duke@435 3668 // information about the number and size of interpreter frames which
duke@435 3669 // are equivalent to the frame which is being deoptimized)
duke@435 3670 // - deallocate the "unpack_frame"
duke@435 3671 // - deallocate the deoptimization frame
duke@435 3672 // - in a loop using the information returned in the previous step
duke@435 3673 // push interpreter frames;
duke@435 3674 // - create a dummy "unpack_frame"
duke@435 3675 // - call the C routine: Deoptimization::unpack_frames (this function
duke@435 3676 // lays out values on the interpreter frame which was just created)
duke@435 3677 // - deallocate the dummy unpack_frame
duke@435 3678 // - return to the interpreter entry point
duke@435 3679 //
duke@435 3680 // Refer to the following methods for more information:
duke@435 3681 // - Deoptimization::uncommon_trap
duke@435 3682 // - Deoptimization::unpack_frames
duke@435 3683
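// The step list above, compressed into pseudocode (illustrative only; the
// real control transfers are the SPARC instructions emitted below):
//
//   save_frame(0);                                 // fake "unpack_frame"
//   info = Deoptimization::uncommon_trap(thread, klass_index);
//   restore();                                     // pop the "unpack_frame"
//   restore();                                     // pop the deoptimized frame
//   make_new_frames(masm, false);                  // interpreter frame(s)
//   save_frame(0);                                 // dummy "unpack_frame"
//   Deoptimization::unpack_frames(thread, Unpack_uncommon_trap);
//   restore();                                     // return to the interpreter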
duke@435 3684 // the unloaded class index is in O0 (first parameter to this blob)
duke@435 3685
duke@435 3686 // push a dummy "unpack_frame"
duke@435 3687 // and call Deoptimization::uncommon_trap to pack the compiled frame into
duke@435 3688 // vframe array and return the UnrollBlock information
duke@435 3689 __ save_frame(0);
duke@435 3690 __ set_last_Java_frame(SP, noreg);
duke@435 3691 __ mov(I0, O2klass_index);
duke@435 3692 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
duke@435 3693 __ reset_last_Java_frame();
duke@435 3694 __ mov(O0, O2UnrollBlock->after_save());
duke@435 3695 __ restore();
duke@435 3696
duke@435 3697 // deallocate the deoptimized frame taking care to preserve the return values
duke@435 3698 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
duke@435 3699 __ restore();
duke@435 3700
duke@435 3701 // Allocate new interpreter frame(s) and possible c2i adapter frame
duke@435 3702
duke@435 3703 make_new_frames(masm, false);
duke@435 3704
duke@435 3705 // push a dummy "unpack_frame", taking care of float return values, and
duke@435 3706 // call Deoptimization::unpack_frames to have the unpacker lay out
duke@435 3707 // information in the interpreter frames just created; then return
duke@435 3708 // to the interpreter entry point
duke@435 3709 __ save_frame(0);
duke@435 3710 __ set_last_Java_frame(SP, noreg);
duke@435 3711 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
duke@435 3712 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
duke@435 3713 __ reset_last_Java_frame();
duke@435 3714 __ ret();
duke@435 3715 __ delayed()->restore();
duke@435 3716
duke@435 3717 masm->flush();
duke@435 3718 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
duke@435 3719 }
duke@435 3720
duke@435 3721 #endif // COMPILER2
duke@435 3722
duke@435 3723 //------------------------------generate_handler_blob-------------------
duke@435 3724 //
duke@435 3725 // Generate a special Compile2Runtime blob that saves all registers, and sets
duke@435 3726 // up an OopMap.
duke@435 3727 //
duke@435 3728 // This blob is jumped to (via a breakpoint and the signal handler) from a
duke@435 3729 // safepoint in compiled code. On entry to this blob, O7 contains the
duke@435 3730 // address in the original nmethod at which we should resume normal execution.
duke@435 3731 // Thus, this blob looks like a subroutine which must preserve lots of
duke@435 3732 // registers and return normally. Note that O7 is never register-allocated,
duke@435 3733 // so it is guaranteed to be free here.
duke@435 3734 //
duke@435 3735
duke@435 3736 // The hardest part of what this blob must do is to save the 64-bit %o
duke@435 3737 // registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
duke@435 3738 // an interrupt will chop off their heads. Making space in the caller's frame
duke@435 3739 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
duke@435 3740 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
duke@435 3741 // SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
duke@435 3742 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
duke@435 3743 // Tricky, tricky, tricky...
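// In outline, the dance goes like this (a sketch of the scheme described
// above; the actual sequence lives in RegisterSaver::save_live_registers):
//
//   1. sub  SP, extra, SP       // grow the caller's frame first
//   2. stx  %o0..%o5, %o7, ...  // park the full 64-bit %o's in that space
//   3. save SP, -frame, SP      // now the window save cannot clip them
//   4. add  FP, extra, FP       // fix the caller's SP (our FP) back up so
//                               // the stack-crawler sees the original frame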
duke@435 3744
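// For reference, the platform-independent runtime creates these blobs
// roughly as follows (a sketch, assuming the SharedRuntime::generate_stubs()
// of this vintage; poll_type is tested against POLL_AT_RETURN below):
//
//   _polling_page_safepoint_handler_blob =
//     generate_handler_blob(CAST_FROM_FN_PTR(address,
//         SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
//   _polling_page_return_handler_blob =
//     generate_handler_blob(CAST_FROM_FN_PTR(address,
//         SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);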
kvn@4103 3745 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
duke@435 3746 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
duke@435 3747
duke@435 3748 // allocate space for the code
duke@435 3749 ResourceMark rm;
duke@435 3750 // setup code generation tools
duke@435 3751 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
duke@435 3752 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
duke@435 3753 // even larger with TraceJumps
duke@435 3754 int pad = TraceJumps ? 512 : 0;
duke@435 3755 CodeBuffer buffer("handler_blob", 1600 + pad, 512);
duke@435 3756 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@435 3757 int frame_size_words;
duke@435 3758 OopMapSet *oop_maps = new OopMapSet();
duke@435 3759 OopMap* map = NULL;
duke@435 3760
duke@435 3761 int start = __ offset();
duke@435 3762
kvn@4103 3763 bool cause_return = (poll_type == POLL_AT_RETURN);
duke@435 3764 // If this causes a return before the processing, then do a "restore"
duke@435 3765 if (cause_return) {
duke@435 3766 __ restore();
duke@435 3767 } else {
duke@435 3768 // Make it look like we were called via the poll
duke@435 3769 // so that frame constructor always sees a valid return address
duke@435 3770 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
duke@435 3771 __ sub(O7, frame::pc_return_offset, O7);
duke@435 3772 }
duke@435 3773
duke@435 3774 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@435 3775
duke@435 3776 // setup last_Java_sp (blows G4)
duke@435 3777 __ set_last_Java_frame(SP, noreg);
duke@435 3778
duke@435 3779 // call into the runtime to handle the safepoint polling page exception
duke@435 3780 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
duke@435 3781 __ mov(G2_thread, O0);
duke@435 3782 __ save_thread(L7_thread_cache);
duke@435 3783 __ call(call_ptr);
duke@435 3784 __ delayed()->nop();
duke@435 3785
duke@435 3786 // Set an oopmap for the call site.
duke@435 3787 // We need this not only for callee-saved registers, but also for volatile
duke@435 3788 // registers that the compiler might be keeping live across a safepoint.
duke@435 3789
duke@435 3790 oop_maps->add_gc_map( __ offset() - start, map);
duke@435 3791
duke@435 3792 __ restore_thread(L7_thread_cache);
duke@435 3793 // clear last_Java_sp
duke@435 3794 __ reset_last_Java_frame();
duke@435 3795
duke@435 3796 // Check for exceptions
duke@435 3797 Label pending;
duke@435 3798
duke@435 3799 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
kvn@3037 3800 __ br_notnull_short(O1, Assembler::pn, pending);
duke@435 3801
duke@435 3802 RegisterSaver::restore_live_registers(masm);
duke@435 3803
duke@435 3804 // We are back to the original state on entry and ready to go.
duke@435 3805
duke@435 3806 __ retl();
duke@435 3807 __ delayed()->nop();
duke@435 3808
duke@435 3809 // Pending exception after the safepoint
duke@435 3810
duke@435 3811 __ bind(pending);
duke@435 3812
duke@435 3813 RegisterSaver::restore_live_registers(masm);
duke@435 3814
duke@435 3815 // We are back to the original state on entry.
duke@435 3816
duke@435 3817 // Tail-call forward_exception_entry, with the issuing PC in O7,
duke@435 3818 // so it looks like the original nmethod called forward_exception_entry.
duke@435 3819 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
duke@435 3820 __ JMP(O0, 0);
duke@435 3821 __ delayed()->nop();
duke@435 3822
duke@435 3823 // -------------
duke@435 3824 // make sure all code is generated
duke@435 3825 masm->flush();
duke@435 3826
duke@435 3827 // return exception blob
duke@435 3828 return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
duke@435 3829 }
duke@435 3830
duke@435 3831 //
duke@435 3832 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
duke@435 3833 //
duke@435 3834 // Generate a stub that calls into the VM to find out the proper destination
duke@435 3835 // of a java call. All the argument registers are live at this point,
duke@435 3836 // but since this is generic code we don't know what they are, and the
duke@435 3837 // caller must do any GC of the args.
duke@435 3838 //
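// For reference, the shared runtime typically instantiates the resolve stubs
// along these lines (a sketch; the resolver entry points are the *_C
// functions in sharedRuntime.cpp):
//
//   _resolve_static_call_blob =
//     generate_resolve_blob(CAST_FROM_FN_PTR(address,
//         SharedRuntime::resolve_static_call_C), "resolve_static_call");
//   _resolve_virtual_call_blob =
//     generate_resolve_blob(CAST_FROM_FN_PTR(address,
//         SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
//   _resolve_opt_virtual_call_blob =
//     generate_resolve_blob(CAST_FROM_FN_PTR(address,
//         SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");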
never@2950 3839 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
duke@435 3840 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
duke@435 3841
duke@435 3842 // allocate space for the code
duke@435 3843 ResourceMark rm;
duke@435 3844 // setup code generation tools
duke@435 3845 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
duke@435 3846 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
duke@435 3847 // even larger with TraceJumps
duke@435 3848 int pad = TraceJumps ? 512 : 0;
duke@435 3849 CodeBuffer buffer(name, 1600 + pad, 512);
duke@435 3850 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@435 3851 int frame_size_words;
duke@435 3852 OopMapSet *oop_maps = new OopMapSet();
duke@435 3853 OopMap* map = NULL;
duke@435 3854
duke@435 3855 int start = __ offset();
duke@435 3856
duke@435 3857 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@435 3858
duke@435 3859 int frame_complete = __ offset();
duke@435 3860
duke@435 3861 // setup last_Java_sp (blows G4)
duke@435 3862 __ set_last_Java_frame(SP, noreg);
duke@435 3863
duke@435 3864 // call into the runtime to resolve the call site
duke@435 3865 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
duke@435 3866 __ mov(G2_thread, O0);
duke@435 3867 __ save_thread(L7_thread_cache);
duke@435 3868 __ call(destination, relocInfo::runtime_call_type);
duke@435 3869 __ delayed()->nop();
duke@435 3870
duke@435 3871 // O0 contains the address we are going to jump to, assuming no exception was installed
duke@435 3872
duke@435 3873 // Set an oopmap for the call site.
duke@435 3874 // We need this not only for callee-saved registers, but also for volatile
duke@435 3875 // registers that the compiler might be keeping live across a safepoint.
duke@435 3876
duke@435 3877 oop_maps->add_gc_map( __ offset() - start, map);
duke@435 3878
duke@435 3879 __ restore_thread(L7_thread_cache);
duke@435 3880 // clear last_Java_sp
duke@435 3881 __ reset_last_Java_frame();
duke@435 3882
duke@435 3883 // Check for exceptions
duke@435 3884 Label pending;
duke@435 3885
duke@435 3886 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
kvn@3037 3887 __ br_notnull_short(O1, Assembler::pn, pending);
duke@435 3888
coleenp@4037 3889 // get the returned Method*
coleenp@4037 3890
coleenp@4037 3891 __ get_vm_result_2(G5_method);
duke@435 3892 __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
duke@435 3893
duke@435 3894 // O0 is where we want to jump; overwrite G3, which is saved and scratch
duke@435 3895
duke@435 3896 __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
duke@435 3897
duke@435 3898 RegisterSaver::restore_live_registers(masm);
duke@435 3899
duke@435 3900 // We are back to the original state on entry and ready to go.
duke@435 3901
duke@435 3902 __ JMP(G3, 0);
duke@435 3903 __ delayed()->nop();
duke@435 3904
duke@435 3905 // Pending exception after the safepoint
duke@435 3906
duke@435 3907 __ bind(pending);
duke@435 3908
duke@435 3909 RegisterSaver::restore_live_registers(masm);
duke@435 3910
duke@435 3911 // We are back to the original state on entry.
duke@435 3912
duke@435 3913 // Tail-call forward_exception_entry, with the issuing PC in O7,
duke@435 3914 // so it looks like the original nmethod called forward_exception_entry.
duke@435 3915 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
duke@435 3916 __ JMP(O0, 0);
duke@435 3917 __ delayed()->nop();
duke@435 3918
duke@435 3919 // -------------
duke@435 3920 // make sure all code is generated
duke@435 3921 masm->flush();
duke@435 3922
duke@435 3923 // return the blob
duke@435 3924 // frame_size_words or bytes??
duke@435 3925 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
duke@435 3926 }
