src/cpu/sparc/vm/templateInterpreter_sparc.cpp

author:      mikael
date:        Tue, 24 Dec 2013 11:48:39 -0800
changeset:   6198:55fb97c4c58d
parent:      6039:bd3237e0e18d
child:       6223:add2caa66e7e
permissions: -rw-r--r--

8029233: Update copyright year to match last edit in jdk8 hotspot repository for 2013
Summary: Copyright year updated for files modified during 2013
Reviewed-by: twisti, iveresov

duke@435 1 /*
jiangli@4936 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
twisti@4323 26 #include "asm/macroAssembler.hpp"
stefank@2314 27 #include "interpreter/bytecodeHistogram.hpp"
stefank@2314 28 #include "interpreter/interpreter.hpp"
stefank@2314 29 #include "interpreter/interpreterGenerator.hpp"
stefank@2314 30 #include "interpreter/interpreterRuntime.hpp"
stefank@2314 31 #include "interpreter/templateTable.hpp"
stefank@2314 32 #include "oops/arrayOop.hpp"
coleenp@4037 33 #include "oops/methodData.hpp"
coleenp@4037 34 #include "oops/method.hpp"
stefank@2314 35 #include "oops/oop.inline.hpp"
stefank@2314 36 #include "prims/jvmtiExport.hpp"
stefank@2314 37 #include "prims/jvmtiThreadState.hpp"
stefank@2314 38 #include "runtime/arguments.hpp"
stefank@2314 39 #include "runtime/deoptimization.hpp"
stefank@2314 40 #include "runtime/frame.inline.hpp"
stefank@2314 41 #include "runtime/sharedRuntime.hpp"
stefank@2314 42 #include "runtime/stubRoutines.hpp"
stefank@2314 43 #include "runtime/synchronizer.hpp"
stefank@2314 44 #include "runtime/timer.hpp"
stefank@2314 45 #include "runtime/vframeArray.hpp"
stefank@2314 46 #include "utilities/debug.hpp"
jprovino@4542 47 #include "utilities/macros.hpp"
duke@435 48
duke@435 49 #ifndef CC_INTERP
duke@435 50 #ifndef FAST_DISPATCH
duke@435 51 #define FAST_DISPATCH 1
duke@435 52 #endif
duke@435 53 #undef FAST_DISPATCH
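// Editor's note: the #define/#undef pair above deliberately compiles out
// FAST_DISPATCH while keeping the macro name documented; every
// "#ifdef FAST_DISPATCH" block below (e.g. the ProfileInterpreter check in
// generate_fixed_frame) is therefore dead in this configuration, and
// deleting just the #undef would re-enable those paths.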
duke@435 54
duke@435 55
duke@435 56 // Generation of Interpreter
duke@435 57 //
duke@435 58 // The InterpreterGenerator generates the interpreter into Interpreter::_code.
duke@435 59
duke@435 60
duke@435 61 #define __ _masm->
duke@435 62
duke@435 63
duke@435 64 //----------------------------------------------------------------------------------------------------
duke@435 65
duke@435 66
duke@435 67 void InterpreterGenerator::save_native_result(void) {
duke@435 68 // result potentially in O0/O1: save it across calls
duke@435 69 const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
duke@435 70
duke@435 71 // result potentially in F0/F1: save it across calls
duke@435 72 const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
duke@435 73
duke@435 74 // save and restore any potential method result value around the unlocking operation
duke@435 75 __ stf(FloatRegisterImpl::D, F0, d_tmp);
duke@435 76 #ifdef _LP64
duke@435 77 __ stx(O0, l_tmp);
duke@435 78 #else
duke@435 79 __ std(O0, l_tmp);
duke@435 80 #endif
duke@435 81 }
duke@435 82
duke@435 83 void InterpreterGenerator::restore_native_result(void) {
duke@435 84 const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
duke@435 85 const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
duke@435 86
duke@435 87 // Restore any method result value
duke@435 88 __ ldf(FloatRegisterImpl::D, d_tmp, F0);
duke@435 89 #ifdef _LP64
duke@435 90 __ ldx(l_tmp, O0);
duke@435 91 #else
duke@435 92 __ ldd(l_tmp, O0);
duke@435 93 #endif
duke@435 94 }
duke@435 95
duke@435 96 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
duke@435 97 assert(!pass_oop || message == NULL, "either oop or message but not both");
duke@435 98 address entry = __ pc();
duke@435 99 // expression stack must be empty before entering the VM if an exception happened
duke@435 100 __ empty_expression_stack();
duke@435 101 // load exception object
duke@435 102 __ set((intptr_t)name, G3_scratch);
duke@435 103 if (pass_oop) {
duke@435 104 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
duke@435 105 } else {
duke@435 106 __ set((intptr_t)message, G4_scratch);
duke@435 107 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
duke@435 108 }
duke@435 109 // throw exception
duke@435 110 assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
twisti@1162 111 AddressLiteral thrower(Interpreter::throw_exception_entry());
twisti@1162 112 __ jump_to(thrower, G3_scratch);
duke@435 113 __ delayed()->nop();
duke@435 114 return entry;
duke@435 115 }
duke@435 116
duke@435 117 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
duke@435 118 address entry = __ pc();
duke@435 119 // expression stack must be empty before entering the VM if an exception
duke@435 120 // happened
duke@435 121 __ empty_expression_stack();
duke@435 122 // load exception object
duke@435 123 __ call_VM(Oexception,
duke@435 124 CAST_FROM_FN_PTR(address,
duke@435 125 InterpreterRuntime::throw_ClassCastException),
duke@435 126 Otos_i);
duke@435 127 __ should_not_reach_here();
duke@435 128 return entry;
duke@435 129 }
duke@435 130
duke@435 131
duke@435 132 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
duke@435 133 address entry = __ pc();
duke@435 134 // expression stack must be empty before entering the VM if an exception happened
duke@435 135 __ empty_expression_stack();
duke@435 136 // convention: expect aberrant index in register G3_scratch, then shuffle the
duke@435 137 // index to G4_scratch for the VM call
duke@435 138 __ mov(G3_scratch, G4_scratch);
duke@435 139 __ set((intptr_t)name, G3_scratch);
duke@435 140 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
duke@435 141 __ should_not_reach_here();
duke@435 142 return entry;
duke@435 143 }
duke@435 144
duke@435 145
duke@435 146 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
duke@435 147 address entry = __ pc();
duke@435 148 // expression stack must be empty before entering the VM if an exception happened
duke@435 149 __ empty_expression_stack();
duke@435 150 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
duke@435 151 __ should_not_reach_here();
duke@435 152 return entry;
duke@435 153 }
duke@435 154
duke@435 155
twisti@6039 156 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
twisti@6039 157 address entry = __ pc();
twisti@1858 158
duke@435 159 #if !defined(_LP64) && defined(COMPILER2)
duke@435 160 // All return values are where we want them, except for Longs. C2 returns
duke@435 161 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
duke@435 162 // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
duke@435 163 // build, even when returning from interpreted code, we just do a little
duke@435 164 // shuffling here.
duke@435 165 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
duke@435 166 // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
duke@435 167 // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
duke@435 168
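    // Editor's sketch of the shuffle below: with a 64-bit long in G1, e.g.
    //   G1 = 0x1122334455667788
    // the two instructions produce
    //   O1 = 0x55667788   (srl by 0 zero-extends the low word)
    //   O0 = 0x11223344   (srlx by 32 extracts the high word)
    // which is the O0 (high) / O1 (low) pair the 32-bit interpreter expects.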
twisti@6039 169 if (state == ltos) {
twisti@1858 170 __ srl (G1, 0, O1);
twisti@1858 171 __ srlx(G1, 32, O0);
duke@435 172 }
twisti@1858 173 #endif // !_LP64 && COMPILER2
duke@435 174
duke@435 175 // The callee returns with the stack possibly adjusted by adapter transition
duke@435 176 // We remove that possible adjustment here.
duke@435 177 // All interpreter local registers are untouched. Any result is passed back
duke@435 178 // in the O0/O1 or float registers. Before continuing, the arguments must be
duke@435 179 // popped from the java expression stack; i.e., Lesp must be adjusted.
duke@435 180
duke@435 181 __ mov(Llast_SP, SP); // Remove any adapter added stack space.
duke@435 182
duke@435 183 const Register cache = G3_scratch;
twisti@6039 184 const Register index = G1_scratch;
twisti@6039 185 __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
twisti@6039 186
twisti@6039 187 const Register flags = cache;
twisti@6039 188 __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
twisti@6039 189 const Register parameter_size = flags;
twisti@6039 190 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words
twisti@6039 191 __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes
twisti@6039 192 __ add(Lesp, parameter_size, Lesp); // pop arguments
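    // Editor's worked example (hypothetical callee): a static method taking
    // (long, int) has parameter_size == 3 slots (long = 2, int = 1); the sll
    // converts slots to bytes via Interpreter::logStackElementSize, and the
    // add bumps Lesp past the arguments the caller pushed for this call.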
duke@435 193 __ dispatch_next(state, step);
duke@435 194
duke@435 195 return entry;
duke@435 196 }
duke@435 197
duke@435 198
duke@435 199 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
duke@435 200 address entry = __ pc();
duke@435 201 __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
duke@435 202 { Label L;
twisti@1162 203 Address exception_addr(G2_thread, Thread::pending_exception_offset());
twisti@1162 204 __ ld_ptr(exception_addr, Gtemp); // Load pending exception.
kvn@3037 205 __ br_null_short(Gtemp, Assembler::pt, L);
duke@435 206 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
duke@435 207 __ should_not_reach_here();
duke@435 208 __ bind(L);
duke@435 209 }
duke@435 210 __ dispatch_next(state, step);
duke@435 211 return entry;
duke@435 212 }
duke@435 213
duke@435 214 // A result handler converts/unboxes a native call result into
duke@435 215 // a java interpreter/compiler result. The current frame is an
duke@435 216 // interpreter frame. The activation frame unwind code must be
duke@435 217 // consistent with that of TemplateTable::_return(...). In the
duke@435 218 // case of native methods, the caller's SP was not modified.
duke@435 219 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
duke@435 220 address entry = __ pc();
duke@435 221 Register Itos_i = Otos_i ->after_save();
duke@435 222 Register Itos_l = Otos_l ->after_save();
duke@435 223 Register Itos_l1 = Otos_l1->after_save();
duke@435 224 Register Itos_l2 = Otos_l2->after_save();
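  // Editor's note on the T_BOOLEAN case below: subcc(G0, O0, G0) computes
  // 0 - O0, setting the carry (borrow) flag exactly when O0 != 0; addc then
  // adds that carry to zero, so Itos_i becomes 1 for any non-zero result and
  // 0 otherwise, normalizing the boolean without a branch.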
duke@435 225 switch (type) {
duke@435 226 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
duke@435 227 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value!
duke@435 228 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
duke@435 229 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
duke@435 230 case T_LONG :
duke@435 231 #ifndef _LP64
duke@435 232 __ mov(O1, Itos_l2); // move other half of long
duke@435 233 #endif // ifdef or no ifdef, fall through to the T_INT case
duke@435 234 case T_INT : __ mov(O0, Itos_i); break;
duke@435 235 case T_VOID : /* nothing to do */ break;
duke@435 236 case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break;
duke@435 237 case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break;
duke@435 238 case T_OBJECT :
duke@435 239 __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
duke@435 240 __ verify_oop(Itos_i);
duke@435 241 break;
duke@435 242 default : ShouldNotReachHere();
duke@435 243 }
duke@435 244 __ ret(); // return from interpreter activation
duke@435 245 __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame
twisti@4412 246 NOT_PRODUCT(__ emit_int32(0);) // marker for disassembly
duke@435 247 return entry;
duke@435 248 }
duke@435 249
duke@435 250 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
duke@435 251 address entry = __ pc();
duke@435 252 __ push(state);
duke@435 253 __ call_VM(noreg, runtime_entry);
duke@435 254 __ dispatch_via(vtos, Interpreter::normal_table(vtos));
duke@435 255 return entry;
duke@435 256 }
duke@435 257
duke@435 258
duke@435 259 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
duke@435 260 address entry = __ pc();
duke@435 261 __ dispatch_next(state);
duke@435 262 return entry;
duke@435 263 }
duke@435 264
duke@435 265 //
duke@435 266 // Helpers for commoning out cases in the various type of method entries.
duke@435 267 //
duke@435 268
duke@435 269 // increment invocation count & check for overflow
duke@435 270 //
duke@435 271 // Note: checking for negative value instead of overflow
duke@435 272 // so we have a 'sticky' overflow test
duke@435 273 //
duke@435 274 // Lmethod: method
duke@435 275 // ??: invocation counter
duke@435 276 //
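// Editor's note: "sticky" means the test keeps firing once tripped. In the
// non-tiered path below the counter is bumped in the upper bits of its word;
// once it carries into the sign bit the value reads as negative, and a
// negative value also compares as huge in the unsigned compare against
// InterpreterInvocationLimit, so overflow keeps being reported on every
// call until the VM resets the counter.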
duke@435 277 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
jiangli@4936 278 // Note: In tiered we increment either counters in MethodCounters* or in
jiangli@4936 279 // MDO depending if we're profiling or not.
jiangli@4936 280 const Register Rcounters = G3_scratch;
jiangli@4936 281 Label done;
jiangli@4936 282
iveresov@2138 283 if (TieredCompilation) {
iveresov@2138 284 const int increment = InvocationCounter::count_increment;
iveresov@2138 285 const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
jiangli@4936 286 Label no_mdo;
iveresov@2138 287 if (ProfileInterpreter) {
iveresov@2138 288 // If no method data exists, go to profile_continue.
coleenp@4037 289 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
kvn@3037 290 __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
iveresov@2138 291 // Increment counter
iveresov@2138 292 Address mdo_invocation_counter(G4_scratch,
coleenp@4037 293 in_bytes(MethodData::invocation_counter_offset()) +
iveresov@2138 294 in_bytes(InvocationCounter::counter_offset()));
iveresov@2138 295 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
iveresov@2138 296 G3_scratch, Lscratch,
iveresov@2138 297 Assembler::zero, overflow);
kvn@3037 298 __ ba_short(done);
iveresov@2138 299 }
iveresov@2138 300
jiangli@4936 301 // Increment counter in MethodCounters*
iveresov@2138 302 __ bind(no_mdo);
jiangli@4936 303 Address invocation_counter(Rcounters,
jiangli@4936 304 in_bytes(MethodCounters::invocation_counter_offset()) +
jiangli@4936 305 in_bytes(InvocationCounter::counter_offset()));
jiangli@4936 306 __ get_method_counters(Lmethod, Rcounters, done);
iveresov@2138 307 __ increment_mask_and_jump(invocation_counter, increment, mask,
jiangli@4936 308 G4_scratch, Lscratch,
iveresov@2138 309 Assembler::zero, overflow);
iveresov@2138 310 __ bind(done);
iveresov@2138 311 } else {
iveresov@2138 312 // Update standard invocation counters
jiangli@4936 313 __ get_method_counters(Lmethod, Rcounters, done);
jiangli@4936 314 __ increment_invocation_counter(Rcounters, O0, G4_scratch);
jiangli@4936 315 if (ProfileInterpreter) {
jiangli@4936 316 Address interpreter_invocation_counter(Rcounters,
jiangli@4936 317 in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
jiangli@4936 318 __ ld(interpreter_invocation_counter, G4_scratch);
jiangli@4936 319 __ inc(G4_scratch);
jiangli@4936 320 __ st(G4_scratch, interpreter_invocation_counter);
iveresov@2138 321 }
iveresov@2138 322
iveresov@2138 323 if (ProfileInterpreter && profile_method != NULL) {
iveresov@2138 324 // Test to see if we should create a method data oop
iveresov@2138 325 AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
iveresov@2138 326 __ load_contents(profile_limit, G3_scratch);
kvn@3037 327 __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
iveresov@2138 328
iveresov@2138 329 // if no method data exists, go to profile_method
iveresov@2138 330 __ test_method_data_pointer(*profile_method);
iveresov@2138 331 }
iveresov@2138 332
iveresov@2138 333 AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
iveresov@2138 334 __ load_contents(invocation_limit, G3_scratch);
iveresov@2138 335 __ cmp(O0, G3_scratch);
kvn@3037 336 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
iveresov@2138 337 __ delayed()->nop();
jiangli@4936 338 __ bind(done);
duke@435 339 }
duke@435 340
duke@435 341 }
duke@435 342
duke@435 343 // Allocate monitor and lock method (asm interpreter)
coleenp@4037 344 // Lmethod - Method*
duke@435 345 //
duke@435 346 void InterpreterGenerator::lock_method(void) {
coleenp@4037 347 __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0); // Load access flags.
duke@435 348
duke@435 349 #ifdef ASSERT
duke@435 350 { Label ok;
duke@435 351 __ btst(JVM_ACC_SYNCHRONIZED, O0);
duke@435 352 __ br( Assembler::notZero, false, Assembler::pt, ok);
duke@435 353 __ delayed()->nop();
duke@435 354 __ stop("method doesn't need synchronization");
duke@435 355 __ bind(ok);
duke@435 356 }
duke@435 357 #endif // ASSERT
duke@435 358
duke@435 359 // get synchronization object to O0
duke@435 360 { Label done;
stefank@3391 361 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
duke@435 362 __ btst(JVM_ACC_STATIC, O0);
duke@435 363 __ br( Assembler::zero, true, Assembler::pt, done);
duke@435 364 __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
duke@435 365
coleenp@4037 366 __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
coleenp@4037 367 __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
coleenp@4037 368 __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
duke@435 369
coleenp@4037 370 // lock the mirror, not the Klass*
duke@435 371 __ ld_ptr( O0, mirror_offset, O0);
duke@435 372
duke@435 373 #ifdef ASSERT
duke@435 374 __ tst(O0);
coleenp@3627 375 __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
duke@435 376 #endif // ASSERT
duke@435 377
duke@435 378 __ bind(done);
duke@435 379 }
duke@435 380
duke@435 381 __ add_monitor_to_stack(true, noreg, noreg); // allocate monitor elem
duke@435 382 __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
duke@435 383 // __ untested("lock_object from method entry");
duke@435 384 __ lock_object(Lmonitors, O0);
duke@435 385 }
duke@435 386
duke@435 387
duke@435 388 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
duke@435 389 Register Rscratch,
duke@435 390 Register Rscratch2) {
duke@435 391 const int page_size = os::vm_page_size();
duke@435 392 Label after_frame_check;
duke@435 393
duke@435 394 assert_different_registers(Rframe_size, Rscratch, Rscratch2);
duke@435 395
kvn@3037 396 __ set(page_size, Rscratch);
kvn@3037 397 __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
duke@435 398
duke@435 399 // get the stack base, and in debug, verify it is non-zero
twisti@1162 400 __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
duke@435 401 #ifdef ASSERT
duke@435 402 Label base_not_zero;
kvn@3037 403 __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
duke@435 404 __ stop("stack base is zero in generate_stack_overflow_check");
duke@435 405 __ bind(base_not_zero);
duke@435 406 #endif
duke@435 407
duke@435 408 // get the stack size, and in debug, verify it is non-zero
duke@435 409 assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
twisti@1162 410 __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
duke@435 411 #ifdef ASSERT
duke@435 412 Label size_not_zero;
kvn@3037 413 __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
duke@435 414 __ stop("stack size is zero in generate_stack_overflow_check");
duke@435 415 __ bind(size_not_zero);
duke@435 416 #endif
duke@435 417
duke@435 418 // compute the beginning of the protected zone minus the requested frame size
duke@435 419 __ sub( Rscratch, Rscratch2, Rscratch );
duke@435 420 __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
duke@435 421 __ add( Rscratch, Rscratch2, Rscratch );
duke@435 422
duke@435 423 // Add in the size of the frame (which is the same as subtracting it from the
duke@435 424 // SP, which would take another register).
duke@435 425 __ add( Rscratch, Rframe_size, Rscratch );
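  // Editor's worked example: with 8K pages and, say, StackRedPages = 1 and
  // StackYellowPages = 2, Rscratch now holds
  //   (stack_base - stack_size) + 3 * 8K + frame_size
  // i.e. the lowest SP value that still keeps the new frame clear of the
  // red/yellow guard pages; the unsigned compare against SP follows.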
duke@435 426
duke@435 427 // the frame is greater than one page in size, so check against
duke@435 428 // the bottom of the stack
kvn@4352 429 __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
duke@435 430
bdelsart@3372 431 // the stack will overflow, throw an exception
duke@435 432
bdelsart@3372 433 // Note that SP is restored to sender's sp (in the delay slot). This
bdelsart@3372 434 // is necessary if the sender's frame is an extended compiled frame
bdelsart@3372 435 // (see gen_c2i_adapter()) and safer anyway in case of JSR292
bdelsart@3372 436 // adaptations.
bdelsart@3372 437
bdelsart@3372 438 // Note also that the restored frame is not necessarily interpreted.
bdelsart@3372 439 // Use the shared runtime version of the StackOverflowError.
bdelsart@3372 440 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
bdelsart@3372 441 AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
bdelsart@3372 442 __ jump_to(stub, Rscratch);
bdelsart@3372 443 __ delayed()->mov(O5_savedSP, SP);
duke@435 444
duke@435 445 // if you get to here, then there is enough stack space
duke@435 446 __ bind( after_frame_check );
duke@435 447 }
duke@435 448
duke@435 449
duke@435 450 //
duke@435 451 // Generate a fixed interpreter frame. This is identical setup for interpreted
duke@435 452 // methods and for native methods, hence the shared code.
duke@435 453
duke@435 454 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
duke@435 455 //
duke@435 456 //
duke@435 457 // The entry code sets up a new interpreter frame in 4 steps:
duke@435 458 //
duke@435 459 // 1) Increase caller's SP by the amount needed for the extra local space:
duke@435 460 // (check for overflow)
duke@435 461 // Efficient implementation of xload/xstore bytecodes requires
duke@435 462 // that arguments and non-argument locals are in a contiguously
duke@435 463 // addressable memory block => non-argument locals must be
duke@435 464 // allocated in the caller's frame.
duke@435 465 //
duke@435 466 // 2) Create a new stack frame and register window:
duke@435 467 // The new stack frame must provide space for the standard
duke@435 468 // register save area, the maximum java expression stack size,
duke@435 469 // the monitor slots (0 slots initially), and some frame local
duke@435 470 // scratch locations.
duke@435 471 //
duke@435 472 // 3) The following interpreter activation registers must be setup:
duke@435 473 // Lesp : expression stack pointer
duke@435 474 // Lbcp : bytecode pointer
duke@435 475 // Lmethod : method
duke@435 476 // Llocals : locals pointer
duke@435 477 // Lmonitors : monitor pointer
duke@435 478 // LcpoolCache: constant pool cache
duke@435 479 //
duke@435 480 // 4) Initialize the non-argument locals if necessary:
duke@435 481 // Non-argument locals may need to be initialized to NULL
duke@435 482 // for GC to work. If the oop-map information is accurate
duke@435 483 // (in the absence of the JSR problem), no initialization
duke@435 484 // is necessary.
duke@435 485 //
duke@435 486 // (gri - 2/25/2000)
duke@435 487
duke@435 488
duke@435 489 int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
duke@435 490
duke@435 491 const int extra_space =
duke@435 492 rounded_vm_local_words + // frame local scratch space
roland@5225 493 Method::extra_stack_entries() + // extra stack for jsr 292
duke@435 494 frame::memory_parameter_word_sp_offset + // register save area
duke@435 495 (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
duke@435 496
duke@435 497 const Register Glocals_size = G3;
jiangli@4338 498 const Register RconstMethod = Glocals_size;
duke@435 499 const Register Otmp1 = O3;
duke@435 500 const Register Otmp2 = O4;
duke@435 501 // Lscratch can't be used as a temporary because the call_stub uses
duke@435 502 // it to assert that the stack frame was set up correctly.
jiangli@4338 503 const Address constMethod (G5_method, Method::const_offset());
jiangli@4338 504 const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
duke@435 505
jiangli@4338 506 __ ld_ptr( constMethod, RconstMethod );
duke@435 507 __ lduh( size_of_parameters, Glocals_size);
duke@435 508
duke@435 509 // Gargs points to first local + BytesPerWord
duke@435 510 // Set the saved SP after the register window save
duke@435 511 //
duke@435 512 assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
twisti@1861 513 __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
duke@435 514 __ add(Gargs, Otmp1, Gargs);
duke@435 515
duke@435 516 if (native_call) {
duke@435 517 __ calc_mem_param_words( Glocals_size, Gframe_size );
duke@435 518 __ add( Gframe_size, extra_space, Gframe_size);
duke@435 519 __ round_to( Gframe_size, WordsPerLong );
duke@435 520 __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
duke@435 521 } else {
duke@435 522
duke@435 523 //
duke@435 524 // Compute number of locals in method apart from incoming parameters
duke@435 525 //
jiangli@4338 526 const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
jiangli@4338 527 __ ld_ptr( constMethod, Otmp1 );
duke@435 528 __ lduh( size_of_locals, Otmp1 );
duke@435 529 __ sub( Otmp1, Glocals_size, Glocals_size );
duke@435 530 __ round_to( Glocals_size, WordsPerLong );
twisti@1861 531 __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
duke@435 532
duke@435 533 // see if the frame is greater than one page in size. If so,
duke@435 534 // then we need to verify there is enough stack space remaining
duke@435 535 // Frame_size = (max_stack + extra_space) * BytesPerWord;
jiangli@4302 536 __ ld_ptr( constMethod, Gframe_size );
jiangli@4302 537 __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
duke@435 538 __ add( Gframe_size, extra_space, Gframe_size );
duke@435 539 __ round_to( Gframe_size, WordsPerLong );
twisti@1861 540 __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
duke@435 541
duke@435 542 // Add in java locals size for stack overflow check only
duke@435 543 __ add( Gframe_size, Glocals_size, Gframe_size );
duke@435 544
duke@435 545 const Register Otmp2 = O4;
duke@435 546 assert_different_registers(Otmp1, Otmp2, O5_savedSP);
duke@435 547 generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
duke@435 548
duke@435 549 __ sub( Gframe_size, Glocals_size, Gframe_size);
duke@435 550
duke@435 551 //
duke@435 552 // bump SP to accommodate the extra locals
duke@435 553 //
duke@435 554 __ sub( SP, Glocals_size, SP );
duke@435 555 }
duke@435 556
duke@435 557 //
duke@435 558 // now set up a stack frame with the size computed above
duke@435 559 //
duke@435 560 __ neg( Gframe_size );
duke@435 561 __ save( SP, Gframe_size, SP );
duke@435 562
duke@435 563 //
duke@435 564 // now set up all the local cache registers
duke@435 565 //
duke@435 566 // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
duke@435 567 // that all present references to Lbyte_code initialize the register
duke@435 568 // immediately before use
duke@435 569 if (native_call) {
duke@435 570 __ mov(G0, Lbcp);
duke@435 571 } else {
coleenp@4037 572 __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
coleenp@4037 573 __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
duke@435 574 }
duke@435 575 __ mov( G5_method, Lmethod); // set Lmethod
duke@435 576 __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
duke@435 577 __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
duke@435 578 #ifdef _LP64
duke@435 579 __ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias
duke@435 580 #endif
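  // Editor's note: on 64-bit SPARC the V9 ABI biases %sp and %fp by
  // STACK_BIAS (2047) relative to the memory they actually address, so
  // FP-relative values such as Lmonitors above must add the bias back in.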
duke@435 581 __ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp
duke@435 582
duke@435 583 // set up interpreter activation registers
duke@435 584 __ sub(Gargs, BytesPerWord, Llocals); // set Llocals
duke@435 585
duke@435 586 if (ProfileInterpreter) {
duke@435 587 #ifdef FAST_DISPATCH
duke@435 588 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
duke@435 589 // they both use I2.
duke@435 590 assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
duke@435 591 #endif // FAST_DISPATCH
duke@435 592 __ set_method_data_pointer();
duke@435 593 }
duke@435 594
duke@435 595 }
duke@435 596
duke@435 597 // Empty method, generate a very fast return.
duke@435 598
duke@435 599 address InterpreterGenerator::generate_empty_entry(void) {
duke@435 600
duke@435 601 // A method that does nothing but return...
duke@435 602
duke@435 603 address entry = __ pc();
duke@435 604 Label slow_path;
duke@435 605
duke@435 606 // do nothing for empty methods (do not even increment invocation counter)
duke@435 607 if ( UseFastEmptyMethods) {
duke@435 608 // If we need a safepoint check, generate full interpreter entry.
twisti@1162 609 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
twisti@1162 610 __ set(sync_state, G3_scratch);
kvn@3037 611 __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
duke@435 612
duke@435 613 // Code: _return
duke@435 614 __ retl();
duke@435 615 __ delayed()->mov(O5_savedSP, SP);
duke@435 616
duke@435 617 __ bind(slow_path);
duke@435 618 (void) generate_normal_entry(false);
duke@435 619
duke@435 620 return entry;
duke@435 621 }
duke@435 622 return NULL;
duke@435 623 }
duke@435 624
duke@435 625 // Call an accessor method (assuming it is resolved; otherwise drop into
duke@435 626 // the vanilla (slow path) entry).
duke@435 627
duke@435 628 // Generates code to elide accessor methods
duke@435 629 // Uses G3_scratch and G1_scratch as scratch
duke@435 630 address InterpreterGenerator::generate_accessor_entry(void) {
duke@435 631
duke@435 632 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
duke@435 633 // parameter size = 1
duke@435 634 // Note: We can only use this code if the getfield has been resolved
duke@435 635 // and if we don't have a null-pointer exception => check for
duke@435 636 // these conditions first and use slow path if necessary.
duke@435 637 address entry = __ pc();
duke@435 638 Label slow_path;
duke@435 639
coleenp@548 640
coleenp@548 641 // XXX: with compressed oops, the pointer load and decode don't fit in the
coleenp@548 642 // delay slot and would damage G1
coleenp@548 643 if ( UseFastAccessorMethods && !UseCompressedOops ) {
duke@435 644 // Check if we need to reach a safepoint and generate full interpreter
duke@435 645 // frame if so.
twisti@1162 646 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
duke@435 647 __ load_contents(sync_state, G3_scratch);
duke@435 648 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
kvn@3037 649 __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
duke@435 650
duke@435 651 // Check if local 0 != NULL
duke@435 652 __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
kvn@3037 653 // check if local 0 == NULL and go to the slow path
kvn@3037 654 __ br_null_short(Otos_i, Assembler::pn, slow_path);
duke@435 655
duke@435 656
duke@435 657 // read first instruction word and extract bytecode @ 1 and index @ 2
duke@435 658 // get first 4 bytes of the bytecodes (big endian!)
coleenp@4037 659 __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
coleenp@4037 660 __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
duke@435 661
duke@435 662 // move index @ 2 far left then to the right most two bytes.
duke@435 663 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
duke@435 664 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
duke@435 665 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
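    // Editor's note: the 4 bytes just loaded are (big endian)
    //   [aload_0][getfield][index-hi][index-lo]
    // The sll by 16 discards the two bytecode bytes; the srl then
    // right-justifies the 2-byte constant-pool-cache index, leaving it
    // pre-multiplied by the cache entry size so it can be added directly
    // to the cache base below.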
duke@435 666
duke@435 667 // get constant pool cache
coleenp@4037 668 __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
coleenp@4037 669 __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
coleenp@4037 670 __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
duke@435 671
duke@435 672 // get specific constant pool cache entry
duke@435 673 __ add(G3_scratch, G1_scratch, G3_scratch);
duke@435 674
duke@435 675 // Check the constant Pool cache entry to see if it has been resolved.
duke@435 676 // If not, need the slow path.
coleenp@4037 677 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
twisti@1162 678 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
duke@435 679 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
duke@435 680 __ and3(G1_scratch, 0xFF, G1_scratch);
kvn@3037 681 __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
duke@435 682
duke@435 683 // Get the type and return field offset from the constant pool cache
twisti@1162 684 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
twisti@1162 685 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
duke@435 686
duke@435 687 Label xreturn_path;
duke@435 688 // Need to differentiate between igetfield, agetfield, bgetfield etc.
duke@435 689 // because they are different sizes.
duke@435 690 // Get the type from the constant pool cache
twisti@3969 691 __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
twisti@3969 692 // Make sure we don't need to mask G1_scratch after the above shift
twisti@3969 693 ConstantPoolCacheEntry::verify_tos_state_shift();
duke@435 694 __ cmp(G1_scratch, atos );
duke@435 695 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
duke@435 696 __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
duke@435 697 __ cmp(G1_scratch, itos);
duke@435 698 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
duke@435 699 __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
duke@435 700 __ cmp(G1_scratch, stos);
duke@435 701 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
duke@435 702 __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
duke@435 703 __ cmp(G1_scratch, ctos);
duke@435 704 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
duke@435 705 __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
duke@435 706 #ifdef ASSERT
duke@435 707 __ cmp(G1_scratch, btos);
duke@435 708 __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
duke@435 709 __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
duke@435 710 __ should_not_reach_here();
duke@435 711 #endif
duke@435 712 __ ldsb(Otos_i, G3_scratch, Otos_i);
duke@435 713 __ bind(xreturn_path);
duke@435 714
duke@435 715 // _ireturn/_areturn
duke@435 716 __ retl(); // return from leaf routine
duke@435 717 __ delayed()->mov(O5_savedSP, SP);
duke@435 718
duke@435 719 // Generate regular method entry
duke@435 720 __ bind(slow_path);
duke@435 721 (void) generate_normal_entry(false);
duke@435 722 return entry;
duke@435 723 }
duke@435 724 return NULL;
duke@435 725 }
duke@435 726
johnc@2781 727 // Method entry for java.lang.ref.Reference.get.
johnc@2781 728 address InterpreterGenerator::generate_Reference_get_entry(void) {
jprovino@4542 729 #if INCLUDE_ALL_GCS
johnc@2781 730 // Code: _aload_0, _getfield, _areturn
johnc@2781 731 // parameter size = 1
johnc@2781 732 //
johnc@2781 733 // The code that gets generated by this routine is split into 2 parts:
johnc@2781 734 // 1. The "intrinsified" code for G1 (or any SATB based GC),
johnc@2781 735 // 2. The slow path - which is an expansion of the regular method entry.
johnc@2781 736 //
johnc@2781 737 // Notes:-
johnc@2781 738 // * In the G1 code we do not check whether we need to block for
johnc@2781 739 // a safepoint. If G1 is enabled then we must execute the specialized
johnc@2781 740 // code for Reference.get (except when the Reference object is null)
johnc@2781 741 // so that we can log the value in the referent field with an SATB
johnc@2781 742 // update buffer.
johnc@2781 743 // If the code for the getfield template is modified so that the
johnc@2781 744 // G1 pre-barrier code is executed when the current method is
johnc@2781 745 // Reference.get() then going through the normal method entry
johnc@2781 746 // will be fine.
johnc@2781 747 // * The G1 code can, however, check the receiver object (the instance
johnc@2781 748 // of java.lang.Reference) and jump to the slow path if null. If the
johnc@2781 749 // Reference object is null then we obviously cannot fetch the referent
johnc@2781 750 // and so we don't need to call the G1 pre-barrier. Thus we can use the
johnc@2781 751 // regular method entry code to generate the NPE.
johnc@2781 752 //
johnc@2781 753 // This code is based on generate_accessor_entry.
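// Editor's sketch (shape only) of the Java method being special-cased here:
//
//   public T get() { return this.referent; }   // java.lang.ref.Reference
//
// i.e. a plain accessor whose field load must feed the G1/SATB pre-barrier.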
johnc@2781 754
johnc@2781 755 address entry = __ pc();
johnc@2781 756
johnc@2781 757 const int referent_offset = java_lang_ref_Reference::referent_offset;
johnc@2781 758 guarantee(referent_offset > 0, "referent offset not initialized");
johnc@2781 759
johnc@2781 760 if (UseG1GC) {
johnc@2781 761 Label slow_path;
johnc@2781 762
johnc@2781 763 // In the G1 code we don't check if we need to reach a safepoint. We
johnc@2781 764 // continue and the thread will safepoint at the next bytecode dispatch.
johnc@2781 765
johnc@2781 766 // Check if local 0 != NULL
johnc@2781 767 // If the receiver is null then it is OK to jump to the slow path.
johnc@2781 768 __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
kvn@3037 769 // check if local 0 == NULL and go to the slow path
kvn@3037 770 __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
johnc@2781 771
johnc@2781 772
johnc@2781 773 // Load the value of the referent field.
johnc@2781 774 if (Assembler::is_simm13(referent_offset)) {
johnc@2781 775 __ load_heap_oop(Otos_i, referent_offset, Otos_i);
johnc@2781 776 } else {
johnc@2781 777 __ set(referent_offset, G3_scratch);
johnc@2781 778 __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
johnc@2781 779 }
johnc@2781 780
johnc@2781 781 // Generate the G1 pre-barrier code to log the value of
johnc@2781 782 // the referent field in an SATB buffer. Note with
johnc@2781 783 // these parameters the pre-barrier does not generate
johnc@2781 784 // the load of the previous value
johnc@2781 785
johnc@2781 786 __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
johnc@2781 787 Otos_i /* pre_val */,
johnc@2781 788 G3_scratch /* tmp */,
johnc@2781 789 true /* preserve_o_regs */);
johnc@2781 790
johnc@2781 791 // _areturn
johnc@2781 792 __ retl(); // return from leaf routine
johnc@2781 793 __ delayed()->mov(O5_savedSP, SP);
johnc@2781 794
johnc@2781 795 // Generate regular method entry
johnc@2781 796 __ bind(slow_path);
johnc@2781 797 (void) generate_normal_entry(false);
johnc@2781 798 return entry;
johnc@2781 799 }
jprovino@4542 800 #endif // INCLUDE_ALL_GCS
johnc@2781 801
johnc@2781 802 // If G1 is not enabled then attempt to go through the accessor entry point;
johnc@2781 803 // Reference.get is an accessor.
johnc@2781 804 return generate_accessor_entry();
johnc@2781 805 }
johnc@2781 806
duke@435 807 //
duke@435 808 // Interpreter stub for calling a native method. (asm interpreter)
duke@435 809 // This sets up a somewhat different looking stack for calling the native method
duke@435 810 // than the typical interpreter frame setup.
duke@435 811 //
duke@435 812
duke@435 813 address InterpreterGenerator::generate_native_entry(bool synchronized) {
duke@435 814 address entry = __ pc();
duke@435 815
duke@435 816 // the following temporary registers are used during frame creation
duke@435 817 const Register Gtmp1 = G3_scratch ;
duke@435 818 const Register Gtmp2 = G1_scratch;
duke@435 819 bool inc_counter = UseCompiler || CountCompiledCalls;
duke@435 820
duke@435 821 // make sure registers are different!
duke@435 822 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
duke@435 823
coleenp@4037 824 const Address Laccess_flags(Lmethod, Method::access_flags_offset());
duke@435 825
duke@435 826 const Register Glocals_size = G3;
duke@435 827 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
duke@435 828
duke@435 829 // make sure method is native & not abstract
duke@435 830 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
duke@435 831 #ifdef ASSERT
coleenp@4037 832 __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
duke@435 833 {
duke@435 834 Label L;
duke@435 835 __ btst(JVM_ACC_NATIVE, Gtmp1);
duke@435 836 __ br(Assembler::notZero, false, Assembler::pt, L);
duke@435 837 __ delayed()->nop();
duke@435 838 __ stop("tried to execute non-native method as native");
duke@435 839 __ bind(L);
duke@435 840 }
duke@435 841 { Label L;
duke@435 842 __ btst(JVM_ACC_ABSTRACT, Gtmp1);
duke@435 843 __ br(Assembler::zero, false, Assembler::pt, L);
duke@435 844 __ delayed()->nop();
duke@435 845 __ stop("tried to execute abstract method as non-abstract");
duke@435 846 __ bind(L);
duke@435 847 }
duke@435 848 #endif // ASSERT
duke@435 849
duke@435 850 // generate the code to allocate the interpreter stack frame
duke@435 851 generate_fixed_frame(true);
duke@435 852
duke@435 853 //
duke@435 854 // No locals to initialize for native method
duke@435 855 //
duke@435 856
duke@435 857 // this slot will be set later; we initialize it to null here just in
duke@435 858 // case we get a GC before the actual value is stored later
twisti@1162 859 __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
duke@435 860
twisti@1162 861 const Address do_not_unlock_if_synchronized(G2_thread,
twisti@1162 862 JavaThread::do_not_unlock_if_synchronized_offset());
duke@435 863 // Since at this point in the method invocation the exception handler
duke@435 864 // would try to exit the monitor of a synchronized method which hasn't
duke@435 865 // been entered yet, we set the thread local variable
duke@435 866 // _do_not_unlock_if_synchronized to true. If any exception is thrown by
duke@435 867 // the runtime, exception handling (i.e. unlock_if_synchronized_method)
duke@435 868 // will check this thread-local flag.
duke@435 869 // The flag forces an unwind in the topmost interpreter frame without
duke@435 870 // performing an unlock while doing so.
duke@435 871
duke@435 872 __ movbool(true, G3_scratch);
duke@435 873 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
duke@435 874
duke@435 875 // increment invocation counter and check for overflow
duke@435 876 //
duke@435 877 // Note: checking for negative value instead of overflow
duke@435 878 // so we have a 'sticky' overflow test (may be of
duke@435 879 // importance as soon as we have true MT/MP)
duke@435 880 Label invocation_counter_overflow;
duke@435 881 Label Lcontinue;
duke@435 882 if (inc_counter) {
duke@435 883 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
duke@435 884
duke@435 885 }
duke@435 886 __ bind(Lcontinue);
duke@435 887
duke@435 888 bang_stack_shadow_pages(true);
duke@435 889
duke@435 890 // reset the _do_not_unlock_if_synchronized flag
duke@435 891 __ stbool(G0, do_not_unlock_if_synchronized);
duke@435 892
duke@435 893 // check for synchronized methods
duke@435 894 // Must happen AFTER invocation_counter check and stack overflow check,
duke@435 895 // so the method is not locked if the counter overflows.
duke@435 896
duke@435 897 if (synchronized) {
duke@435 898 lock_method();
duke@435 899 } else {
duke@435 900 #ifdef ASSERT
duke@435 901 { Label ok;
duke@435 902 __ ld(Laccess_flags, O0);
duke@435 903 __ btst(JVM_ACC_SYNCHRONIZED, O0);
duke@435 904 __ br( Assembler::zero, false, Assembler::pt, ok);
duke@435 905 __ delayed()->nop();
duke@435 906 __ stop("method needs synchronization");
duke@435 907 __ bind(ok);
duke@435 908 }
duke@435 909 #endif // ASSERT
duke@435 910 }
duke@435 911
duke@435 912
duke@435 913 // start execution
duke@435 914 __ verify_thread();
duke@435 915
duke@435 916 // JVMTI support
duke@435 917 __ notify_method_entry();
duke@435 918
duke@435 919 // native call
duke@435 920
duke@435 921 // (note that O0 is never an oop--at most it is a handle)
duke@435 922 // It is important not to smash any handles created by this call,
duke@435 923 // until any oop handle in O0 is dereferenced.
duke@435 924
duke@435 925 // (note that the space for outgoing params is preallocated)
duke@435 926
duke@435 927 // get signature handler
duke@435 928 { Label L;
coleenp@4037 929 Address signature_handler(Lmethod, Method::signature_handler_offset());
twisti@1162 930 __ ld_ptr(signature_handler, G3_scratch);
kvn@3037 931 __ br_notnull_short(G3_scratch, Assembler::pt, L);
duke@435 932 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
twisti@1162 933 __ ld_ptr(signature_handler, G3_scratch);
duke@435 934 __ bind(L);
duke@435 935 }
duke@435 936
duke@435 937 // Push a new frame so that the args will really be stored in it.
duke@435 938 // Copy a few locals across so the new frame has the variables
duke@435 939 // we need but these values will be dead at the jni call and
duke@435 940 // therefore not gc volatile like the values in the current
duke@435 941 // frame (Lmethod in particular)
duke@435 942
duke@435 943 // Flush the method pointer to the register save area
duke@435 944 __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
duke@435 945 __ mov(Llocals, O1);
twisti@1162 946
duke@435 947 // calculate where the mirror handle body is allocated in the interpreter frame:
twisti@1162 948 __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
duke@435 949
duke@435 950 // Calculate current frame size
duke@435 951 __ sub(SP, FP, O3); // Calculate negative of current frame size
duke@435 952 __ save(SP, O3, SP); // Allocate an identical sized frame
duke@435 953
duke@435 954 // Note I7 has leftover trash. Slow signature handler will fill it in
duke@435 955 // should we get there. Normal jni call will set reasonable last_Java_pc
duke@435 956 // below (and fix I7 so the stack trace doesn't have a meaningless frame
duke@435 957 // in it).
duke@435 958
duke@435 959 // Load interpreter frame's Lmethod into same register here
duke@435 960
duke@435 961 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
duke@435 962
duke@435 963 __ mov(I1, Llocals);
duke@435 964 __ mov(I2, Lscratch2); // save the address of the mirror
duke@435 965
duke@435 966
duke@435 967 // ONLY Lmethod and Llocals are valid here!
duke@435 968
duke@435 969 // call the signature handler. It will move the args properly since Llocals
duke@435 970 // in the current frame matches that in the outer frame
duke@435 971
duke@435 972 __ callr(G3_scratch, 0);
duke@435 973 __ delayed()->nop();
duke@435 974
duke@435 975 // Result handler is in Lscratch
duke@435 976
duke@435 977 // Reload interpreter frame's Lmethod since slow signature handler may block
duke@435 978 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
duke@435 979
duke@435 980 { Label not_static;
duke@435 981
duke@435 982 __ ld(Laccess_flags, O0);
duke@435 983 __ btst(JVM_ACC_STATIC, O0);
duke@435 984 __ br( Assembler::zero, false, Assembler::pt, not_static);
twisti@1162 985 // get native function entry point (O0 is a good temp until the very end)
coleenp@4037 986 __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
duke@435 987 // for static methods insert the mirror argument
stefank@3391 988 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
duke@435 989
coleenp@4037 990 __ ld_ptr(Lmethod, Method:: const_offset(), O1);
coleenp@4037 991 __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
coleenp@4037 992 __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
duke@435 993 __ ld_ptr(O1, mirror_offset, O1);
duke@435 994 #ifdef ASSERT
duke@435 995 if (!PrintSignatureHandlers) // do not dirty the output with this
duke@435 996 { Label L;
kvn@3037 997 __ br_notnull_short(O1, Assembler::pt, L);
duke@435 998 __ stop("mirror is missing");
duke@435 999 __ bind(L);
duke@435 1000 }
duke@435 1001 #endif // ASSERT
duke@435 1002 __ st_ptr(O1, Lscratch2, 0);
duke@435 1003 __ mov(Lscratch2, O1);
duke@435 1004 __ bind(not_static);
duke@435 1005 }
duke@435 1006
duke@435 1007 // At this point, arguments have been copied off of stack into
duke@435 1008 // their JNI positions, which are O1..O5 and SP[68..].
duke@435 1009 // Oops are boxed in-place on the stack, with handles copied to arguments.
duke@435 1010 // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.
duke@435 1011
duke@435 1012 #ifdef ASSERT
duke@435 1013 { Label L;
kvn@3037 1014 __ br_notnull_short(O0, Assembler::pt, L);
duke@435 1015 __ stop("native entry point is missing");
duke@435 1016 __ bind(L);
duke@435 1017 }
duke@435 1018 #endif // ASSERT
duke@435 1019
duke@435 1020 //
duke@435 1021 // set up the frame anchor
duke@435 1022 //
duke@435 1023 // The scavenge function only needs to know that the PC of this frame is
duke@435 1024 // in the interpreter method entry code, it doesn't need to know the exact
duke@435 1025 // PC and hence we can use O7 which points to the return address from the
duke@435 1026 // previous call in the code stream (signature handler function)
duke@435 1027 //
duke@435 1028 // The other trick is we set last_Java_sp to FP instead of the usual SP because
duke@435 1029 // we have pushed the extra frame in order to protect the volatile register(s)
duke@435 1030 // in that frame when we return from the jni call
duke@435 1031 //
duke@435 1032
duke@435 1033 __ set_last_Java_frame(FP, O7);
duke@435 1034 __ mov(O7, I7); // make dummy interpreter frame look like one above,
duke@435 1035 // not meaningless information that'll confuse me.
duke@435 1036
duke@435 1037 // flush the windows now. We don't care about the current (protection) frame
duke@435 1038 // only the outer frames
duke@435 1039
morris@5283 1040 __ flushw();
duke@435 1041
duke@435 1042 // mark windows as flushed
twisti@1162 1043 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
duke@435 1044 __ set(JavaFrameAnchor::flushed, G3_scratch);
duke@435 1045 __ st(G3_scratch, flags);
duke@435 1046
duke@435 1047 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
duke@435 1048
twisti@1162 1049 Address thread_state(G2_thread, JavaThread::thread_state_offset());
duke@435 1050 #ifdef ASSERT
duke@435 1051 { Label L;
duke@435 1052 __ ld(thread_state, G3_scratch);
kvn@3037 1053 __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
duke@435 1054 __ stop("Wrong thread state in native stub");
duke@435 1055 __ bind(L);
duke@435 1056 }
duke@435 1057 #endif // ASSERT
duke@435 1058 __ set(_thread_in_native, G3_scratch);
duke@435 1059 __ st(G3_scratch, thread_state);
duke@435 1060
duke@435 1061 // Call the jni method, using the delay slot to set the JNIEnv* argument.
duke@435 1062 __ save_thread(L7_thread_cache); // save Gthread
duke@435 1063 __ callr(O0, 0);
duke@435 1064 __ delayed()->
duke@435 1065 add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
duke@435 1066
duke@435 1067 // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
duke@435 1068
duke@435 1069 __ restore_thread(L7_thread_cache); // restore G2_thread
coleenp@548 1070 __ reinit_heapbase();
duke@435 1071
duke@435 1072 // must we block?
duke@435 1073
duke@435 1074 // Block, if necessary, before resuming in _thread_in_Java state.
duke@435 1075 // In order for GC to work, don't clear the last_Java_sp until after blocking.
duke@435 1076 { Label no_block;
twisti@1162 1077 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
duke@435 1078
duke@435 1079 // Switch thread to "native transition" state before reading the synchronization state.
duke@435 1080 // This additional state is necessary because reading and testing the synchronization
duke@435 1081 // state is not atomic w.r.t. GC, as this scenario demonstrates:
duke@435 1082 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
duke@435 1083 // VM thread changes sync state to synchronizing and suspends threads for GC.
duke@435 1084 // Thread A is resumed to finish this native method, but doesn't block here since it
duke@435 1085 // didn't see any synchronization in progress, and escapes.
duke@435 1086 __ set(_thread_in_native_trans, G3_scratch);
duke@435 1087 __ st(G3_scratch, thread_state);
duke@435 1088 if(os::is_MP()) {
duke@435 1089 if (UseMembar) {
duke@435 1090 // Force this write out before the read below
duke@435 1091 __ membar(Assembler::StoreLoad);
duke@435 1092 } else {
duke@435 1093 // Write serialization page so VM thread can do a pseudo remote membar.
duke@435 1094 // We use the current thread pointer to calculate a thread specific
duke@435 1095 // offset to write to within the page. This minimizes bus traffic
duke@435 1096 // due to cache line collision.
duke@435 1097 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
duke@435 1098 }
duke@435 1099 }
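    // Editor's note: the serialization page is a VM-global page; when the
    // VM thread needs the effect of a remote membar it write-protects the
    // page, so the plain store done by serialize_memory either completes
    // (and the state write above is thereby ordered) or traps, letting the
    // VM thread observe this thread. This is the UseMembar == false scheme.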
duke@435 1100 __ load_contents(sync_state, G3_scratch);
duke@435 1101 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
duke@435 1102
duke@435 1103 Label L;
duke@435 1104 __ br(Assembler::notEqual, false, Assembler::pn, L);
twisti@1162 1105 __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
kvn@3037 1106 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
duke@435 1107 __ bind(L);
duke@435 1108
duke@435 1109 // Block. Save any potential method result value before the operation and
duke@435 1110 // use a leaf call to leave the last_Java_frame setup undisturbed.
duke@435 1111 save_native_result();
duke@435 1112 __ call_VM_leaf(L7_thread_cache,
duke@435 1113 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
duke@435 1114 G2_thread);
duke@435 1115
duke@435 1116 // Restore any method result value
duke@435 1117 restore_native_result();
duke@435 1118 __ bind(no_block);
duke@435 1119 }
duke@435 1120
duke@435 1121 // Clear the frame anchor now
duke@435 1122
duke@435 1123 __ reset_last_Java_frame();
duke@435 1124
duke@435 1125 // Move the result handler address
duke@435 1126 __ mov(Lscratch, G3_scratch);
duke@435 1127 // return possible result to the outer frame
duke@435 1128 #ifndef _LP64
duke@435 1129 __ mov(O0, I0);
duke@435 1130 __ restore(O1, G0, O1);
duke@435 1131 #else
duke@435 1132 __ restore(O0, G0, O0);
duke@435 1133 #endif /* _LP64 */
duke@435 1134
duke@435 1135 // Move result handler to expected register
duke@435 1136 __ mov(G3_scratch, Lscratch);
duke@435 1137
duke@435 1138 // Back in normal (native) interpreter frame. State is thread_in_native_trans
duke@435 1139 // switch to thread_in_Java.
duke@435 1140
duke@435 1141 __ set(_thread_in_Java, G3_scratch);
duke@435 1142 __ st(G3_scratch, thread_state);
duke@435 1143
duke@435 1144 // reset handle block
twisti@1162 1145 __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
duke@435 1146 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
duke@435 1147
duke@435 1148 // If we have an oop result, store it where it will be safe for any further
duke@435 1149 // gc until we return, now that we've released the handle protecting it.
duke@435 1150
duke@435 1151 {
duke@435 1152 Label no_oop, store_result;
duke@435 1153
duke@435 1154 __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
kvn@3037 1155 __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
duke@435 1156 __ addcc(G0, O0, O0);
duke@435 1157 __ brx(Assembler::notZero, true, Assembler::pt, store_result); // if result is not NULL:
duke@435 1158 __ delayed()->ld_ptr(O0, 0, O0); // unbox it
duke@435 1159 __ mov(G0, O0);
duke@435 1160
duke@435 1161 __ bind(store_result);
duke@435 1162 // Store it where gc will look for it and result handler expects it.
duke@435 1163 __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
duke@435 1164
duke@435 1165 __ bind(no_oop);
duke@435 1166
duke@435 1167 }
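  // (A native method declared to return an object actually returns a JNI
  // handle, i.e. the address of an oop slot. The block above dereferences
  // it -- the "unbox" -- substituting NULL for a NULL handle, and parks the
  // bare oop in the frame's oop temp slot where GC can find and update it.)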
duke@435 1168
duke@435 1169
duke@435 1170 // handle exceptions (exception handling will handle unlocking!)
duke@435 1171 { Label L;
twisti@1162 1172 Address exception_addr(G2_thread, Thread::pending_exception_offset());
duke@435 1173 __ ld_ptr(exception_addr, Gtemp);
kvn@3037 1174 __ br_null_short(Gtemp, Assembler::pt, L);
duke@435 1175 // Note: This could be handled more efficiently since we know that the native
duke@435 1176 // method doesn't have an exception handler. We could directly return
duke@435 1177 // to the exception handler for the caller.
duke@435 1178 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
duke@435 1179 __ should_not_reach_here();
duke@435 1180 __ bind(L);
duke@435 1181 }
duke@435 1182
duke@435 1183 // JVMTI support (preserves thread register)
duke@435 1184 __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
duke@435 1185
duke@435 1186 if (synchronized) {
duke@435 1187 // save and restore any potential method result value around the unlocking operation
duke@435 1188 save_native_result();
duke@435 1189
duke@435 1190 __ add( __ top_most_monitor(), O1);
duke@435 1191 __ unlock_object(O1);
duke@435 1192
duke@435 1193 restore_native_result();
duke@435 1194 }
duke@435 1195
duke@435 1196 #if defined(COMPILER2) && !defined(_LP64)
duke@435 1197
duke@435 1198 // C2 expects long results in G1. We can't tell if we're returning to interpreted
duke@435 1199 // or compiled code, so just be safe.
duke@435 1200
duke@435 1201 __ sllx(O0, 32, G1); // Shift bits into high G1
duke@435 1202 __ srl (O1, 0, O1); // Zero extend O1
duke@435 1203 __ or3 (O1, G1, G1); // OR 64 bits into G1
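  // In effect: G1 = ((uint64_t)O0 << 32) | (uint32_t)O1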
duke@435 1204
duke@435 1205 #endif /* COMPILER2 && !_LP64 */
duke@435 1206
duke@435 1207 // dispose of return address and remove activation
duke@435 1208 #ifdef ASSERT
duke@435 1209 {
duke@435 1210 Label ok;
kvn@3037 1211 __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
duke@435 1212 __ stop("bad I5_savedSP value");
duke@435 1213 __ should_not_reach_here();
duke@435 1214 __ bind(ok);
duke@435 1215 }
duke@435 1216 #endif
duke@435 1217 if (TraceJumps) {
duke@435 1218 // Move target to register that is recordable
duke@435 1219 __ mov(Lscratch, G3_scratch);
duke@435 1220 __ JMP(G3_scratch, 0);
duke@435 1221 } else {
duke@435 1222 __ jmp(Lscratch, 0);
duke@435 1223 }
duke@435 1224 __ delayed()->nop();
duke@435 1225
duke@435 1226
duke@435 1227 if (inc_counter) {
duke@435 1228 // handle invocation counter overflow
duke@435 1229 __ bind(invocation_counter_overflow);
duke@435 1230 generate_counter_overflow(Lcontinue);
duke@435 1231 }
duke@435 1232
duke@435 1233
duke@435 1234
duke@435 1235 return entry;
duke@435 1236 }
duke@435 1237
duke@435 1238
duke@435 1239 // Generic method entry to (asm) interpreter
duke@435 1240 //------------------------------------------------------------------------------------------------------------------------
duke@435 1241 //
duke@435 1242 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
duke@435 1243 address entry = __ pc();
duke@435 1244
duke@435 1245 bool inc_counter = UseCompiler || CountCompiledCalls;
duke@435 1246
duke@435 1247 // the following temporary registers are used during frame creation
duke@435 1248 const Register Gtmp1 = G3_scratch;
duke@435 1249 const Register Gtmp2 = G1_scratch;
duke@435 1250
duke@435 1251 // make sure registers are different!
duke@435 1252 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
duke@435 1253
jiangli@4338 1254 const Address constMethod (G5_method, Method::const_offset());
duke@435 1255 // G5_method seems to be live at the point this is used, so we could make this look
duke@435 1256 // consistent and use it in the asserts.
coleenp@4037 1257 const Address access_flags (Lmethod, Method::access_flags_offset());
duke@435 1258
duke@435 1259 const Register Glocals_size = G3;
duke@435 1260 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
duke@435 1261
duke@435 1262 // make sure method is not native & not abstract
duke@435 1263 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
duke@435 1264 #ifdef ASSERT
coleenp@4037 1265 __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
duke@435 1266 {
duke@435 1267 Label L;
duke@435 1268 __ btst(JVM_ACC_NATIVE, Gtmp1);
duke@435 1269 __ br(Assembler::zero, false, Assembler::pt, L);
duke@435 1270 __ delayed()->nop();
duke@435 1271 __ stop("tried to execute native method as non-native");
duke@435 1272 __ bind(L);
duke@435 1273 }
duke@435 1274 { Label L;
duke@435 1275 __ btst(JVM_ACC_ABSTRACT, Gtmp1);
duke@435 1276 __ br(Assembler::zero, false, Assembler::pt, L);
duke@435 1277 __ delayed()->nop();
duke@435 1278 __ stop("tried to execute abstract method as non-abstract");
duke@435 1279 __ bind(L);
duke@435 1280 }
duke@435 1281 #endif // ASSERT
duke@435 1282
duke@435 1283 // generate the code to allocate the interpreter stack frame
duke@435 1284
duke@435 1285 generate_fixed_frame(false);
duke@435 1286
duke@435 1287 #ifdef FAST_DISPATCH
duke@435 1288 __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
duke@435 1289 // set bytecode dispatch table base
duke@435 1290 #endif
duke@435 1291
duke@435 1292 //
duke@435 1293 // Code to initialize the extra (i.e. non-parm) locals
duke@435 1294 //
duke@435 1295 Register init_value = noreg; // will be G0 if we must clear locals
duke@435 1296 // The way the code was set up before, zerolocals was always true for vanilla java entries.
duke@435 1297 // It could only be false for the specialized entries like accessor or empty, which have
duke@435 1298 // no extra locals, so the testing was a waste of time and the extra locals were always
duke@435 1299 // initialized. We removed this extra complication from already overly complicated code.
duke@435 1300
duke@435 1301 init_value = G0;
duke@435 1302 Label clear_loop;
duke@435 1303
jiangli@4338 1304 const Register RconstMethod = O1;
jiangli@4338 1305 const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
jiangli@4338 1306 const Address size_of_locals (RconstMethod, ConstMethod::size_of_locals_offset());
jiangli@4338 1307
duke@435 1308 // NOTE: If you change the frame layout, this code will need to
duke@435 1309 // be updated!
jiangli@4338 1310 __ ld_ptr( constMethod, RconstMethod );
duke@435 1311 __ lduh( size_of_locals, O2 );
duke@435 1312 __ lduh( size_of_parameters, O1 );
twisti@1861 1313 __ sll( O2, Interpreter::logStackElementSize, O2);
twisti@1861 1314 __ sll( O1, Interpreter::logStackElementSize, O1 );
duke@435 1315 __ sub( Llocals, O2, O2 );
duke@435 1316 __ sub( Llocals, O1, O1 );
duke@435 1317
duke@435 1318 __ bind( clear_loop );
duke@435 1319 __ inc( O2, wordSize );
duke@435 1320
duke@435 1321 __ cmp( O2, O1 );
duke@435 1322 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
duke@435 1323 __ delayed()->st_ptr( init_value, O2, 0 );
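  // The loop above clears just the non-parameter local slots; roughly, in
  // pseudocode (slot addresses grow downwards from Llocals):
  //   for (p = Llocals - locals_bytes + wordSize; p <= Llocals - parms_bytes; p += wordSize)
  //     *(intptr_t*)p = 0;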
duke@435 1324
twisti@1162 1325 const Address do_not_unlock_if_synchronized(G2_thread,
twisti@1162 1326 JavaThread::do_not_unlock_if_synchronized_offset());
duke@435 1327 // Since at this point in the method invocation the exception handler
duke@435 1328 // would try to exit the monitor of a synchronized method which hasn't
duke@435 1329 // been entered yet, we set the thread-local variable
duke@435 1330 // _do_not_unlock_if_synchronized to true. If any exception was thrown by
duke@435 1331 // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
duke@435 1332 // check this thread-local flag.
duke@435 1333 __ movbool(true, G3_scratch);
duke@435 1334 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
duke@435 1335
duke@435 1336 // increment invocation counter and check for overflow
duke@435 1337 //
duke@435 1338 // Note: checking for negative value instead of overflow
duke@435 1339 // so we have a 'sticky' overflow test (may be of
duke@435 1340 // importance as soon as we have true MT/MP)
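  // The idea, roughly (a sketch, not the generated code):
  //   counter += increment;
  //   if ((int)(counter - limit) >= 0) goto invocation_counter_overflow;
  // so once the threshold is crossed the test keeps firing, even if racing
  // threads keep bumping the shared counter.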
duke@435 1341 Label invocation_counter_overflow;
duke@435 1342 Label profile_method;
duke@435 1343 Label profile_method_continue;
duke@435 1344 Label Lcontinue;
duke@435 1345 if (inc_counter) {
duke@435 1346 generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
duke@435 1347 if (ProfileInterpreter) {
duke@435 1348 __ bind(profile_method_continue);
duke@435 1349 }
duke@435 1350 }
duke@435 1351 __ bind(Lcontinue);
duke@435 1352
duke@435 1353 bang_stack_shadow_pages(false);
duke@435 1354
duke@435 1355 // reset the _do_not_unlock_if_synchronized flag
duke@435 1356 __ stbool(G0, do_not_unlock_if_synchronized);
duke@435 1357
duke@435 1358 // check for synchronized methods
duke@435 1359 // Must happen AFTER invocation_counter check and stack overflow check,
duke@435 1360 // so the method is not locked if the counter overflows.
duke@435 1361
duke@435 1362 if (synchronized) {
duke@435 1363 lock_method();
duke@435 1364 } else {
duke@435 1365 #ifdef ASSERT
duke@435 1366 { Label ok;
duke@435 1367 __ ld(access_flags, O0);
duke@435 1368 __ btst(JVM_ACC_SYNCHRONIZED, O0);
duke@435 1369 __ br( Assembler::zero, false, Assembler::pt, ok);
duke@435 1370 __ delayed()->nop();
duke@435 1371 __ stop("method needs synchronization");
duke@435 1372 __ bind(ok);
duke@435 1373 }
duke@435 1374 #endif // ASSERT
duke@435 1375 }
duke@435 1376
duke@435 1377 // start execution
duke@435 1378
duke@435 1379 __ verify_thread();
duke@435 1380
duke@435 1381 // jvmti support
duke@435 1382 __ notify_method_entry();
duke@435 1383
duke@435 1384 // start executing instructions
duke@435 1385 __ dispatch_next(vtos);
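  // (dispatch_next fetches the bytecode at Lbcp and jumps through the active
  // dispatch table; execution falls into the code below only via the
  // profile/overflow labels bound there.)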
duke@435 1386
duke@435 1387
duke@435 1388 if (inc_counter) {
duke@435 1389 if (ProfileInterpreter) {
duke@435 1390 // We have decided to profile this method in the interpreter
duke@435 1391 __ bind(profile_method);
duke@435 1392
iveresov@2438 1393 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
iveresov@2438 1394 __ set_method_data_pointer_for_bcp();
kvn@3037 1395 __ ba_short(profile_method_continue);
duke@435 1396 }
duke@435 1397
duke@435 1398 // handle invocation counter overflow
duke@435 1399 __ bind(invocation_counter_overflow);
duke@435 1400 generate_counter_overflow(Lcontinue);
duke@435 1401 }
duke@435 1402
duke@435 1403
duke@435 1404 return entry;
duke@435 1405 }
duke@435 1406
duke@435 1407
duke@435 1408 //----------------------------------------------------------------------------------------------------
duke@435 1409 // Entry points & stack frame layout
duke@435 1410 //
duke@435 1411 // Here we generate the various kind of entries into the interpreter.
duke@435 1412 // The two main entry types are generic bytecode methods and native call methods.
duke@435 1413 // These both come in synchronized and non-synchronized versions but the
duke@435 1414 // frame layout they create is very similar. The other method entry
duke@435 1415 // types are special-purpose entries that combine entry
duke@435 1416 // and interpretation all in one. These are for trivial methods like
duke@435 1417 // accessor, empty, or special math methods.
duke@435 1418 //
duke@435 1419 // When control flow reaches any of the entry types for the interpreter
duke@435 1420 // the following holds ->
duke@435 1421 //
duke@435 1422 // C2 Calling Conventions:
duke@435 1423 //
duke@435 1424 // The entry code below assumes that the following registers are set
duke@435 1425 // when coming in:
coleenp@4037 1426 // G5_method: holds the Method* of the method to call
duke@435 1427 // Lesp: points to the TOS of the caller's expression stack
duke@435 1428 // after having pushed all the parameters
duke@435 1429 //
duke@435 1430 // The entry code does the following to setup an interpreter frame
duke@435 1431 // pop parameters from the caller's stack by adjusting Lesp
duke@435 1432 // set O0 to Lesp
duke@435 1433 // compute X = (max_locals - num_parameters)
duke@435 1434 // bump SP up by X to accommodate the extra locals
duke@435 1435 // compute X = max_expression_stack
duke@435 1436 // + vm_local_words
duke@435 1437 // + 16 words of register save area
duke@435 1438 // save the frame with "save sp, -X, sp" (the stack grows towards lower addresses)
duke@435 1439 // set Lbcp, Lmethod, LcpoolCache
duke@435 1440 // set Llocals to i0
duke@435 1441 // set Lmonitors to FP - rounded_vm_local_words
duke@435 1442 // set Lesp to Lmonitors - 4
duke@435 1443 //
duke@435 1444 // The frame has now been set up to do the rest of the entry code
duke@435 1445
duke@435 1446 // Try this optimization: Most method entries could live in a
duke@435 1447 // "one size fits all" stack frame without all the dynamic size
duke@435 1448 // calculations. It might be profitable to do all this calculation
duke@435 1449 // statically and approximately for "small enough" methods.
duke@435 1450
duke@435 1451 //-----------------------------------------------------------------------------------------------
duke@435 1452
duke@435 1453 // C1 Calling conventions
duke@435 1454 //
duke@435 1455 // Upon method entry, the following registers are setup:
duke@435 1456 //
duke@435 1457 // g2 G2_thread: current thread
duke@435 1458 // g5 G5_method: method to activate
duke@435 1459 // g4 Gargs : pointer to last argument
duke@435 1460 //
duke@435 1461 //
duke@435 1462 // Stack:
duke@435 1463 //
duke@435 1464 // +---------------+ <--- sp
duke@435 1465 // | |
duke@435 1466 // : reg save area :
duke@435 1467 // | |
duke@435 1468 // +---------------+ <--- sp + 0x40
duke@435 1469 // | |
duke@435 1470 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
duke@435 1471 // | |
duke@435 1472 // +---------------+ <--- sp + 0x5c
duke@435 1473 // | |
duke@435 1474 // : free :
duke@435 1475 // | |
duke@435 1476 // +---------------+ <--- Gargs
duke@435 1477 // | |
duke@435 1478 // : arguments :
duke@435 1479 // | |
duke@435 1480 // +---------------+
duke@435 1481 // | |
duke@435 1482 //
duke@435 1483 //
duke@435 1484 //
duke@435 1485 // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
duke@435 1486 //
duke@435 1487 // +---------------+ <--- sp
duke@435 1488 // | |
duke@435 1489 // : reg save area :
duke@435 1490 // | |
duke@435 1491 // +---------------+ <--- sp + 0x40
duke@435 1492 // | |
duke@435 1493 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
duke@435 1494 // | |
duke@435 1495 // +---------------+ <--- sp + 0x5c
duke@435 1496 // | |
duke@435 1497 // : :
duke@435 1498 // | | <--- Lesp
duke@435 1499 // +---------------+ <--- Lmonitors (fp - 0x18)
duke@435 1500 // | VM locals |
duke@435 1501 // +---------------+ <--- fp
duke@435 1502 // | |
duke@435 1503 // : reg save area :
duke@435 1504 // | |
duke@435 1505 // +---------------+ <--- fp + 0x40
duke@435 1506 // | |
duke@435 1507 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
duke@435 1508 // | |
duke@435 1509 // +---------------+ <--- fp + 0x5c
duke@435 1510 // | |
duke@435 1511 // : free :
duke@435 1512 // | |
duke@435 1513 // +---------------+
duke@435 1514 // | |
duke@435 1515 // : nonarg locals :
duke@435 1516 // | |
duke@435 1517 // +---------------+
duke@435 1518 // | |
duke@435 1519 // : arguments :
duke@435 1520 // | | <--- Llocals
duke@435 1521 // +---------------+ <--- Gargs
duke@435 1522 // | |
duke@435 1523
duke@435 1524 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
duke@435 1525
duke@435 1526 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
duke@435 1527 // expression stack, the callee will have callee_extra_locals (so we can account for
duke@435 1528 // frame extension) and monitor_size for monitors. Basically we need to calculate
duke@435 1529 // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
duke@435 1530 //
duke@435 1531 //
duke@435 1532 // The big complicating thing here is that we must ensure that the stack stays properly
duke@435 1533 // aligned. This would be even uglier if the monitor size weren't a multiple of what the
duke@435 1534 // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
duke@435 1535 // the caller, so we must ensure that it is properly aligned for our callee.
duke@435 1536 //
duke@435 1537 const int rounded_vm_local_words =
duke@435 1538 round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
duke@435 1539 // callee_locals and max_stack are counts, not sizes in the frame.
duke@435 1540 const int locals_size =
twisti@1861 1541 round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
twisti@1861 1542 const int max_stack_words = max_stack * Interpreter::stackElementWords;
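  // Schematically, in words:
  //   frame_size = round_to(max_stack_words + rounded_vm_local_words
  //                         + frame::memory_parameter_word_sp_offset, WordsPerLong)
  //                + locals_size + monitor_size
  // where locals_size and monitor_size are already WordsPerLong-aligned.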
duke@435 1543 return (round_to((max_stack_words
duke@435 1544 + rounded_vm_local_words
duke@435 1545 + frame::memory_parameter_word_sp_offset), WordsPerLong)
duke@435 1546 // already rounded
duke@435 1547 + locals_size + monitor_size);
duke@435 1548 }
duke@435 1549
duke@435 1550 // How much stack a method top interpreter activation needs in words.
coleenp@4037 1551 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
duke@435 1552
duke@435 1553 // See call_stub code
duke@435 1554 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
duke@435 1555 WordsPerLong); // 7 + register save area
duke@435 1556
duke@435 1557 // Save space for one monitor to get into the interpreted method in case
duke@435 1558 // the method is synchronized
duke@435 1559 int monitor_size = method->is_synchronized() ?
duke@435 1560 1*frame::interpreter_frame_monitor_size() : 0;
duke@435 1561 return size_activation_helper(method->max_locals(), method->max_stack(),
duke@435 1562 monitor_size) + call_stub_size;
duke@435 1563 }
duke@435 1564
coleenp@4037 1565 int AbstractInterpreter::layout_activation(Method* method,
duke@435 1566 int tempcount,
duke@435 1567 int popframe_extra_args,
duke@435 1568 int moncount,
never@2901 1569 int caller_actual_parameters,
duke@435 1570 int callee_param_count,
duke@435 1571 int callee_local_count,
duke@435 1572 frame* caller,
duke@435 1573 frame* interpreter_frame,
roland@4727 1574 bool is_top_frame,
roland@4727 1575 bool is_bottom_frame) {
duke@435 1576 // Note: This calculation must exactly parallel the frame setup
duke@435 1577 // in InterpreterGenerator::generate_fixed_frame.
duke@435 1578 // If interpreter_frame != NULL, set up the following variables:
duke@435 1579 // - Lmethod
duke@435 1580 // - Llocals
duke@435 1581 // - Lmonitors (to the indicated number of monitors)
duke@435 1582 // - Lesp (to the indicated number of temps)
duke@435 1583 // The frame interpreter_frame (if not NULL) on entry is a description of the caller of the frame
duke@435 1584 // we are about to lay out. We are guaranteed that we will be able to fill in a
duke@435 1585 // new interpreter frame as its callee (i.e. the stack space is allocated and
duke@435 1586 // the amount was determined by an earlier call to this method with interpreter_frame == NULL).
duke@435 1587 // On return interpreter_frame (if not NULL) will describe the interpreter frame we just laid out.
duke@435 1588
duke@435 1589 int monitor_size = moncount * frame::interpreter_frame_monitor_size();
duke@435 1590 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
duke@435 1591
duke@435 1592 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
duke@435 1593 //
duke@435 1594 // Note: if you look closely this appears to be doing something much different
duke@435 1595 // than generate_fixed_frame. What is happening is this. On sparc we have to do
duke@435 1596 // this dance with interpreter_sp_adjustment because the window save area would
duke@435 1597 // appear just below the bottom (tos) of the caller's java expression stack. Because
duke@435 1598 // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
duke@435 1599 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
duke@435 1600 // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
duke@435 1601 // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
duke@435 1602 // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
duke@435 1603 // because the oldest frame would have to adjust its caller's frame and yet that frame
duke@435 1604 // already exists and isn't part of this array of frames we are unpacking. So at first
duke@435 1605 // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
duke@435 1606 // after it calculates all of the frames' on_stack_size()s, will figure out the
duke@435 1607 // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
duke@435 1608 // add up. It does seem like it would be simpler to account for the adjustment here (and remove the
duke@435 1609 // callee... parameters here). However, this would mean that this routine would have to take
duke@435 1610 // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
duke@435 1611 // and run the calling loop in the reverse order. This would also appear to mean making
duke@435 1612 // this code aware of what the interactions are when that initial caller frame was an osr or
duke@435 1613 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
duke@435 1614 // there is no sense in messing with working code.
duke@435 1615 //
duke@435 1616
duke@435 1617 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
duke@435 1618 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
duke@435 1619
duke@435 1620 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
duke@435 1621 monitor_size);
duke@435 1622
duke@435 1623 if (interpreter_frame != NULL) {
duke@435 1624 // The skeleton frame must already look like an interpreter frame
duke@435 1625 // even if not fully filled out.
duke@435 1626 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
duke@435 1627
duke@435 1628 intptr_t* fp = interpreter_frame->fp();
duke@435 1629
duke@435 1630 JavaThread* thread = JavaThread::current();
duke@435 1631 RegisterMap map(thread, false);
duke@435 1632 // More verification that skeleton frame is properly walkable
duke@435 1633 assert(fp == caller->sp(), "fp must match");
duke@435 1634
duke@435 1635 intptr_t* montop = fp - rounded_vm_local_words;
duke@435 1636
duke@435 1637 // preallocate monitors (cf. __ add_monitor_to_stack)
duke@435 1638 intptr_t* monitors = montop - monitor_size;
duke@435 1639
duke@435 1640 // preallocate stack space
duke@435 1641 intptr_t* esp = monitors - 1 -
twisti@1861 1642 (tempcount * Interpreter::stackElementWords) -
duke@435 1643 popframe_extra_args;
duke@435 1644
twisti@1861 1645 int local_words = method->max_locals() * Interpreter::stackElementWords;
never@2901 1646 NEEDS_CLEANUP;
duke@435 1647 intptr_t* locals;
never@2901 1648 if (caller->is_interpreted_frame()) {
never@2901 1649 // Can force the locals area to end up properly overlapping the top of the expression stack.
never@2901 1650 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
never@2901 1651 // Note that this computation means we replace size_of_parameters() values from the caller
never@2901 1652 // interpreter frame's expression stack with our argument locals
never@2901 1653 int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
never@2901 1654 locals = Lesp_ptr + parm_words;
never@2901 1655 int delta = local_words - parm_words;
never@2901 1656 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
never@2901 1657 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
roland@4727 1658 if (!is_bottom_frame) {
roland@4727 1659 // Llast_SP is set below for the current frame to SP (with the
roland@4727 1660 // extra space for the callee's locals). Here we adjust
roland@4727 1661 // Llast_SP for the caller's frame, removing the extra space
roland@4727 1662 // for the current method's locals.
roland@4727 1663 *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
roland@4727 1664 } else {
roland@4727 1665 assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
roland@4727 1666 }
duke@435 1667 } else {
twisti@3969 1668 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
never@2901 1669 // Don't have Lesp available; lay out locals block in the caller
never@2901 1670 // adjacent to the register window save area.
never@2901 1671 //
never@2901 1672 // Compiled frames do not allocate a varargs area which is why this if
never@2901 1673 // statement is needed.
never@2901 1674 //
never@2901 1675 if (caller->is_compiled_frame()) {
never@2901 1676 locals = fp + frame::register_save_words + local_words - 1;
never@2901 1677 } else {
never@2901 1678 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
never@2901 1679 }
never@2901 1680 if (!caller->is_entry_frame()) {
never@2901 1681 // Caller wants its own SP back
never@2901 1682 int caller_frame_size = caller->cb()->frame_size();
never@2901 1683 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
duke@435 1684 }
duke@435 1685 }
duke@435 1686 if (TraceDeoptimization) {
duke@435 1687 if (caller->is_entry_frame()) {
duke@435 1688 // make sure I5_savedSP and the entry frame's notion of saved SP
duke@435 1689 // agree. This assertion duplicates a check in entry frame code
duke@435 1690 // but catches the failure earlier.
duke@435 1691 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
duke@435 1692 "would change callers SP");
duke@435 1693 }
duke@435 1694 if (caller->is_entry_frame()) {
duke@435 1695 tty->print("entry ");
duke@435 1696 }
duke@435 1697 if (caller->is_compiled_frame()) {
duke@435 1698 tty->print("compiled ");
duke@435 1699 if (caller->is_deoptimized_frame()) {
duke@435 1700 tty->print("(deopt) ");
duke@435 1701 }
duke@435 1702 }
duke@435 1703 if (caller->is_interpreted_frame()) {
duke@435 1704 tty->print("interpreted ");
duke@435 1705 }
duke@435 1706 tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)caller->sp());
duke@435 1707 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->sp(), (intptr_t)(caller->sp() + 16));
duke@435 1708 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)(caller->fp() + 16));
duke@435 1709 tty->print_cr("interpreter fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)interpreter_frame->sp());
duke@435 1710 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->sp(), (intptr_t)(interpreter_frame->sp() + 16));
duke@435 1711 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)(interpreter_frame->fp() + 16));
duke@435 1712 tty->print_cr("Llocals = " INTPTR_FORMAT, (intptr_t)locals);
duke@435 1713 tty->print_cr("Lesp = " INTPTR_FORMAT, (intptr_t)esp);
duke@435 1714 tty->print_cr("Lmonitors = " INTPTR_FORMAT, (intptr_t)monitors);
duke@435 1715 }
duke@435 1716
duke@435 1717 if (method->max_locals() > 0) {
duke@435 1718 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
duke@435 1719 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
duke@435 1720 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
duke@435 1721 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
duke@435 1722 }
duke@435 1723 #ifdef _LP64
duke@435 1724 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
duke@435 1725 #endif
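  // (On SPARC v9 the ABI biases stack pointers by STACK_BIAS (0x7ff), so a
  // correctly biased saved SP is always odd -- which is all the assert above
  // checks.)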
duke@435 1726
duke@435 1727 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
duke@435 1728 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
duke@435 1729 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
duke@435 1730 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
duke@435 1731 // Llast_SP will be same as SP as there is no adapter space
duke@435 1732 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
duke@435 1733 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
duke@435 1734 #ifdef FAST_DISPATCH
duke@435 1735 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
duke@435 1736 #endif
duke@435 1737
duke@435 1738
duke@435 1739 #ifdef ASSERT
duke@435 1740 BasicObjectLock* mp = (BasicObjectLock*)monitors;
duke@435 1741
duke@435 1742 assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
twisti@1861 1743 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
duke@435 1744 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
duke@435 1745 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
duke@435 1746 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
duke@435 1747
duke@435 1748 // check bounds
duke@435 1749 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
duke@435 1750 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
duke@435 1751 assert(lo < monitors && montop <= hi, "monitors in bounds");
duke@435 1752 assert(lo <= esp && esp < monitors, "esp in bounds");
duke@435 1753 #endif // ASSERT
duke@435 1754 }
duke@435 1755
duke@435 1756 return raw_frame_size;
duke@435 1757 }
duke@435 1758
duke@435 1759 //----------------------------------------------------------------------------------------------------
duke@435 1760 // Exceptions
duke@435 1761 void TemplateInterpreterGenerator::generate_throw_exception() {
duke@435 1762
duke@435 1763 // Entry point in previous activation (i.e., if the caller was interpreted)
duke@435 1764 Interpreter::_rethrow_exception_entry = __ pc();
duke@435 1765 // O0: exception
duke@435 1766
duke@435 1767 // entry point for exceptions thrown within interpreter code
duke@435 1768 Interpreter::_throw_exception_entry = __ pc();
duke@435 1769 __ verify_thread();
duke@435 1770 // expression stack is undefined here
duke@435 1771 // O0: exception, i.e. Oexception
duke@435 1772 // Lbcp: exception bcx
duke@435 1773 __ verify_oop(Oexception);
duke@435 1774
duke@435 1775
duke@435 1776 // expression stack must be empty before entering the VM in case of an exception
duke@435 1777 __ empty_expression_stack();
duke@435 1778 // find exception handler address and preserve exception oop
duke@435 1779 // call C routine to find handler and jump to it
duke@435 1780 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
duke@435 1781 __ push_ptr(O1); // push exception for exception handler bytecodes
duke@435 1782
duke@435 1783 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
duke@435 1784 __ delayed()->nop();
duke@435 1785
duke@435 1786
duke@435 1787 // if the exception is not handled in the current frame
duke@435 1788 // the frame is removed and the exception is rethrown
duke@435 1789 // (i.e. exception continuation is _rethrow_exception)
duke@435 1790 //
duke@435 1791 // Note: At this point the bci is still the one for the instruction which caused
duke@435 1792 // the exception and the expression stack is empty. Thus, for any VM calls
duke@435 1793 // at this point, GC will find a legal oop map (with empty expression stack).
duke@435 1794
duke@435 1795 // in current activation
duke@435 1796 // tos: exception
duke@435 1797 // Lbcp: exception bcp
duke@435 1798
duke@435 1799 //
duke@435 1800 // JVMTI PopFrame support
duke@435 1801 //
duke@435 1802
duke@435 1803 Interpreter::_remove_activation_preserving_args_entry = __ pc();
twisti@1162 1804 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
duke@435 1805 // Set the popframe_processing bit in popframe_condition indicating that we are
duke@435 1806 // currently handling popframe, so that call_VMs that may happen later do not trigger new
duke@435 1807 // popframe handling cycles.
duke@435 1808
duke@435 1809 __ ld(popframe_condition_addr, G3_scratch);
duke@435 1810 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
duke@435 1811 __ stw(G3_scratch, popframe_condition_addr);
duke@435 1812
duke@435 1813 // Empty the expression stack, as in normal exception handling
duke@435 1814 __ empty_expression_stack();
duke@435 1815 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
duke@435 1816
duke@435 1817 {
duke@435 1818 // Check to see whether we are returning to a deoptimized frame.
duke@435 1819 // (The PopFrame call ensures that the caller of the popped frame is
duke@435 1820 // either interpreted or compiled and deoptimizes it if compiled.)
duke@435 1821 // In this case, we can't call dispatch_next() after the frame is
duke@435 1822 // popped, but instead must save the incoming arguments and restore
duke@435 1823 // them after deoptimization has occurred.
duke@435 1824 //
duke@435 1825 // Note that we don't compare the return PC against the
duke@435 1826 // deoptimization blob's unpack entry because of the presence of
duke@435 1827 // adapter frames in C2.
duke@435 1828 Label caller_not_deoptimized;
duke@435 1829 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
kvn@3037 1830 __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
duke@435 1831
duke@435 1832 const Register Gtmp1 = G3_scratch;
duke@435 1833 const Register Gtmp2 = G1_scratch;
jiangli@4338 1834 const Register RconstMethod = Gtmp1;
jiangli@4338 1835 const Address constMethod(Lmethod, Method::const_offset());
jiangli@4338 1836 const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
duke@435 1837
duke@435 1838 // Compute size of arguments for saving when returning to deoptimized caller
jiangli@4338 1839 __ ld_ptr(constMethod, RconstMethod);
jiangli@4338 1840 __ lduh(size_of_parameters, Gtmp1);
twisti@1861 1841 __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
duke@435 1842 __ sub(Llocals, Gtmp1, Gtmp2);
duke@435 1843 __ add(Gtmp2, wordSize, Gtmp2);
duke@435 1844 // Save these arguments
duke@435 1845 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
duke@435 1846 // Inform deoptimization that it is responsible for restoring these arguments
duke@435 1847 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
twisti@1162 1848 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
duke@435 1849 __ st(Gtmp1, popframe_condition_addr);
duke@435 1850
duke@435 1851 // Return from the current method
duke@435 1852 // The caller's SP was adjusted upon method entry to accommodate
duke@435 1853 // the callee's non-argument locals. Undo that adjustment.
duke@435 1854 __ ret();
duke@435 1855 __ delayed()->restore(I5_savedSP, G0, SP);
duke@435 1856
duke@435 1857 __ bind(caller_not_deoptimized);
duke@435 1858 }
duke@435 1859
duke@435 1860 // Clear the popframe condition flag
duke@435 1861 __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
duke@435 1862
duke@435 1863 // Get out of the current method (how this is done depends on the particular compiler calling
duke@435 1864 // convention that the interpreter currently follows)
duke@435 1865 // The caller's SP was adjusted upon method entry to accommodate
duke@435 1866 // the callee's non-argument locals. Undo that adjustment.
duke@435 1867 __ restore(I5_savedSP, G0, SP);
duke@435 1868 // The method data pointer was incremented already during
duke@435 1869 // call profiling. We have to restore the mdp for the current bcp.
duke@435 1870 if (ProfileInterpreter) {
duke@435 1871 __ set_method_data_pointer_for_bcp();
duke@435 1872 }
sspitsyn@5496 1873
sspitsyn@5496 1874 #if INCLUDE_JVMTI
sspitsyn@5496 1875 if (EnableInvokeDynamic) {
sspitsyn@5496 1876 Label L_done;
sspitsyn@5496 1877
sspitsyn@5496 1878 __ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode
sspitsyn@5496 1879 __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
sspitsyn@5496 1880
sspitsyn@5496 1881 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
sspitsyn@5496 1882 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
sspitsyn@5496 1883
sspitsyn@5496 1884 __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
sspitsyn@5496 1885
sspitsyn@5496 1886 __ br_null(G1_scratch, false, Assembler::pn, L_done);
sspitsyn@5496 1887 __ delayed()->nop();
sspitsyn@5496 1888
sspitsyn@5496 1889 __ st_ptr(G1_scratch, Lesp, wordSize);
sspitsyn@5496 1890 __ bind(L_done);
sspitsyn@5496 1891 }
sspitsyn@5496 1892 #endif // INCLUDE_JVMTI
sspitsyn@5496 1893
duke@435 1894 // Resume bytecode interpretation at the current bcp
duke@435 1895 __ dispatch_next(vtos);
duke@435 1896 // end of JVMTI PopFrame support
duke@435 1897
duke@435 1898 Interpreter::_remove_activation_entry = __ pc();
duke@435 1899
duke@435 1900 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
duke@435 1901 __ pop_ptr(Oexception); // get exception
duke@435 1902
duke@435 1903 // Intel has the following comment:
duke@435 1904 //// remove the activation (without doing throws on illegalMonitorExceptions)
duke@435 1905 // They remove the activation without checking for bad monitor state.
duke@435 1906 // %%% We should make sure this is the right semantics before implementing.
duke@435 1907
duke@435 1908 __ set_vm_result(Oexception);
duke@435 1909 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
duke@435 1910
duke@435 1911 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
duke@435 1912
duke@435 1913 __ get_vm_result(Oexception);
duke@435 1914 __ verify_oop(Oexception);
duke@435 1915
duke@435 1916 const int return_reg_adjustment = frame::pc_return_offset;
twisti@1162 1917 Address issuing_pc_addr(I7, return_reg_adjustment);
duke@435 1918
duke@435 1919 // We are done with this activation frame; find out where to go next.
duke@435 1920 // The continuation point will be an exception handler, which expects
duke@435 1921 // the following registers set up:
duke@435 1922 //
duke@435 1923 // Oexception: exception
duke@435 1924 // Oissuing_pc: the local call that threw exception
duke@435 1925 // Other On: garbage
duke@435 1926 // In/Ln: the contents of the caller's register window
duke@435 1927 //
duke@435 1928 // We do the required restore at the last possible moment, because we
duke@435 1929 // need to preserve some state across a runtime call.
duke@435 1930 // (Remember that the caller activation is unknown--it might not be
duke@435 1931 // interpreted, so things like Lscratch are useless in the caller.)
duke@435 1932
duke@435 1933 // Although the Intel version uses call_C, we can use the more
duke@435 1934 // compact call_VM. (The only real difference on SPARC is a
duke@435 1935 // harmlessly ignored [re]set_last_Java_frame, compared with
duke@435 1936 // the Intel code which lacks this.)
duke@435 1937 __ mov(Oexception, Oexception->after_save()); // get exception in I0 so it will be on O0 after restore
duke@435 1938 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
duke@435 1939 __ super_call_VM_leaf(L7_thread_cache,
duke@435 1940 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
twisti@1730 1941 G2_thread, Oissuing_pc->after_save());
duke@435 1942
duke@435 1943 // The caller's SP was adjusted upon method entry to accommodate
duke@435 1944 // the callee's non-argument locals. Undo that adjustment.
duke@435 1945 __ JMP(O0, 0); // return exception handler in caller
duke@435 1946 __ delayed()->restore(I5_savedSP, G0, SP);
duke@435 1947
duke@435 1948 // (same old exception object is already in Oexception; see above)
duke@435 1949 // Note that an "issuing PC" is actually the next PC after the call
duke@435 1950 }
duke@435 1951
duke@435 1952
duke@435 1953 //
duke@435 1954 // JVMTI ForceEarlyReturn support
duke@435 1955 //
duke@435 1956
duke@435 1957 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
duke@435 1958 address entry = __ pc();
duke@435 1959
duke@435 1960 __ empty_expression_stack();
duke@435 1961 __ load_earlyret_value(state);
duke@435 1962
twisti@1162 1963 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
twisti@1162 1964 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
duke@435 1965
duke@435 1966 // Clear the earlyret state
duke@435 1967 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
duke@435 1968
duke@435 1969 __ remove_activation(state,
duke@435 1970 /* throw_monitor_exception */ false,
duke@435 1971 /* install_monitor_exception */ false);
duke@435 1972
duke@435 1973 // The caller's SP was adjusted upon method entry to accommodate
duke@435 1974 // the callee's non-argument locals. Undo that adjustment.
duke@435 1975 __ ret(); // return to caller
duke@435 1976 __ delayed()->restore(I5_savedSP, G0, SP);
duke@435 1977
duke@435 1978 return entry;
duke@435 1979 } // end of JVMTI ForceEarlyReturn support
duke@435 1980
duke@435 1981
duke@435 1982 //------------------------------------------------------------------------------------------------------------------------
duke@435 1983 // Helper for vtos entry point generation
duke@435 1984
duke@435 1985 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
duke@435 1986 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
duke@435 1987 Label L;
kvn@3037 1988 aep = __ pc(); __ push_ptr(); __ ba_short(L);
kvn@3037 1989 fep = __ pc(); __ push_f(); __ ba_short(L);
kvn@3037 1990 dep = __ pc(); __ push_d(); __ ba_short(L);
kvn@3037 1991 lep = __ pc(); __ push_l(); __ ba_short(L);
duke@435 1992 iep = __ pc(); __ push_i();
duke@435 1993 bep = cep = sep = iep; // there aren't any
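  // (byte, char and short results are int-sized on the expression stack,
  // so they can share the int entry point)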
duke@435 1994 vep = __ pc(); __ bind(L); // fall through
duke@435 1995 generate_and_dispatch(t);
duke@435 1996 }
duke@435 1997
duke@435 1998 // --------------------------------------------------------------------------------
duke@435 1999
duke@435 2000
duke@435 2001 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
duke@435 2002 : TemplateInterpreterGenerator(code) {
duke@435 2003 generate_all(); // down here so it can be "virtual"
duke@435 2004 }
duke@435 2005
duke@435 2006 // --------------------------------------------------------------------------------
duke@435 2007
duke@435 2008 // Non-product code
duke@435 2009 #ifndef PRODUCT
duke@435 2010 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
duke@435 2011 address entry = __ pc();
duke@435 2012
duke@435 2013 __ push(state);
duke@435 2014 __ mov(O7, Lscratch); // protect return address within interpreter
duke@435 2015
duke@435 2016 // Pass a 0 (not used on SPARC) and the top of stack to the bytecode tracer
duke@435 2017 __ mov( Otos_l2, G3_scratch );
duke@435 2018 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
duke@435 2019 __ mov(Lscratch, O7); // restore return address
duke@435 2020 __ pop(state);
duke@435 2021 __ retl();
duke@435 2022 __ delayed()->nop();
duke@435 2023
duke@435 2024 return entry;
duke@435 2025 }
duke@435 2026
duke@435 2027
duke@435 2028 // helpers for generate_and_dispatch
duke@435 2029
duke@435 2030 void TemplateInterpreterGenerator::count_bytecode() {
twisti@1162 2031 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
duke@435 2032 }
duke@435 2033
duke@435 2034
duke@435 2035 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
twisti@1162 2036 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
duke@435 2037 }
duke@435 2038
duke@435 2039
duke@435 2040 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
twisti@1162 2041 AddressLiteral index (&BytecodePairHistogram::_index);
twisti@1162 2042 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
duke@435 2043
duke@435 2044 // get index, shift out old bytecode, bring in new bytecode, and store it
duke@435 2045 // _index = (_index >> log2_number_of_codes) |
duke@435 2046 // (bytecode << log2_number_of_codes);
duke@435 2047
twisti@1162 2048 __ load_contents(index, G4_scratch);
duke@435 2049 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
duke@435 2050 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
duke@435 2051 __ or3( G3_scratch, G4_scratch, G4_scratch );
twisti@1162 2052 __ store_contents(G4_scratch, index, G3_scratch);
duke@435 2053
duke@435 2054 // bump bucket contents
duke@435 2055 // _counters[_index] ++;
duke@435 2056
twisti@1162 2057 __ set(counters, G3_scratch); // loads into G3_scratch
duke@435 2058 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
duke@435 2059 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
duke@435 2060 __ ld (G3_scratch, 0, G4_scratch);
duke@435 2061 __ inc (G4_scratch);
duke@435 2062 __ st (G4_scratch, 0, G3_scratch);
duke@435 2063 }
duke@435 2064
duke@435 2065
duke@435 2066 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
duke@435 2067 // Call a little run-time stub to avoid blow-up for each bytecode.
duke@435 2068 // The run-time stub saves the right registers, depending on
duke@435 2069 // the tosca in-state for the given template.
duke@435 2070 address entry = Interpreter::trace_code(t->tos_in());
duke@435 2071 guarantee(entry != NULL, "entry must have been generated");
duke@435 2072 __ call(entry, relocInfo::none);
duke@435 2073 __ delayed()->nop();
duke@435 2074 }
duke@435 2075
duke@435 2076
duke@435 2077 void TemplateInterpreterGenerator::stop_interpreter_at() {
twisti@1162 2078 AddressLiteral counter(&BytecodeCounter::_counter_value);
twisti@1162 2079 __ load_contents(counter, G3_scratch);
twisti@1162 2080 AddressLiteral stop_at(&StopInterpreterAt);
duke@435 2081 __ load_ptr_contents(stop_at, G4_scratch);
duke@435 2082 __ cmp(G3_scratch, G4_scratch);
coleenp@3627 2083 __ breakpoint_trap(Assembler::equal, Assembler::icc);
duke@435 2084 }
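// (This backs the develop flag -XX:StopInterpreterAt=<n>: a breakpoint trap
// is taken once the global bytecode counter reaches n.)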
duke@435 2085 #endif // not PRODUCT
duke@435 2086 #endif // !CC_INTERP
