src/cpu/x86/vm/templateInterpreter_x86_64.cpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2552:638119ce7cfd
child        2784:92add02409c9
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
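
In outline, the intrinsified entry performs an ordinary load of the referent
field followed by the G1 SATB pre-barrier on the loaded value. A minimal
pseudo-C++ sketch of the idea (satb_enqueue is a hypothetical name for the
pre-barrier, not a HotSpot API):

    oop Reference_get(oop reference) {
      oop referent = reference->referent;        // ordinary field load
      if (referent != NULL) {
        satb_enqueue(current_thread, referent);  // log the pre-value in the
      }                                          // thread's SATB buffer
      return referent;
    }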

duke@435 1 /*
iveresov@2438 2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "asm/assembler.hpp"
stefank@2314 27 #include "interpreter/bytecodeHistogram.hpp"
stefank@2314 28 #include "interpreter/interpreter.hpp"
stefank@2314 29 #include "interpreter/interpreterGenerator.hpp"
stefank@2314 30 #include "interpreter/interpreterRuntime.hpp"
stefank@2314 31 #include "interpreter/templateTable.hpp"
stefank@2314 32 #include "oops/arrayOop.hpp"
stefank@2314 33 #include "oops/methodDataOop.hpp"
stefank@2314 34 #include "oops/methodOop.hpp"
stefank@2314 35 #include "oops/oop.inline.hpp"
stefank@2314 36 #include "prims/jvmtiExport.hpp"
stefank@2314 37 #include "prims/jvmtiThreadState.hpp"
stefank@2314 38 #include "runtime/arguments.hpp"
stefank@2314 39 #include "runtime/deoptimization.hpp"
stefank@2314 40 #include "runtime/frame.inline.hpp"
stefank@2314 41 #include "runtime/sharedRuntime.hpp"
stefank@2314 42 #include "runtime/stubRoutines.hpp"
stefank@2314 43 #include "runtime/synchronizer.hpp"
stefank@2314 44 #include "runtime/timer.hpp"
stefank@2314 45 #include "runtime/vframeArray.hpp"
stefank@2314 46 #include "utilities/debug.hpp"
duke@435 47
duke@435 48 #define __ _masm->
duke@435 49
never@739 50 #ifndef CC_INTERP
never@739 51
duke@435 52 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
duke@435 53 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize;
duke@435 54 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
duke@435 55
duke@435 56 //-----------------------------------------------------------------------------
duke@435 57
duke@435 58 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
duke@435 59 address entry = __ pc();
duke@435 60
duke@435 61 #ifdef ASSERT
duke@435 62 {
duke@435 63 Label L;
never@739 64 __ lea(rax, Address(rbp,
never@739 65 frame::interpreter_frame_monitor_block_top_offset *
never@739 66 wordSize));
never@739 67 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
never@739 68 // grows downward)
duke@435 69 __ jcc(Assembler::aboveEqual, L); // check if frame is complete
duke@435 70 __ stop ("interpreter frame not set up");
duke@435 71 __ bind(L);
duke@435 72 }
duke@435 73 #endif // ASSERT
duke@435 74 // Restore bcp under the assumption that the current frame is still
duke@435 75 // interpreted
duke@435 76 __ restore_bcp();
duke@435 77
duke@435 78 // expression stack must be empty before entering the VM if an
duke@435 79 // exception happened
duke@435 80 __ empty_expression_stack();
duke@435 81 // throw exception
duke@435 82 __ call_VM(noreg,
duke@435 83 CAST_FROM_FN_PTR(address,
duke@435 84 InterpreterRuntime::throw_StackOverflowError));
duke@435 85 return entry;
duke@435 86 }
duke@435 87
duke@435 88 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
duke@435 89 const char* name) {
duke@435 90 address entry = __ pc();
duke@435 91 // expression stack must be empty before entering the VM if an
duke@435 92 // exception happened
duke@435 93 __ empty_expression_stack();
duke@435 94 // setup parameters
duke@435 95 // ??? convention: expect aberrant index in register rbx
duke@435 96 __ lea(c_rarg1, ExternalAddress((address)name));
duke@435 97 __ call_VM(noreg,
duke@435 98 CAST_FROM_FN_PTR(address,
duke@435 99 InterpreterRuntime::
duke@435 100 throw_ArrayIndexOutOfBoundsException),
duke@435 101 c_rarg1, rbx);
duke@435 102 return entry;
duke@435 103 }
duke@435 104
duke@435 105 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
duke@435 106 address entry = __ pc();
duke@435 107
duke@435 108 // object is at TOS
never@739 109 __ pop(c_rarg1);
duke@435 110
duke@435 111 // expression stack must be empty before entering the VM if an
duke@435 112 // exception happened
duke@435 113 __ empty_expression_stack();
duke@435 114
duke@435 115 __ call_VM(noreg,
duke@435 116 CAST_FROM_FN_PTR(address,
duke@435 117 InterpreterRuntime::
duke@435 118 throw_ClassCastException),
duke@435 119 c_rarg1);
duke@435 120 return entry;
duke@435 121 }
duke@435 122
twisti@1543 123 // Arguments are: required type at TOS+8, failing object (or NULL) at TOS.
jrose@1145 124 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
jrose@1145 125 address entry = __ pc();
jrose@1145 126
jrose@1145 127 __ pop(c_rarg2); // failing object is at TOS
jrose@1145 128 __ pop(c_rarg1); // required type is at TOS+8
jrose@1145 129
twisti@1543 130 __ verify_oop(c_rarg1);
twisti@1543 131 __ verify_oop(c_rarg2);
twisti@1543 132
twisti@1543 133 // Various method handle types use interpreter registers as temps.
twisti@1543 134 __ restore_bcp();
twisti@1543 135 __ restore_locals();
twisti@1543 136
twisti@1543 137 // Expression stack must be empty before entering the VM for an exception.
jrose@1145 138 __ empty_expression_stack();
jrose@1145 139
jrose@1145 140 __ call_VM(noreg,
jrose@1145 141 CAST_FROM_FN_PTR(address,
twisti@1543 142 InterpreterRuntime::throw_WrongMethodTypeException),
jrose@1145 143 // pass required type, failing object (or NULL)
jrose@1145 144 c_rarg1, c_rarg2);
jrose@1145 145 return entry;
jrose@1145 146 }
jrose@1145 147
duke@435 148 address TemplateInterpreterGenerator::generate_exception_handler_common(
duke@435 149 const char* name, const char* message, bool pass_oop) {
duke@435 150 assert(!pass_oop || message == NULL, "either oop or message but not both");
duke@435 151 address entry = __ pc();
duke@435 152 if (pass_oop) {
duke@435 153 // object is at TOS
never@739 154 __ pop(c_rarg2);
duke@435 155 }
duke@435 156 // expression stack must be empty before entering the VM if an
duke@435 157 // exception happened
duke@435 158 __ empty_expression_stack();
duke@435 159 // setup parameters
duke@435 160 __ lea(c_rarg1, ExternalAddress((address)name));
duke@435 161 if (pass_oop) {
duke@435 162 __ call_VM(rax, CAST_FROM_FN_PTR(address,
duke@435 163 InterpreterRuntime::
duke@435 164 create_klass_exception),
duke@435 165 c_rarg1, c_rarg2);
duke@435 166 } else {
duke@435 167 // kind of lame ExternalAddress can't take NULL because
duke@435 168 // external_word_Relocation will assert.
duke@435 169 if (message != NULL) {
duke@435 170 __ lea(c_rarg2, ExternalAddress((address)message));
duke@435 171 } else {
duke@435 172 __ movptr(c_rarg2, NULL_WORD);
duke@435 173 }
duke@435 174 __ call_VM(rax,
duke@435 175 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
duke@435 176 c_rarg1, c_rarg2);
duke@435 177 }
duke@435 178 // throw exception
duke@435 179 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
duke@435 180 return entry;
duke@435 181 }
duke@435 182
duke@435 183
duke@435 184 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
duke@435 185 address entry = __ pc();
duke@435 186 // NULL last_sp until next java call
never@739 187 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
duke@435 188 __ dispatch_next(state);
duke@435 189 return entry;
duke@435 190 }
duke@435 191
duke@435 192
twisti@2552 193 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
duke@435 194 address entry = __ pc();
duke@435 195
duke@435 196 // Restore stack bottom in case i2c adjusted stack
never@739 197 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
duke@435 198 // and NULL it as marker that esp is now tos until next java call
never@739 199 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
duke@435 200
duke@435 201 __ restore_bcp();
duke@435 202 __ restore_locals();
never@739 203
twisti@1543 204 Label L_got_cache, L_giant_index;
twisti@1543 205 if (EnableInvokeDynamic) {
twisti@1543 206 __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
twisti@1543 207 __ jcc(Assembler::equal, L_giant_index);
twisti@1543 208 }
jrose@1920 209 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
twisti@1543 210 __ bind(L_got_cache);
duke@435 211 __ movl(rbx, Address(rbx, rcx,
twisti@1543 212 Address::times_ptr,
duke@435 213 in_bytes(constantPoolCacheOopDesc::base_offset()) +
duke@435 214 3 * wordSize));
duke@435 215 __ andl(rbx, 0xFF);
never@739 216 __ lea(rsp, Address(rsp, rbx, Address::times_8));
duke@435 217 __ dispatch_next(state, step);
twisti@1543 218
twisti@1543 219 // out of the main line of code...
twisti@1543 220 if (EnableInvokeDynamic) {
twisti@1543 221 __ bind(L_giant_index);
jrose@1920 222 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
twisti@1543 223 __ jmp(L_got_cache);
twisti@1543 224 }
twisti@1543 225
duke@435 226 return entry;
duke@435 227 }
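
// Example (a sketch): for an invoke that pushed three argument slots, the
// low byte of the resolved cache entry's flags word (loaded above from
// base_offset() + 3 * wordSize) is 3, so the lea pops 3 * 8 bytes of
// arguments off the expression stack before dispatching to the next
// bytecode.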
duke@435 228
duke@435 229
duke@435 230 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
duke@435 231 int step) {
duke@435 232 address entry = __ pc();
duke@435 233 // NULL last_sp until next java call
never@739 234 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
duke@435 235 __ restore_bcp();
duke@435 236 __ restore_locals();
duke@435 237 // handle exceptions
duke@435 238 {
duke@435 239 Label L;
never@739 240 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
duke@435 241 __ jcc(Assembler::zero, L);
duke@435 242 __ call_VM(noreg,
duke@435 243 CAST_FROM_FN_PTR(address,
duke@435 244 InterpreterRuntime::throw_pending_exception));
duke@435 245 __ should_not_reach_here();
duke@435 246 __ bind(L);
duke@435 247 }
duke@435 248 __ dispatch_next(state, step);
duke@435 249 return entry;
duke@435 250 }
duke@435 251
duke@435 252 int AbstractInterpreter::BasicType_as_index(BasicType type) {
duke@435 253 int i = 0;
duke@435 254 switch (type) {
duke@435 255 case T_BOOLEAN: i = 0; break;
duke@435 256 case T_CHAR : i = 1; break;
duke@435 257 case T_BYTE : i = 2; break;
duke@435 258 case T_SHORT : i = 3; break;
duke@435 259 case T_INT : i = 4; break;
duke@435 260 case T_LONG : i = 5; break;
duke@435 261 case T_VOID : i = 6; break;
duke@435 262 case T_FLOAT : i = 7; break;
duke@435 263 case T_DOUBLE : i = 8; break;
duke@435 264 case T_OBJECT : i = 9; break;
duke@435 265 case T_ARRAY : i = 9; break;
duke@435 266 default : ShouldNotReachHere();
duke@435 267 }
duke@435 268 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
duke@435 269 "index out of bounds");
duke@435 270 return i;
duke@435 271 }
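
// Note that T_OBJECT and T_ARRAY deliberately share index 9, so e.g.
// AbstractInterpreter::result_handler(T_ARRAY) and result_handler(T_OBJECT)
// resolve to the same oop result handler.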
duke@435 272
duke@435 273
duke@435 274 address TemplateInterpreterGenerator::generate_result_handler_for(
duke@435 275 BasicType type) {
duke@435 276 address entry = __ pc();
duke@435 277 switch (type) {
duke@435 278 case T_BOOLEAN: __ c2bool(rax); break;
duke@435 279 case T_CHAR : __ movzwl(rax, rax); break;
duke@435 280 case T_BYTE : __ sign_extend_byte(rax); break;
duke@435 281 case T_SHORT : __ sign_extend_short(rax); break;
duke@435 282 case T_INT : /* nothing to do */ break;
duke@435 283 case T_LONG : /* nothing to do */ break;
duke@435 284 case T_VOID : /* nothing to do */ break;
duke@435 285 case T_FLOAT : /* nothing to do */ break;
duke@435 286 case T_DOUBLE : /* nothing to do */ break;
duke@435 287 case T_OBJECT :
duke@435 288 // retrieve result from frame
never@739 289 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
duke@435 290 // and verify it
duke@435 291 __ verify_oop(rax);
duke@435 292 break;
duke@435 293 default : ShouldNotReachHere();
duke@435 294 }
duke@435 295 __ ret(0); // return from result handler
duke@435 296 return entry;
duke@435 297 }
duke@435 298
duke@435 299 address TemplateInterpreterGenerator::generate_safept_entry_for(
duke@435 300 TosState state,
duke@435 301 address runtime_entry) {
duke@435 302 address entry = __ pc();
duke@435 303 __ push(state);
duke@435 304 __ call_VM(noreg, runtime_entry);
duke@435 305 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
duke@435 306 return entry;
duke@435 307 }
duke@435 308
duke@435 309
duke@435 310
duke@435 311 // Helpers for commoning out cases in the various type of method entries.
duke@435 312 //
duke@435 313
duke@435 314
duke@435 315 // increment invocation count & check for overflow
duke@435 316 //
duke@435 317 // Note: checking for negative value instead of overflow
duke@435 318 // so we have a 'sticky' overflow test
duke@435 319 //
duke@435 320 // rbx: method
duke@435 321 // rcx: invocation counter
duke@435 322 //
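// A sketch of the non-tiered arithmetic below (pseudo-C, simplified):
//
//   invocation += count_increment;               // bump count (upper bits)
//   sum = invocation + (backedge & count_mask);  // status bits masked out
//   if (sum >= InterpreterInvocationLimit)       // aboveEqual, not carry:
//     goto overflow;                             // the test stays true
//                                                // once the limit is hit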
duke@435 323 void InterpreterGenerator::generate_counter_incr(
duke@435 324 Label* overflow,
duke@435 325 Label* profile_method,
duke@435 326 Label* profile_method_continue) {
iveresov@2138 327 const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
iveresov@2138 328 in_bytes(InvocationCounter::counter_offset()));
iveresov@2138 329 // Note: In tiered we increment counters either in the methodOop or in the MDO, depending on whether we're profiling or not.
iveresov@2138 330 if (TieredCompilation) {
iveresov@2138 331 int increment = InvocationCounter::count_increment;
iveresov@2138 332 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
iveresov@2138 333 Label no_mdo, done;
iveresov@2138 334 if (ProfileInterpreter) {
iveresov@2138 335 // Are we profiling?
iveresov@2138 336 __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
iveresov@2138 337 __ testptr(rax, rax);
iveresov@2138 338 __ jccb(Assembler::zero, no_mdo);
iveresov@2138 339 // Increment counter in the MDO
iveresov@2138 340 const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
iveresov@2138 341 in_bytes(InvocationCounter::counter_offset()));
iveresov@2138 342 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
iveresov@2138 343 __ jmpb(done);
iveresov@2138 344 }
iveresov@2138 345 __ bind(no_mdo);
iveresov@2138 346 // Increment counter in methodOop (we don't need to load it, it's in rcx).
iveresov@2138 347 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
iveresov@2138 348 __ bind(done);
iveresov@2138 349 } else {
iveresov@2138 350 const Address backedge_counter(rbx,
iveresov@2138 351 methodOopDesc::backedge_counter_offset() +
iveresov@2138 352 InvocationCounter::counter_offset());
duke@435 353
iveresov@2138 354 if (ProfileInterpreter) { // %%% Merge this into methodDataOop
iveresov@2138 355 __ incrementl(Address(rbx,
iveresov@2138 356 methodOopDesc::interpreter_invocation_counter_offset()));
iveresov@2138 357 }
iveresov@2138 358 // Update standard invocation counters
iveresov@2138 359 __ movl(rax, backedge_counter); // load backedge counter
duke@435 360
iveresov@2138 361 __ incrementl(rcx, InvocationCounter::count_increment);
iveresov@2138 362 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
iveresov@2138 363
iveresov@2138 364 __ movl(invocation_counter, rcx); // save invocation count
iveresov@2138 365 __ addl(rcx, rax); // add both counters
iveresov@2138 366
iveresov@2138 367 // profile_method is non-NULL only for interpreted methods, so
iveresov@2138 368 // profile_method != NULL implies !native_call
iveresov@2138 369
iveresov@2138 370 if (ProfileInterpreter && profile_method != NULL) {
iveresov@2138 371 // Test to see if we should create a method data oop
iveresov@2138 372 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
iveresov@2138 373 __ jcc(Assembler::less, *profile_method_continue);
iveresov@2138 374
iveresov@2138 375 // if no method data exists, go to profile_method
iveresov@2138 376 __ test_method_data_pointer(rax, *profile_method);
iveresov@2138 377 }
iveresov@2138 378
iveresov@2138 379 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
iveresov@2138 380 __ jcc(Assembler::aboveEqual, *overflow);
duke@435 381 }
duke@435 382 }
duke@435 383
duke@435 384 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
duke@435 385
duke@435 386 // Asm interpreter on entry
duke@435 387 // r14 - locals
duke@435 388 // r13 - bcp
duke@435 389 // rbx - method
duke@435 390 // edx - cpool --- DOES NOT APPEAR TO BE TRUE
duke@435 391 // rbp - interpreter frame
duke@435 392
duke@435 393 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
duke@435 394 // Everything as it was on entry
duke@435 395 // rdx is not restored. Doesn't appear to really be set.
duke@435 396
duke@435 397 const Address size_of_parameters(rbx,
duke@435 398 methodOopDesc::size_of_parameters_offset());
duke@435 399
duke@435 400 // InterpreterRuntime::frequency_counter_overflow takes two
duke@435 401 // arguments, the first (thread) is passed by call_VM, the second
duke@435 402 // indicates if the counter overflow occurs at a backwards branch
duke@435 403 // (NULL bcp). We pass zero for it. The call returns the address
duke@435 404 // of the verified entry point for the method or NULL if the
duke@435 405 // compilation did not complete (either went background or bailed
duke@435 406 // out).
duke@435 407 __ movl(c_rarg1, 0);
duke@435 408 __ call_VM(noreg,
duke@435 409 CAST_FROM_FN_PTR(address,
duke@435 410 InterpreterRuntime::frequency_counter_overflow),
duke@435 411 c_rarg1);
duke@435 412
never@739 413 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
duke@435 414 // Preserve invariant that r13/r14 contain bcp/locals of sender frame
duke@435 415 // and jump to the interpreted entry.
duke@435 416 __ jmp(*do_continue, relocInfo::none);
duke@435 417 }
duke@435 418
duke@435 419 // See if we've got enough room on the stack for locals plus overhead.
duke@435 420 // The expression stack grows down incrementally, so the normal guard
duke@435 421 // page mechanism will work for that.
duke@435 422 //
duke@435 423 // NOTE: Since the additional locals are also always pushed (this was
duke@435 424 // not obvious in generate_method_entry), the guard should work for them
duke@435 425 // too.
duke@435 426 //
duke@435 427 // Args:
duke@435 428 // rdx: number of additional locals this frame needs (what we must check)
duke@435 429 // rbx: methodOop
duke@435 430 //
duke@435 431 // Kills:
duke@435 432 // rax
duke@435 433 void InterpreterGenerator::generate_stack_overflow_check(void) {
duke@435 434
duke@435 435 // monitor entry size: see picture of stack set
duke@435 436 // (generate_method_entry) and frame_amd64.hpp
duke@435 437 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
duke@435 438
duke@435 439 // total overhead size: entry_size + (saved rbp through expr stack
duke@435 440 // bottom). be sure to change this if you add/subtract anything
duke@435 441 // to/from the overhead area
duke@435 442 const int overhead_size =
duke@435 443 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
duke@435 444
duke@435 445 const int page_size = os::vm_page_size();
duke@435 446
duke@435 447 Label after_frame_check;
duke@435 448
duke@435 449 // see if the frame is greater than one page in size. If so,
duke@435 450 // then we need to verify there is enough stack space remaining
duke@435 451 // for the additional locals.
twisti@1861 452 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
duke@435 453 __ jcc(Assembler::belowEqual, after_frame_check);
duke@435 454
duke@435 455 // compute rsp as if this were going to be the last frame on
duke@435 456 // the stack before the red zone
duke@435 457
duke@435 458 const Address stack_base(r15_thread, Thread::stack_base_offset());
duke@435 459 const Address stack_size(r15_thread, Thread::stack_size_offset());
duke@435 460
duke@435 461 // locals + overhead, in bytes
never@739 462 __ mov(rax, rdx);
twisti@1861 463 __ shlptr(rax, Interpreter::logStackElementSize); // convert slots to bytes.
never@739 464 __ addptr(rax, overhead_size);
duke@435 465
duke@435 466 #ifdef ASSERT
duke@435 467 Label stack_base_okay, stack_size_okay;
duke@435 468 // verify that thread stack base is non-zero
never@739 469 __ cmpptr(stack_base, (int32_t)NULL_WORD);
duke@435 470 __ jcc(Assembler::notEqual, stack_base_okay);
duke@435 471 __ stop("stack base is zero");
duke@435 472 __ bind(stack_base_okay);
duke@435 473 // verify that thread stack size is non-zero
never@739 474 __ cmpptr(stack_size, 0);
duke@435 475 __ jcc(Assembler::notEqual, stack_size_okay);
duke@435 476 __ stop("stack size is zero");
duke@435 477 __ bind(stack_size_okay);
duke@435 478 #endif
duke@435 479
duke@435 480 // Add stack base to locals and subtract stack size
never@739 481 __ addptr(rax, stack_base);
never@739 482 __ subptr(rax, stack_size);
duke@435 483
twisti@1570 484 // Use the maximum number of pages we might bang.
twisti@1570 485 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
twisti@1570 486 (StackRedPages+StackYellowPages);
twisti@1570 487
duke@435 488 // add in the red and yellow zone sizes
twisti@1570 489 __ addptr(rax, max_pages * page_size);
duke@435 490
duke@435 491 // check against the current stack bottom
never@739 492 __ cmpptr(rsp, rax);
duke@435 493 __ jcc(Assembler::above, after_frame_check);
duke@435 494
never@739 495 __ pop(rax); // get return address
duke@435 496 __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
duke@435 497
duke@435 498 // all done with frame size check
duke@435 499 __ bind(after_frame_check);
duke@435 500 }
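
// The generated check above amounts to (a pseudo-C sketch):
//
//   frame_bytes = rdx * stackElementSize + overhead_size;
//   if (frame_bytes <= page_size) ok;            // covered by the guard page
//   limit = (stack_base - stack_size)            // lowest stack address
//         + max_pages * page_size                // red/yellow/shadow zones
//         + frame_bytes;
//   if (rsp <= limit) throw StackOverflowError;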
duke@435 501
duke@435 502 // Allocate monitor and lock method (asm interpreter)
duke@435 503 //
duke@435 504 // Args:
duke@435 505 // rbx: methodOop
duke@435 506 // r14: locals
duke@435 507 //
duke@435 508 // Kills:
duke@435 509 // rax
duke@435 510 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
duke@435 511 // rscratch1, rscratch2 (scratch regs)
duke@435 512 void InterpreterGenerator::lock_method(void) {
duke@435 513 // synchronize method
duke@435 514 const Address access_flags(rbx, methodOopDesc::access_flags_offset());
duke@435 515 const Address monitor_block_top(
duke@435 516 rbp,
duke@435 517 frame::interpreter_frame_monitor_block_top_offset * wordSize);
duke@435 518 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
duke@435 519
duke@435 520 #ifdef ASSERT
duke@435 521 {
duke@435 522 Label L;
duke@435 523 __ movl(rax, access_flags);
duke@435 524 __ testl(rax, JVM_ACC_SYNCHRONIZED);
duke@435 525 __ jcc(Assembler::notZero, L);
duke@435 526 __ stop("method doesn't need synchronization");
duke@435 527 __ bind(L);
duke@435 528 }
duke@435 529 #endif // ASSERT
duke@435 530
duke@435 531 // get synchronization object
duke@435 532 {
duke@435 533 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
duke@435 534 Klass::java_mirror_offset_in_bytes();
duke@435 535 Label done;
duke@435 536 __ movl(rax, access_flags);
duke@435 537 __ testl(rax, JVM_ACC_STATIC);
duke@435 538 // get receiver (assume this is frequent case)
never@739 539 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
duke@435 540 __ jcc(Assembler::zero, done);
never@739 541 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
never@739 542 __ movptr(rax, Address(rax,
never@739 543 constantPoolOopDesc::pool_holder_offset_in_bytes()));
never@739 544 __ movptr(rax, Address(rax, mirror_offset));
duke@435 545
duke@435 546 #ifdef ASSERT
duke@435 547 {
duke@435 548 Label L;
never@739 549 __ testptr(rax, rax);
duke@435 550 __ jcc(Assembler::notZero, L);
duke@435 551 __ stop("synchronization object is NULL");
duke@435 552 __ bind(L);
duke@435 553 }
duke@435 554 #endif // ASSERT
duke@435 555
duke@435 556 __ bind(done);
duke@435 557 }
duke@435 558
duke@435 559 // add space for monitor & lock
never@739 560 __ subptr(rsp, entry_size); // add space for a monitor entry
never@739 561 __ movptr(monitor_block_top, rsp); // set new monitor block top
duke@435 562 // store object
never@739 563 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
never@739 564 __ movptr(c_rarg1, rsp); // object address
duke@435 565 __ lock_object(c_rarg1);
duke@435 566 }
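
// In Java terms the synchronization object selected above is (a sketch):
//
//   synchronized void m()        --> lock(receiver)      // local 0
//   static synchronized void m() --> lock(Holder.class)  // via constants()
//                                                        //   ->pool_holder
//                                                        //   ->java_mirror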
duke@435 567
duke@435 568 // Generate a fixed interpreter frame. This is identical setup for
duke@435 569 // interpreted methods and for native methods hence the shared code.
duke@435 570 //
duke@435 571 // Args:
duke@435 572 // rax: return address
duke@435 573 // rbx: methodOop
duke@435 574 // r14: pointer to locals
duke@435 575 // r13: sender sp
duke@435 576 // rdx: cp cache
duke@435 577 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
duke@435 578 // initialize fixed part of activation frame
never@739 579 __ push(rax); // save return address
duke@435 580 __ enter(); // save old & set new rbp
never@739 581 __ push(r13); // set sender sp
never@739 582 __ push((int)NULL_WORD); // leave last_sp as null
never@739 583 __ movptr(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
never@739 584 __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
never@739 585 __ push(rbx); // save methodOop
duke@435 586 if (ProfileInterpreter) {
duke@435 587 Label method_data_continue;
never@739 588 __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
never@739 589 __ testptr(rdx, rdx);
duke@435 590 __ jcc(Assembler::zero, method_data_continue);
never@739 591 __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
duke@435 592 __ bind(method_data_continue);
never@739 593 __ push(rdx); // set the mdp (method data pointer)
duke@435 594 } else {
never@739 595 __ push(0);
duke@435 596 }
duke@435 597
never@739 598 __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
never@739 599 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
never@739 600 __ push(rdx); // set constant pool cache
never@739 601 __ push(r14); // set locals pointer
duke@435 602 if (native_call) {
never@739 603 __ push(0); // no bcp
duke@435 604 } else {
never@739 605 __ push(r13); // set bcp
duke@435 606 }
never@739 607 __ push(0); // reserve word for pointer to expression stack bottom
never@739 608 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
duke@435 609 }
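
// The fixed frame built above looks like this (a sketch; stack grows down):
//
//   [ return address          ]
//   [ saved rbp               ] <-- rbp
//   [ sender sp (r13)         ]
//   [ last_sp == NULL         ]
//   [ methodOop               ]
//   [ mdp, or 0               ]
//   [ constant pool cache     ]
//   [ locals pointer (r14)    ]
//   [ bcp, or 0 for natives   ]
//   [ expr. stack bottom ptr  ] <-- rsp (slot points at itself initially)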
duke@435 610
duke@435 611 // End of helpers
duke@435 612
never@739 613 // Various method entries
never@739 614 //------------------------------------------------------------------------------------------------------------------------
never@739 615 //
never@739 616 //
never@739 617
never@739 618 // Call an accessor method (assuming it is resolved; otherwise drop
never@739 619 // into the vanilla (slow path) entry)
never@739 620 address InterpreterGenerator::generate_accessor_entry(void) {
never@739 621 // rbx: methodOop
never@739 622
never@739 623 // r13: senderSP must be preserved for slow path, set SP to it on fast path
never@739 624
never@739 625 address entry_point = __ pc();
never@739 626 Label xreturn_path;
never@739 627
never@739 628 // do fastpath for resolved accessor methods
never@739 629 if (UseFastAccessorMethods) {
never@739 630 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
never@739 631 // thereof; parameter size = 1
never@739 632 // Note: We can only use this code if the getfield has been resolved
never@739 633 // and if we don't have a null-pointer exception => check for
never@739 634 // these conditions first and use slow path if necessary.
never@739 635 Label slow_path;
never@739 636 // If we need a safepoint check, generate full interpreter entry.
never@739 637 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
never@739 638 SafepointSynchronize::_not_synchronized);
never@739 639
never@739 640 __ jcc(Assembler::notEqual, slow_path);
never@739 641 // rbx: method
never@739 642 __ movptr(rax, Address(rsp, wordSize));
never@739 643
never@739 644 // check if local 0 != NULL and read field
never@739 645 __ testptr(rax, rax);
never@739 646 __ jcc(Assembler::zero, slow_path);
never@739 647
never@739 648 __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
never@739 649 // read first instruction word and extract bytecode @ 1 and index @ 2
never@739 650 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
never@739 651 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
never@739 652 // Shift codes right to get the index on the right.
never@739 653 // The bytecode fetched looks like <index><0xb4><0x2a>
never@739 654 __ shrl(rdx, 2 * BitsPerByte);
never@739 655 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
never@739 656 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
never@739 657
never@739 658 // rax: local 0
never@739 659 // rbx: method
never@739 660 // rdx: constant pool cache index
never@739 661 // rdi: constant pool cache
never@739 662
never@739 663 // check if getfield has been resolved and read constant pool cache entry
never@739 664 // check the validity of the cache entry by testing whether _indices field
never@739 665 // contains Bytecodes::_getfield in the b1 byte.
never@739 666 assert(in_words(ConstantPoolCacheEntry::size()) == 4,
never@739 667 "adjust shift below");
never@739 668 __ movl(rcx,
never@739 669 Address(rdi,
never@739 670 rdx,
never@739 671 Address::times_8,
never@739 672 constantPoolCacheOopDesc::base_offset() +
never@739 673 ConstantPoolCacheEntry::indices_offset()));
never@739 674 __ shrl(rcx, 2 * BitsPerByte);
never@739 675 __ andl(rcx, 0xFF);
never@739 676 __ cmpl(rcx, Bytecodes::_getfield);
never@739 677 __ jcc(Assembler::notEqual, slow_path);
never@739 678
never@739 679 // Note: constant pool entry is not valid before bytecode is resolved
never@739 680 __ movptr(rcx,
never@739 681 Address(rdi,
never@739 682 rdx,
never@739 683 Address::times_8,
never@739 684 constantPoolCacheOopDesc::base_offset() +
never@739 685 ConstantPoolCacheEntry::f2_offset()));
never@739 686 // edx: flags
never@739 687 __ movl(rdx,
never@739 688 Address(rdi,
never@739 689 rdx,
never@739 690 Address::times_8,
never@739 691 constantPoolCacheOopDesc::base_offset() +
never@739 692 ConstantPoolCacheEntry::flags_offset()));
never@739 693
never@739 694 Label notObj, notInt, notByte, notShort;
never@739 695 const Address field_address(rax, rcx, Address::times_1);
never@739 696
never@739 697 // Need to differentiate between igetfield, agetfield, bgetfield etc.
never@739 698 // because they are different sizes.
never@739 699 // Use the type from the constant pool cache
never@739 700 __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
never@739 701 // Make sure we don't need to mask edx for tosBits after the above shift
never@739 702 ConstantPoolCacheEntry::verify_tosBits();
never@739 703
never@739 704 __ cmpl(rdx, atos);
never@739 705 __ jcc(Assembler::notEqual, notObj);
never@739 706 // atos
never@739 707 __ load_heap_oop(rax, field_address);
never@739 708 __ jmp(xreturn_path);
never@739 709
never@739 710 __ bind(notObj);
never@739 711 __ cmpl(rdx, itos);
never@739 712 __ jcc(Assembler::notEqual, notInt);
never@739 713 // itos
never@739 714 __ movl(rax, field_address);
never@739 715 __ jmp(xreturn_path);
never@739 716
never@739 717 __ bind(notInt);
never@739 718 __ cmpl(rdx, btos);
never@739 719 __ jcc(Assembler::notEqual, notByte);
never@739 720 // btos
never@739 721 __ load_signed_byte(rax, field_address);
never@739 722 __ jmp(xreturn_path);
never@739 723
never@739 724 __ bind(notByte);
never@739 725 __ cmpl(rdx, stos);
never@739 726 __ jcc(Assembler::notEqual, notShort);
never@739 727 // stos
jrose@1057 728 __ load_signed_short(rax, field_address);
never@739 729 __ jmp(xreturn_path);
never@739 730
never@739 731 __ bind(notShort);
never@739 732 #ifdef ASSERT
never@739 733 Label okay;
never@739 734 __ cmpl(rdx, ctos);
never@739 735 __ jcc(Assembler::equal, okay);
never@739 736 __ stop("what type is this?");
never@739 737 __ bind(okay);
never@739 738 #endif
never@739 739 // ctos
jrose@1057 740 __ load_unsigned_short(rax, field_address);
never@739 741
never@739 742 __ bind(xreturn_path);
never@739 743
never@739 744 // _ireturn/_areturn
never@739 745 __ pop(rdi);
never@739 746 __ mov(rsp, r13);
never@739 747 __ jmp(rdi);
never@739 748 __ ret(0);
never@739 749
never@739 750 // generate a vanilla interpreter entry as the slow path
never@739 751 __ bind(slow_path);
never@739 752 (void) generate_normal_entry(false);
never@739 753 } else {
never@739 754 (void) generate_normal_entry(false);
never@739 755 }
never@739 756
never@739 757 return entry_point;
never@739 758 }
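
// In effect the fast path above implements a resolved getfield accessor
// (a pseudo-C sketch of the generated logic):
//
//   if (safepoint_pending()) goto slow_path;
//   receiver = stack[0];
//   if (receiver == NULL) goto slow_path;
//   entry = cp_cache[index];                     // index from bytecodes @ 2
//   if (entry.bytecode_1 != Bytecodes::_getfield) goto slow_path;
//   result = *(receiver + entry.f2);             // load sized per tosBits
//   return result;                               // pop args, rsp = r13,
//                                                // jump to return address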
never@739 759
johnc@2781 760 // Method entry for java.lang.ref.Reference.get.
johnc@2781 761 address InterpreterGenerator::generate_Reference_get_entry(void) {
johnc@2781 762 #ifndef SERIALGC
johnc@2781 763 // Code: _aload_0, _getfield, _areturn
johnc@2781 764 // parameter size = 1
johnc@2781 765 //
johnc@2781 766 // The code that gets generated by this routine is split into 2 parts:
johnc@2781 767 // 1. The "intrinsified" code for G1 (or any SATB based GC),
johnc@2781 768 // 2. The slow path - which is an expansion of the regular method entry.
johnc@2781 769 //
johnc@2781 770 // Notes:
johnc@2781 771 // * In the G1 code we do not check whether we need to block for
johnc@2781 772 // a safepoint. If G1 is enabled then we must execute the specialized
johnc@2781 773 // code for Reference.get (except when the Reference object is null)
johnc@2781 774 // so that we can log the value in the referent field with an SATB
johnc@2781 775 // update buffer.
johnc@2781 776 // If the code for the getfield template is modified so that the
johnc@2781 777 // G1 pre-barrier code is executed when the current method is
johnc@2781 778 // Reference.get() then going through the normal method entry
johnc@2781 779 // will be fine.
johnc@2781 780 // * The G1 code can, however, check the receiver object (the instance
johnc@2781 781 // of java.lang.Reference) and jump to the slow path if null. If the
johnc@2781 782 // Reference object is null then we obviously cannot fetch the referent
johnc@2781 783 // and so we don't need to call the G1 pre-barrier. Thus we can use the
johnc@2781 784 // regular method entry code to generate the NPE.
johnc@2781 785 //
johnc@2781 786 // This code is based on generate_accessor_entry.
johnc@2781 787 //
johnc@2781 788 // rbx: methodOop
johnc@2781 789
johnc@2781 790 // r13: senderSP must preserve for slow path, set SP to it on fast path
johnc@2781 791
johnc@2781 792 address entry = __ pc();
johnc@2781 793
johnc@2781 794 const int referent_offset = java_lang_ref_Reference::referent_offset;
johnc@2781 795 guarantee(referent_offset > 0, "referent offset not initialized");
johnc@2781 796
johnc@2781 797 if (UseG1GC) {
johnc@2781 798 Label slow_path;
johnc@2781 799 // rbx: method
johnc@2781 800
johnc@2781 801 // Check if local 0 != NULL
johnc@2781 802 // If the receiver is null then it is OK to jump to the slow path.
johnc@2781 803 __ movptr(rax, Address(rsp, wordSize));
johnc@2781 804
johnc@2781 805 __ testptr(rax, rax);
johnc@2781 806 __ jcc(Assembler::zero, slow_path);
johnc@2781 807
johnc@2781 808 // rax: local 0
johnc@2781 809 // rbx: method (but can be used as scratch now)
johnc@2781 810 // rdx: scratch
johnc@2781 811 // rdi: scratch
johnc@2781 812
johnc@2781 816 // Load the value of the referent field.
johnc@2781 817 const Address field_address(rax, referent_offset);
johnc@2781 818 __ load_heap_oop(rax, field_address);
johnc@2781 819
johnc@2781 820 // Generate the G1 pre-barrier code to log the value of
johnc@2781 821 // the referent field in an SATB buffer.
johnc@2781 822 __ g1_write_barrier_pre(noreg /* obj */,
johnc@2781 823 rax /* pre_val */,
johnc@2781 824 r15_thread /* thread */,
johnc@2781 825 rbx /* tmp */,
johnc@2781 826 true /* tosca_live */,
johnc@2781 827 true /* expand_call */);
johnc@2781 828
johnc@2781 829 // _areturn
johnc@2781 830 __ pop(rdi); // get return address
johnc@2781 831 __ mov(rsp, r13); // set sp to sender sp
johnc@2781 832 __ jmp(rdi);
johnc@2781 833 __ ret(0);
johnc@2781 834
johnc@2781 835 // generate a vanilla interpreter entry as the slow path
johnc@2781 836 __ bind(slow_path);
johnc@2781 837 (void) generate_normal_entry(false);
johnc@2781 838
johnc@2781 839 return entry;
johnc@2781 840 }
johnc@2781 841 #endif // SERIALGC
johnc@2781 842
johnc@2781 843 // If G1 is not enabled then attempt to go through the accessor entry point;
johnc@2781 844 // Reference.get is an accessor.
johnc@2781 845 return generate_accessor_entry();
johnc@2781 846 }
johnc@2781 847
johnc@2781 848
duke@435 849 // Interpreter stub for calling a native method. (asm interpreter)
duke@435 850 // This sets up a somewhat different looking stack for calling the
duke@435 851 // native method than the typical interpreter frame setup.
duke@435 852 address InterpreterGenerator::generate_native_entry(bool synchronized) {
duke@435 853 // determine code generation flags
duke@435 854 bool inc_counter = UseCompiler || CountCompiledCalls;
duke@435 855
duke@435 856 // rbx: methodOop
duke@435 857 // r13: sender sp
duke@435 858
duke@435 859 address entry_point = __ pc();
duke@435 860
duke@435 861 const Address size_of_parameters(rbx, methodOopDesc::
duke@435 862 size_of_parameters_offset());
duke@435 863 const Address invocation_counter(rbx, methodOopDesc::
duke@435 864 invocation_counter_offset() +
duke@435 865 InvocationCounter::counter_offset());
duke@435 866 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
duke@435 867
duke@435 868 // get parameter size (always needed)
jrose@1057 869 __ load_unsigned_short(rcx, size_of_parameters);
duke@435 870
duke@435 871 // native calls don't need the stack size check since they have no
duke@435 872 // expression stack and the arguments are already on the stack and
duke@435 873 // we only add a handful of words to the stack
duke@435 874
duke@435 875 // rbx: methodOop
duke@435 876 // rcx: size of parameters
duke@435 877 // r13: sender sp
never@739 878 __ pop(rax); // get return address
duke@435 879
duke@435 880 // for natives the size of locals is zero
duke@435 881
duke@435 882 // compute beginning of parameters (r14)
never@739 883 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
duke@435 884
duke@435 885 // add 2 zero-initialized slots for native calls
duke@435 886 // initialize result_handler slot
never@739 887 __ push((int) NULL_WORD);
duke@435 888 // slot for oop temp
duke@435 889 // (static native method holder mirror/jni oop result)
never@739 890 __ push((int) NULL_WORD);
duke@435 891
duke@435 892 if (inc_counter) {
duke@435 893 __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
duke@435 894 }
duke@435 895
duke@435 896 // initialize fixed part of activation frame
duke@435 897 generate_fixed_frame(true);
duke@435 898
duke@435 899 // make sure method is native & not abstract
duke@435 900 #ifdef ASSERT
duke@435 901 __ movl(rax, access_flags);
duke@435 902 {
duke@435 903 Label L;
duke@435 904 __ testl(rax, JVM_ACC_NATIVE);
duke@435 905 __ jcc(Assembler::notZero, L);
duke@435 906 __ stop("tried to execute non-native method as native");
duke@435 907 __ bind(L);
duke@435 908 }
duke@435 909 {
duke@435 910 Label L;
duke@435 911 __ testl(rax, JVM_ACC_ABSTRACT);
duke@435 912 __ jcc(Assembler::zero, L);
duke@435 913 __ stop("tried to execute abstract method in interpreter");
duke@435 914 __ bind(L);
duke@435 915 }
duke@435 916 #endif
duke@435 917
duke@435 918 // Since at this point in the method invocation the exception handler
duke@435 919 // would try to exit the monitor of a synchronized method that has not
duke@435 920 // been entered yet, we set the thread local variable
duke@435 921 // _do_not_unlock_if_synchronized to true. The remove_activation will
duke@435 922 // check this flag.
duke@435 923
duke@435 924 const Address do_not_unlock_if_synchronized(r15_thread,
duke@435 925 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
duke@435 926 __ movbool(do_not_unlock_if_synchronized, true);
duke@435 927
duke@435 928 // increment invocation count & check for overflow
duke@435 929 Label invocation_counter_overflow;
duke@435 930 if (inc_counter) {
duke@435 931 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
duke@435 932 }
duke@435 933
duke@435 934 Label continue_after_compile;
duke@435 935 __ bind(continue_after_compile);
duke@435 936
duke@435 937 bang_stack_shadow_pages(true);
duke@435 938
duke@435 939 // reset the _do_not_unlock_if_synchronized flag
duke@435 940 __ movbool(do_not_unlock_if_synchronized, false);
duke@435 941
duke@435 942 // check for synchronized methods
duke@435 943 // Must happen AFTER invocation_counter check and stack overflow check,
duke@435 944 // so the method is not locked if the counter overflows.
duke@435 945 if (synchronized) {
duke@435 946 lock_method();
duke@435 947 } else {
duke@435 948 // no synchronization necessary
duke@435 949 #ifdef ASSERT
duke@435 950 {
duke@435 951 Label L;
duke@435 952 __ movl(rax, access_flags);
duke@435 953 __ testl(rax, JVM_ACC_SYNCHRONIZED);
duke@435 954 __ jcc(Assembler::zero, L);
duke@435 955 __ stop("method needs synchronization");
duke@435 956 __ bind(L);
duke@435 957 }
duke@435 958 #endif
duke@435 959 }
duke@435 960
duke@435 961 // start execution
duke@435 962 #ifdef ASSERT
duke@435 963 {
duke@435 964 Label L;
duke@435 965 const Address monitor_block_top(rbp,
duke@435 966 frame::interpreter_frame_monitor_block_top_offset * wordSize);
never@739 967 __ movptr(rax, monitor_block_top);
never@739 968 __ cmpptr(rax, rsp);
duke@435 969 __ jcc(Assembler::equal, L);
duke@435 970 __ stop("broken stack frame setup in interpreter");
duke@435 971 __ bind(L);
duke@435 972 }
duke@435 973 #endif
duke@435 974
duke@435 975 // jvmti support
duke@435 976 __ notify_method_entry();
duke@435 977
duke@435 978 // work registers
duke@435 979 const Register method = rbx;
coleenp@548 980 const Register t = r11;
duke@435 981
duke@435 982 // allocate space for parameters
duke@435 983 __ get_method(method);
duke@435 984 __ verify_oop(method);
jrose@1057 985 __ load_unsigned_short(t,
jrose@1057 986 Address(method,
jrose@1057 987 methodOopDesc::size_of_parameters_offset()));
twisti@1861 988 __ shll(t, Interpreter::logStackElementSize);
duke@435 989
never@739 990 __ subptr(rsp, t);
never@739 991 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
twisti@1040 992 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
duke@435 993
duke@435 994 // get signature handler
duke@435 995 {
duke@435 996 Label L;
never@739 997 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
never@739 998 __ testptr(t, t);
duke@435 999 __ jcc(Assembler::notZero, L);
duke@435 1000 __ call_VM(noreg,
duke@435 1001 CAST_FROM_FN_PTR(address,
duke@435 1002 InterpreterRuntime::prepare_native_call),
duke@435 1003 method);
duke@435 1004 __ get_method(method);
never@739 1005 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
duke@435 1006 __ bind(L);
duke@435 1007 }
duke@435 1008
duke@435 1009 // call signature handler
duke@435 1010 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
duke@435 1011 "adjust this code");
duke@435 1012 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
duke@435 1013 "adjust this code");
duke@435 1014 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
duke@435 1015 "adjust this code");
duke@435 1016
duke@435 1017 // The generated handlers do not touch RBX (the method oop).
duke@435 1018 // However, large signatures cannot be cached and are generated
duke@435 1019 // each time here. The slow-path generator can do a GC on return,
duke@435 1020 // so we must reload it after the call.
duke@435 1021 __ call(t);
duke@435 1022 __ get_method(method); // slow path can do a GC, reload RBX
duke@435 1023
duke@435 1024
duke@435 1025 // result handler is in rax
duke@435 1026 // set result handler
never@739 1027 __ movptr(Address(rbp,
never@739 1028 (frame::interpreter_frame_result_handler_offset) * wordSize),
never@739 1029 rax);
duke@435 1030
duke@435 1031 // pass mirror handle if static call
duke@435 1032 {
duke@435 1033 Label L;
duke@435 1034 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
duke@435 1035 Klass::java_mirror_offset_in_bytes();
duke@435 1036 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
duke@435 1037 __ testl(t, JVM_ACC_STATIC);
duke@435 1038 __ jcc(Assembler::zero, L);
duke@435 1039 // get mirror
never@739 1040 __ movptr(t, Address(method, methodOopDesc::constants_offset()));
never@739 1041 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
never@739 1042 __ movptr(t, Address(t, mirror_offset));
duke@435 1043 // copy mirror into activation frame
never@739 1044 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
duke@435 1045 t);
duke@435 1046 // pass handle to mirror
never@739 1047 __ lea(c_rarg1,
never@739 1048 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
duke@435 1049 __ bind(L);
duke@435 1050 }
duke@435 1051
duke@435 1052 // get native function entry point
duke@435 1053 {
duke@435 1054 Label L;
never@739 1055 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
duke@435 1056 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
duke@435 1057 __ movptr(rscratch2, unsatisfied.addr());
never@739 1058 __ cmpptr(rax, rscratch2);
duke@435 1059 __ jcc(Assembler::notEqual, L);
duke@435 1060 __ call_VM(noreg,
duke@435 1061 CAST_FROM_FN_PTR(address,
duke@435 1062 InterpreterRuntime::prepare_native_call),
duke@435 1063 method);
duke@435 1064 __ get_method(method);
duke@435 1065 __ verify_oop(method);
never@739 1066 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
duke@435 1067 __ bind(L);
duke@435 1068 }
duke@435 1069
duke@435 1070 // pass JNIEnv
never@739 1071 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
duke@435 1072
duke@435 1073 // It is enough that the pc() points into the right code
duke@435 1074 // segment. It does not have to be the correct return pc.
duke@435 1075 __ set_last_Java_frame(rsp, rbp, (address) __ pc());
duke@435 1076
duke@435 1077 // change thread state
duke@435 1078 #ifdef ASSERT
duke@435 1079 {
duke@435 1080 Label L;
duke@435 1081 __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
duke@435 1082 __ cmpl(t, _thread_in_Java);
duke@435 1083 __ jcc(Assembler::equal, L);
duke@435 1084 __ stop("Wrong thread state in native stub");
duke@435 1085 __ bind(L);
duke@435 1086 }
duke@435 1087 #endif
duke@435 1088
duke@435 1089 // Change state to native
duke@435 1090
duke@435 1091 __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
duke@435 1092 _thread_in_native);
duke@435 1093
duke@435 1094 // Call the native method.
duke@435 1095 __ call(rax);
duke@435 1096 // result potentially in rax or xmm0
duke@435 1097
duke@435 1098 // Depending on runtime options, either restore the MXCSR
duke@435 1099 // register after returning from the JNI Call or verify that
duke@435 1100 // it wasn't changed during -Xcheck:jni.
duke@435 1101 if (RestoreMXCSROnJNICalls) {
never@739 1102 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
duke@435 1103 }
duke@435 1104 else if (CheckJNICalls) {
never@739 1105 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
duke@435 1106 }
duke@435 1107
duke@435 1108 // NOTE: The order of these pushes is known to frame::interpreter_frame_result
duke@435 1109 // in order to extract the result of a method call. If the order of these
duke@435 1110 // pushes changes or anything else is added to the stack then the code in
duke@435 1111 // interpreter_frame_result must also change.
duke@435 1112
duke@435 1113 __ push(dtos);
duke@435 1114 __ push(ltos);
duke@435 1115
duke@435 1116 // change thread state
duke@435 1117 __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
duke@435 1118 _thread_in_native_trans);
duke@435 1119
duke@435 1120 if (os::is_MP()) {
duke@435 1121 if (UseMembar) {
duke@435 1122 // Force this write out before the read below
duke@435 1123 __ membar(Assembler::Membar_mask_bits(
duke@435 1124 Assembler::LoadLoad | Assembler::LoadStore |
duke@435 1125 Assembler::StoreLoad | Assembler::StoreStore));
duke@435 1126 } else {
duke@435 1127 // Write serialization page so VM thread can do a pseudo remote membar.
duke@435 1128 // We use the current thread pointer to calculate a thread specific
duke@435 1129 // offset to write to within the page. This minimizes bus traffic
duke@435 1130 // due to cache line collision.
duke@435 1131 __ serialize_memory(r15_thread, rscratch2);
duke@435 1132 }
duke@435 1133 }
duke@435 1134
duke@435 1135 // check for safepoint operation in progress and/or pending suspend requests
duke@435 1136 {
duke@435 1137 Label Continue;
duke@435 1138 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
duke@435 1139 SafepointSynchronize::_not_synchronized);
duke@435 1140
duke@435 1141 Label L;
duke@435 1142 __ jcc(Assembler::notEqual, L);
duke@435 1143 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
duke@435 1144 __ jcc(Assembler::equal, Continue);
duke@435 1145 __ bind(L);
duke@435 1146
duke@435 1147 // Don't use call_VM as it will see a possible pending exception
duke@435 1148 // and forward it and never return here preventing us from
duke@435 1149 // clearing _last_native_pc down below. Also can't use
duke@435 1150 // call_VM_leaf either as it will check to see if r13 & r14 are
duke@435 1151 // preserved and correspond to the bcp/locals pointers. So we do a
duke@435 1152 // runtime call by hand.
duke@435 1153 //
never@739 1154 __ mov(c_rarg0, r15_thread);
coleenp@2318 1155 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
never@739 1156 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
never@739 1157 __ andptr(rsp, -16); // align stack as required by ABI
duke@435 1158 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
never@739 1159 __ mov(rsp, r12); // restore sp
coleenp@548 1160 __ reinit_heapbase();
duke@435 1161 __ bind(Continue);
duke@435 1162 }
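
// The transition just performed, in outline (a sketch):
//
//   thread->state = _thread_in_native_trans;        // store
//   <StoreLoad fence, or serialization page write>  // publish to VM thread
//   if (safepoint_in_progress() || thread->suspend_flags != 0)
//     check_special_condition_for_native_trans(thread);  // may block here
//   thread->state = _thread_in_Java;                 // done just below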
duke@435 1163
duke@435 1164 // change thread state
duke@435 1165 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
duke@435 1166
duke@435 1167 // reset_last_Java_frame
duke@435 1168 __ reset_last_Java_frame(true, true);
duke@435 1169
duke@435 1170 // reset handle block
never@739 1171 __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
never@739 1172 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
duke@435 1173
duke@435 1174 // If result is an oop unbox and store it in frame where gc will see it
duke@435 1175 // and result handler will pick it up
duke@435 1176
duke@435 1177 {
duke@435 1178 Label no_oop, store_result;
duke@435 1179 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
never@739 1180 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
duke@435 1181 __ jcc(Assembler::notEqual, no_oop);
duke@435 1182 // retrieve result
duke@435 1183 __ pop(ltos);
never@739 1184 __ testptr(rax, rax);
duke@435 1185 __ jcc(Assembler::zero, store_result);
never@739 1186 __ movptr(rax, Address(rax, 0));
duke@435 1187 __ bind(store_result);
never@739 1188 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
duke@435 1189 // keep stack depth as expected by pushing an oop which will eventually be discarded
duke@435 1190 __ push(ltos);
duke@435 1191 __ bind(no_oop);
duke@435 1192 }
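
// i.e. (a sketch, where oop_temp stands for the slot at
// interpreter_frame_oop_temp_offset): a non-NULL JNI object result is a
// handle, so it is dereferenced once and parked where the GC can see it:
//
//   if (result_handler == result_handler(T_OBJECT))
//     oop_temp = (rax == NULL) ? NULL : *(oop*)rax;  // unboxed later by
//                                                    // the result handler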
duke@435 1193
duke@435 1194
duke@435 1195 {
duke@435 1196 Label no_reguard;
duke@435 1197 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
duke@435 1198 JavaThread::stack_guard_yellow_disabled);
duke@435 1199 __ jcc(Assembler::notEqual, no_reguard);
duke@435 1200
never@739 1201 __ pusha(); // XXX only save smashed registers
coleenp@2318 1202 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
never@739 1203 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
never@739 1204 __ andptr(rsp, -16); // align stack as required by ABI
duke@435 1205 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
never@739 1206 __ mov(rsp, r12); // restore sp
never@739 1207 __ popa(); // XXX only restore smashed registers
coleenp@548 1208 __ reinit_heapbase();
duke@435 1209
duke@435 1210 __ bind(no_reguard);
duke@435 1211 }
duke@435 1212
duke@435 1213
duke@435 1214 // The method register is junk from after the thread_in_native transition
duke@435 1215 // until here. Also can't call_VM until the bcp has been
duke@435 1216 // restored. Need bcp for throwing exception below so get it now.
duke@435 1217 __ get_method(method);
duke@435 1218 __ verify_oop(method);
duke@435 1219
duke@435 1220 // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
duke@435 1221 // r13 == code_base()
never@739 1222 __ movptr(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop
never@739 1223 __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
duke@435 1224 // handle exceptions (exception handling will handle unlocking!)
duke@435 1225 {
duke@435 1226 Label L;
never@739 1227 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
duke@435 1228 __ jcc(Assembler::zero, L);
duke@435 1229 // Note: At some point we may want to unify this with the code
duke@435 1230 // used in call_VM_base(); i.e., we should use the
duke@435 1231 // StubRoutines::forward_exception code. For now this doesn't work
duke@435 1232 // here because the rsp is not correctly set at this point.
duke@435 1233 __ MacroAssembler::call_VM(noreg,
duke@435 1234 CAST_FROM_FN_PTR(address,
duke@435 1235 InterpreterRuntime::throw_pending_exception));
duke@435 1236 __ should_not_reach_here();
duke@435 1237 __ bind(L);
duke@435 1238 }
duke@435 1239
duke@435 1240 // do unlocking if necessary
duke@435 1241 {
duke@435 1242 Label L;
duke@435 1243 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
duke@435 1244 __ testl(t, JVM_ACC_SYNCHRONIZED);
duke@435 1245 __ jcc(Assembler::zero, L);
duke@435 1246 // the code below should be shared with interpreter macro
duke@435 1247 // assembler implementation
duke@435 1248 {
duke@435 1249 Label unlock;
duke@435 1250 // BasicObjectLock will be first in list, since this is a
duke@435 1251 // synchronized method. However, need to check that the object
duke@435 1252 // has not been unlocked by an explicit monitorexit bytecode.
duke@435 1253 const Address monitor(rbp,
duke@435 1254 (intptr_t)(frame::interpreter_frame_initial_sp_offset *
duke@435 1255 wordSize - sizeof(BasicObjectLock)));
duke@435 1256
duke@435 1257 // the monitor is expected in c_rarg1 for the slow unlock path
never@739 1258 __ lea(c_rarg1, monitor); // address of first monitor
duke@435 1259
never@739 1260 __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
never@739 1261 __ testptr(t, t);
duke@435 1262 __ jcc(Assembler::notZero, unlock);
duke@435 1263
duke@435 1264 // Entry already unlocked, need to throw exception
duke@435 1265 __ MacroAssembler::call_VM(noreg,
duke@435 1266 CAST_FROM_FN_PTR(address,
duke@435 1267 InterpreterRuntime::throw_illegal_monitor_state_exception));
duke@435 1268 __ should_not_reach_here();
duke@435 1269
duke@435 1270 __ bind(unlock);
duke@435 1271 __ unlock_object(c_rarg1);
duke@435 1272 }
duke@435 1273 __ bind(L);
duke@435 1274 }
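// [Editorial sketch] The 'monitor' address used above locates the single
// BasicObjectLock of this synchronized native method; with the offsets
// used in this file it is equivalent to:
//
//   (BasicObjectLock*)((intptr_t)rbp
//       + frame::interpreter_frame_initial_sp_offset * wordSize
//       - sizeof(BasicObjectLock));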
duke@435 1275
duke@435 1276 // jvmti support
duke@435 1277 // Note: This must happen _after_ handling/throwing any exceptions since
duke@435 1278 // the exception handler code notifies the runtime of method exits
duke@435 1279 // too. If this happens before, method entry/exit notifications are
duke@435 1280 // not properly paired (was bug - gri 11/22/99).
duke@435 1281 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
duke@435 1282
duke@435 1283 // restore potential result in rax, call result handler to
duke@435 1284 // restore potential result in xmm0 & handle result
duke@435 1285
duke@435 1286 __ pop(ltos);
duke@435 1287 __ pop(dtos);
duke@435 1288
never@739 1289 __ movptr(t, Address(rbp,
never@739 1290 (frame::interpreter_frame_result_handler_offset) * wordSize));
duke@435 1291 __ call(t);
duke@435 1292
duke@435 1293 // remove activation
never@739 1294 __ movptr(t, Address(rbp,
never@739 1295 frame::interpreter_frame_sender_sp_offset *
never@739 1296 wordSize)); // get sender sp
duke@435 1297 __ leave(); // remove frame anchor
never@739 1298 __ pop(rdi); // get return address
never@739 1299 __ mov(rsp, t); // set sp to sender sp
duke@435 1300 __ jmp(rdi);
duke@435 1301
duke@435 1302 if (inc_counter) {
duke@435 1303 // Handle overflow of counter and compile method
duke@435 1304 __ bind(invocation_counter_overflow);
duke@435 1305 generate_counter_overflow(&continue_after_compile);
duke@435 1306 }
duke@435 1307
duke@435 1308 return entry_point;
duke@435 1309 }
duke@435 1310
duke@435 1311 //
duke@435 1312 // Generic interpreted method entry to (asm) interpreter
duke@435 1313 //
duke@435 1314 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
duke@435 1315 // determine code generation flags
duke@435 1316 bool inc_counter = UseCompiler || CountCompiledCalls;
duke@435 1317
duke@435 1318 // ebx: methodOop
duke@435 1319 // r13: sender sp
duke@435 1320 address entry_point = __ pc();
duke@435 1321
duke@435 1322 const Address size_of_parameters(rbx,
duke@435 1323 methodOopDesc::size_of_parameters_offset());
duke@435 1324 const Address size_of_locals(rbx, methodOopDesc::size_of_locals_offset());
duke@435 1325 const Address invocation_counter(rbx,
duke@435 1326 methodOopDesc::invocation_counter_offset() +
duke@435 1327 InvocationCounter::counter_offset());
duke@435 1328 const Address access_flags(rbx, methodOopDesc::access_flags_offset());
duke@435 1329
duke@435 1330 // get parameter size (always needed)
jrose@1057 1331 __ load_unsigned_short(rcx, size_of_parameters);
duke@435 1332
duke@435 1333 // rbx: methodOop
duke@435 1334 // rcx: size of parameters
duke@435 1335 // r13: sender_sp (could differ from sp+wordSize if we were called via c2i)
duke@435 1336
jrose@1057 1337 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
duke@435 1338 __ subl(rdx, rcx); // rdx = no. of additional locals
duke@435 1339
duke@435 1340 // YYY
duke@435 1341 // __ incrementl(rdx);
duke@435 1342 // __ andl(rdx, -2);
duke@435 1343
duke@435 1344 // see if we've got enough room on the stack for locals plus overhead.
duke@435 1345 generate_stack_overflow_check();
duke@435 1346
duke@435 1347 // get return address
never@739 1348 __ pop(rax);
duke@435 1349
duke@435 1350 // compute beginning of parameters (r14)
never@739 1351 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
duke@435 1352
duke@435 1353 // rdx - # of additional locals
duke@435 1354 // allocate space for locals
duke@435 1355 // explicitly initialize locals
duke@435 1356 {
duke@435 1357 Label exit, loop;
duke@435 1358 __ testl(rdx, rdx);
duke@435 1359 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
duke@435 1360 __ bind(loop);
never@739 1361 __ push((int) NULL_WORD); // initialize local variables
duke@435 1362 __ decrementl(rdx); // until everything initialized
duke@435 1363 __ jcc(Assembler::greater, loop);
duke@435 1364 __ bind(exit);
duke@435 1365 }
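// [Editorial sketch] The loop above zero-initializes the additional
// locals on the downward-growing stack; the equivalent C form, with 'n'
// standing for the count computed in rdx:
//
//   for (int i = 0; i < n; i++) *--sp = NULL_WORD;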
duke@435 1366
duke@435 1367 // (pre-)fetch invocation count
duke@435 1368 if (inc_counter) {
duke@435 1369 __ movl(rcx, invocation_counter);
duke@435 1370 }
duke@435 1371 // initialize fixed part of activation frame
duke@435 1372 generate_fixed_frame(false);
duke@435 1373
duke@435 1374 // make sure method is not native & not abstract
duke@435 1375 #ifdef ASSERT
duke@435 1376 __ movl(rax, access_flags);
duke@435 1377 {
duke@435 1378 Label L;
duke@435 1379 __ testl(rax, JVM_ACC_NATIVE);
duke@435 1380 __ jcc(Assembler::zero, L);
duke@435 1381 __ stop("tried to execute native method as non-native");
duke@435 1382 __ bind(L);
duke@435 1383 }
duke@435 1384 {
duke@435 1385 Label L;
duke@435 1386 __ testl(rax, JVM_ACC_ABSTRACT);
duke@435 1387 __ jcc(Assembler::zero, L);
duke@435 1388 __ stop("tried to execute abstract method in interpreter");
duke@435 1389 __ bind(L);
duke@435 1390 }
duke@435 1391 #endif
duke@435 1392
duke@435 1393 // Since at this point in the method invocation the exception
duke@435 1394 // handler would try to exit the monitor of a synchronized method
duke@435 1395 // which has not been entered yet, we set the thread-local variable
duke@435 1396 // _do_not_unlock_if_synchronized to true. The remove_activation
duke@435 1397 // call will check this flag.
duke@435 1398
duke@435 1399 const Address do_not_unlock_if_synchronized(r15_thread,
duke@435 1400 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
duke@435 1401 __ movbool(do_not_unlock_if_synchronized, true);
duke@435 1402
duke@435 1403 // increment invocation count & check for overflow
duke@435 1404 Label invocation_counter_overflow;
duke@435 1405 Label profile_method;
duke@435 1406 Label profile_method_continue;
duke@435 1407 if (inc_counter) {
duke@435 1408 generate_counter_incr(&invocation_counter_overflow,
duke@435 1409 &profile_method,
duke@435 1410 &profile_method_continue);
duke@435 1411 if (ProfileInterpreter) {
duke@435 1412 __ bind(profile_method_continue);
duke@435 1413 }
duke@435 1414 }
duke@435 1415
duke@435 1416 Label continue_after_compile;
duke@435 1417 __ bind(continue_after_compile);
duke@435 1418
duke@435 1419 // bang the stack shadow pages to check for available stack space
duke@435 1420 bang_stack_shadow_pages(false);
duke@435 1421
duke@435 1422 // reset the _do_not_unlock_if_synchronized flag
duke@435 1423 __ movbool(do_not_unlock_if_synchronized, false);
duke@435 1424
duke@435 1425 // check for synchronized methods
duke@435 1426 // Must happen AFTER invocation_counter check and stack overflow check,
duke@435 1427 // so the method is not locked if the counter overflows.
duke@435 1428 if (synchronized) {
duke@435 1429 // Allocate monitor and lock method
duke@435 1430 lock_method();
duke@435 1431 } else {
duke@435 1432 // no synchronization necessary
duke@435 1433 #ifdef ASSERT
duke@435 1434 {
duke@435 1435 Label L;
duke@435 1436 __ movl(rax, access_flags);
duke@435 1437 __ testl(rax, JVM_ACC_SYNCHRONIZED);
duke@435 1438 __ jcc(Assembler::zero, L);
duke@435 1439 __ stop("method needs synchronization");
duke@435 1440 __ bind(L);
duke@435 1441 }
duke@435 1442 #endif
duke@435 1443 }
duke@435 1444
duke@435 1445 // start execution
duke@435 1446 #ifdef ASSERT
duke@435 1447 {
duke@435 1448 Label L;
duke@435 1449 const Address monitor_block_top (rbp,
duke@435 1450 frame::interpreter_frame_monitor_block_top_offset * wordSize);
never@739 1451 __ movptr(rax, monitor_block_top);
never@739 1452 __ cmpptr(rax, rsp);
duke@435 1453 __ jcc(Assembler::equal, L);
duke@435 1454 __ stop("broken stack frame setup in interpreter");
duke@435 1455 __ bind(L);
duke@435 1456 }
duke@435 1457 #endif
duke@435 1458
duke@435 1459 // jvmti support
duke@435 1460 __ notify_method_entry();
duke@435 1461
duke@435 1462 __ dispatch_next(vtos);
duke@435 1463
duke@435 1464 // invocation counter overflow
duke@435 1465 if (inc_counter) {
duke@435 1466 if (ProfileInterpreter) {
duke@435 1467 // We have decided to profile this method in the interpreter
duke@435 1468 __ bind(profile_method);
iveresov@2438 1469 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
iveresov@2438 1470 __ set_method_data_pointer_for_bcp();
iveresov@2461 1471 __ get_method(rbx);
duke@435 1472 __ jmp(profile_method_continue);
duke@435 1473 }
duke@435 1474 // Handle overflow of counter and compile method
duke@435 1475 __ bind(invocation_counter_overflow);
duke@435 1476 generate_counter_overflow(&continue_after_compile);
duke@435 1477 }
duke@435 1478
duke@435 1479 return entry_point;
duke@435 1480 }
duke@435 1481
duke@435 1482 // Entry points
duke@435 1483 //
duke@435 1484 // Here we generate the various kinds of entries into the interpreter.
duke@435 1485 // The two main entry types are generic bytecode methods and native
duke@435 1486 // call methods. These both come in synchronized and non-synchronized
duke@435 1487 // versions, but the frame layouts they create are very similar. The
duke@435 1488 // other method entry types are special-purpose entries that combine
duke@435 1489 // entry and interpretation all in one. These are for trivial methods
duke@435 1490 // like accessor, empty, or special math methods.
duke@435 1491 //
duke@435 1492 // When control flow reaches any of the entry types for the interpreter
duke@435 1493 // the following holds ->
duke@435 1494 //
duke@435 1495 // Arguments:
duke@435 1496 //
duke@435 1497 // rbx: methodOop
duke@435 1498 //
duke@435 1499 // Stack layout immediately at entry
duke@435 1500 //
duke@435 1501 // [ return address ] <--- rsp
duke@435 1502 // [ parameter n ]
duke@435 1503 // ...
duke@435 1504 // [ parameter 1 ]
duke@435 1505 // [ expression stack ] (caller's java expression stack)
duke@435 1506
duke@435 1507 // Assuming that we don't go to one of the trivial specialized entries
duke@435 1508 // the stack will look like below when we are ready to execute the
duke@435 1509 // first bytecode (or call the native routine). The register usage
duke@435 1510 // will be as the template based interpreter expects (see
duke@435 1511 // interpreter_amd64.hpp).
duke@435 1512 //
duke@435 1513 // local variables follow the incoming parameters immediately; i.e.,
duke@435 1514 // the return address is moved to the end of the locals.
duke@435 1515 //
duke@435 1516 // [ monitor entry ] <--- rsp
duke@435 1517 // ...
duke@435 1518 // [ monitor entry ]
duke@435 1519 // [ expr. stack bottom ]
duke@435 1520 // [ saved r13 ]
duke@435 1521 // [ current r14 ]
duke@435 1522 // [ methodOop ]
duke@435 1523 // [ saved ebp ] <--- rbp
duke@435 1524 // [ return address ]
duke@435 1525 // [ local variable m ]
duke@435 1526 // ...
duke@435 1527 // [ local variable 1 ]
duke@435 1528 // [ parameter n ]
duke@435 1529 // ...
duke@435 1530 // [ parameter 1 ] <--- r14
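// [Editorial note] Given this layout, slot i of the locals (parameters
// included) sits at a fixed word offset below r14; illustratively:
//
//   intptr_t* local_i = r14_locals_base - i;  // word-indexed, i in [0, max_locals)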
duke@435 1531
duke@435 1532 address AbstractInterpreterGenerator::generate_method_entry(
duke@435 1533 AbstractInterpreter::MethodKind kind) {
duke@435 1534 // determine code generation flags
duke@435 1535 bool synchronized = false;
duke@435 1536 address entry_point = NULL;
duke@435 1537
duke@435 1538 switch (kind) {
duke@435 1539 case Interpreter::zerolocals : break;
duke@435 1540 case Interpreter::zerolocals_synchronized: synchronized = true; break;
duke@435 1541 case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
duke@435 1542 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
duke@435 1543 case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
duke@435 1544 case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
duke@435 1545 case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
jrose@1145 1546 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break;
never@1141 1547
never@1141 1548 case Interpreter::java_lang_math_sin : // fall thru
never@1141 1549 case Interpreter::java_lang_math_cos : // fall thru
never@1141 1550 case Interpreter::java_lang_math_tan : // fall thru
never@1141 1551 case Interpreter::java_lang_math_abs : // fall thru
never@1141 1552 case Interpreter::java_lang_math_log : // fall thru
never@1141 1553 case Interpreter::java_lang_math_log10 : // fall thru
duke@435 1554 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
johnc@2781 1555 case Interpreter::java_lang_ref_reference_get
johnc@2781 1556 : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
duke@435 1557 default : ShouldNotReachHere(); break;
duke@435 1558 }
duke@435 1559
duke@435 1560 if (entry_point) {
duke@435 1561 return entry_point;
duke@435 1562 }
duke@435 1563
duke@435 1564 return ((InterpreterGenerator*) this)->
duke@435 1565 generate_normal_entry(synchronized);
duke@435 1566 }
duke@435 1567
never@1609 1568 // These methods are never compiled: if they were, the interpreter
never@1609 1569 // would prefer the compiled version to the intrinsic version.
never@1609 1570 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
never@1609 1571 switch (method_kind(m)) {
never@1609 1572 case Interpreter::java_lang_math_sin : // fall thru
never@1609 1573 case Interpreter::java_lang_math_cos : // fall thru
never@1609 1574 case Interpreter::java_lang_math_tan : // fall thru
never@1609 1575 case Interpreter::java_lang_math_abs : // fall thru
never@1609 1576 case Interpreter::java_lang_math_log : // fall thru
never@1609 1577 case Interpreter::java_lang_math_log10 : // fall thru
never@1609 1578 case Interpreter::java_lang_math_sqrt :
never@1609 1579 return false;
never@1609 1580 default:
never@1609 1581 return true;
never@1609 1582 }
never@1609 1583 }
never@1609 1584
duke@435 1585 // How much stack a method activation needs in words.
duke@435 1586 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
duke@435 1587 const int entry_size = frame::interpreter_frame_monitor_size();
duke@435 1588
duke@435 1589 // total overhead size: entry_size + (saved rbp thru expr stack
duke@435 1590 // bottom). be sure to change this if you add/subtract anything
duke@435 1591 // to/from the overhead area
duke@435 1592 const int overhead_size =
duke@435 1593 -(frame::interpreter_frame_initial_sp_offset) + entry_size;
duke@435 1594
duke@435 1595 const int stub_code = frame::entry_frame_after_call_words;
jrose@1145 1596 const int extra_stack = methodOopDesc::extra_stack_entries();
jrose@1145 1597 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
twisti@1861 1598 Interpreter::stackElementWords;
duke@435 1599 return (overhead_size + method_stack + stub_code);
duke@435 1600 }
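// [Editorial example, hypothetical numbers] For a method with
// max_locals == 4 and max_stack == 6, the function above reduces to:
//
//   overhead_size
//     + (4 + 6 + methodOopDesc::extra_stack_entries()) * Interpreter::stackElementWords
//     + frame::entry_frame_after_call_words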
duke@435 1601
duke@435 1602 int AbstractInterpreter::layout_activation(methodOop method,
duke@435 1603 int tempcount,
duke@435 1604 int popframe_extra_args,
duke@435 1605 int moncount,
duke@435 1606 int callee_param_count,
duke@435 1607 int callee_locals,
duke@435 1608 frame* caller,
duke@435 1609 frame* interpreter_frame,
duke@435 1610 bool is_top_frame) {
duke@435 1611 // Note: This calculation must exactly parallel the frame setup
duke@435 1612 // in AbstractInterpreterGenerator::generate_method_entry.
duke@435 1613 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
duke@435 1614 // The frame interpreter_frame, if not NULL, is guaranteed to be the
duke@435 1615 // right size, as determined by a previous call to this method.
duke@435 1616 // It is also guaranteed to be walkable even though it is in a skeletal state.
duke@435 1617
duke@435 1618 // fixed size of an interpreter frame:
twisti@1861 1619 int max_locals = method->max_locals() * Interpreter::stackElementWords;
duke@435 1620 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
twisti@1861 1621 Interpreter::stackElementWords;
duke@435 1622
duke@435 1623 int overhead = frame::sender_sp_offset -
duke@435 1624 frame::interpreter_frame_initial_sp_offset;
duke@435 1625 // Our locals were accounted for by the caller (or last_frame_adjust
duke@435 1626 // on the transition). Since the callee parameters already account
duke@435 1627 // for the callee's params, we only need to account for the extra
duke@435 1628 // locals.
duke@435 1629 int size = overhead +
twisti@1861 1630 (callee_locals - callee_param_count)*Interpreter::stackElementWords +
duke@435 1631 moncount * frame::interpreter_frame_monitor_size() +
twisti@1861 1632 tempcount* Interpreter::stackElementWords + popframe_extra_args;
duke@435 1633 if (interpreter_frame != NULL) {
duke@435 1634 #ifdef ASSERT
twisti@1570 1635 if (!EnableMethodHandles)
twisti@1570 1636 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
twisti@1570 1637 // Probably, since deoptimization doesn't work yet.
twisti@1570 1638 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
duke@435 1639 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
duke@435 1640 #endif
duke@435 1641
duke@435 1642 interpreter_frame->interpreter_frame_set_method(method);
duke@435 1643 // NOTE the difference between sender_sp and
duke@435 1644 // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
duke@435 1645 // the original sp of the caller (the unextended_sp) and
duke@435 1646 // sender_sp is fp+16 XXX
duke@435 1647 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
duke@435 1648
duke@435 1649 interpreter_frame->interpreter_frame_set_locals(locals);
duke@435 1650 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
duke@435 1651 BasicObjectLock* monbot = montop - moncount;
duke@435 1652 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
duke@435 1653
duke@435 1654 // Set last_sp
duke@435 1655 intptr_t* esp = (intptr_t*) monbot -
twisti@1861 1656 tempcount*Interpreter::stackElementWords -
duke@435 1657 popframe_extra_args;
duke@435 1658 interpreter_frame->interpreter_frame_set_last_sp(esp);
duke@435 1659
duke@435 1660 // All frames but the initial (oldest) interpreter frame we fill in have
duke@435 1661 // a value for sender_sp that allows walking the stack but isn't
duke@435 1662 // truly correct. Correct the value here.
duke@435 1663 if (extra_locals != 0 &&
duke@435 1664 interpreter_frame->sender_sp() ==
duke@435 1665 interpreter_frame->interpreter_frame_sender_sp()) {
duke@435 1666 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
duke@435 1667 extra_locals);
duke@435 1668 }
duke@435 1669 *interpreter_frame->interpreter_frame_cache_addr() =
duke@435 1670 method->constants()->cache();
duke@435 1671 }
duke@435 1672 return size;
duke@435 1673 }
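// [Editorial note] layout_activation is used in two phases: deoptimization
// first calls it with interpreter_frame == NULL to size the skeletal frame,
// then calls it again with the allocated frame so the method, locals,
// monitors and last_sp are filled in. A caller-side sketch (names are
// illustrative, not the actual deoptimization code):
//
//   int words = AbstractInterpreter::layout_activation(m, ..., NULL, false);
//   // allocate 'words' of frame, then:
//   AbstractInterpreter::layout_activation(m, ..., skeletal_frame, false);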
duke@435 1674
duke@435 1675 //-----------------------------------------------------------------------------
duke@435 1676 // Exceptions
duke@435 1677
duke@435 1678 void TemplateInterpreterGenerator::generate_throw_exception() {
duke@435 1679 // Entry point in previous activation (i.e., if the caller was
duke@435 1680 // interpreted)
duke@435 1681 Interpreter::_rethrow_exception_entry = __ pc();
duke@435 1682 // Restore sp to interpreter_frame_last_sp even though we are going
duke@435 1683 // to empty the expression stack for the exception processing.
never@739 1684 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
duke@435 1685 // rax: exception
duke@435 1686 // rdx: return address/pc that threw exception
duke@435 1687 __ restore_bcp(); // r13 points to call/send
duke@435 1688 __ restore_locals();
coleenp@548 1689 __ reinit_heapbase(); // restore r12 as heapbase.
duke@435 1690 // Entry point for exceptions thrown within interpreter code
duke@435 1691 Interpreter::_throw_exception_entry = __ pc();
duke@435 1692 // expression stack is undefined here
duke@435 1693 // rax: exception
duke@435 1694 // r13: exception bcp
duke@435 1695 __ verify_oop(rax);
never@739 1696 __ mov(c_rarg1, rax);
duke@435 1697
duke@435 1698 // expression stack must be empty before entering the VM in case of
duke@435 1699 // an exception
duke@435 1700 __ empty_expression_stack();
duke@435 1701 // find exception handler address and preserve exception oop
duke@435 1702 __ call_VM(rdx,
duke@435 1703 CAST_FROM_FN_PTR(address,
duke@435 1704 InterpreterRuntime::exception_handler_for_exception),
duke@435 1705 c_rarg1);
duke@435 1706 // rax: exception handler entry point
duke@435 1707 // rdx: preserved exception oop
duke@435 1708 // r13: bcp for exception handler
duke@435 1709 __ push_ptr(rdx); // push exception which is now the only value on the stack
duke@435 1710 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
duke@435 1711
duke@435 1712 // If the exception is not handled in the current frame the frame is
duke@435 1713 // removed and the exception is rethrown (i.e. exception
duke@435 1714 // continuation is _rethrow_exception).
duke@435 1715 //
duke@435 1716 // Note: At this point the bci still refers to the instruction
duke@435 1717 // which caused the exception and the expression stack is
duke@435 1718 // empty. Thus, for any VM calls at this point, GC will find a legal
duke@435 1719 // oop map (with empty expression stack).
duke@435 1720
duke@435 1721 // In current activation
duke@435 1722 // tos: exception
duke@435 1723 // r13: exception bcp
duke@435 1724
duke@435 1725 //
duke@435 1726 // JVMTI PopFrame support
duke@435 1727 //
duke@435 1728
duke@435 1729 Interpreter::_remove_activation_preserving_args_entry = __ pc();
duke@435 1730 __ empty_expression_stack();
duke@435 1731 // Set the popframe_processing bit in pending_popframe_condition
duke@435 1732 // indicating that we are currently handling popframe, so that
duke@435 1733 // call_VMs that may happen later do not trigger new popframe
duke@435 1734 // handling cycles.
duke@435 1735 __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
duke@435 1736 __ orl(rdx, JavaThread::popframe_processing_bit);
duke@435 1737 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);
duke@435 1738
duke@435 1739 {
duke@435 1740 // Check to see whether we are returning to a deoptimized frame.
duke@435 1741 // (The PopFrame call ensures that the caller of the popped frame is
duke@435 1742 // either interpreted or compiled and deoptimizes it if compiled.)
duke@435 1743 // In this case, we can't call dispatch_next() after the frame is
duke@435 1744 // popped, but instead must save the incoming arguments and restore
duke@435 1745 // them after deoptimization has occurred.
duke@435 1746 //
duke@435 1747 // Note that we don't compare the return PC against the
duke@435 1748 // deoptimization blob's unpack entry because of the presence of
duke@435 1749 // adapter frames in C2.
duke@435 1750 Label caller_not_deoptimized;
never@739 1751 __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
duke@435 1752 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
duke@435 1753 InterpreterRuntime::interpreter_contains), c_rarg1);
duke@435 1754 __ testl(rax, rax);
duke@435 1755 __ jcc(Assembler::notZero, caller_not_deoptimized);
duke@435 1756
duke@435 1757 // Compute size of arguments for saving when returning to
duke@435 1758 // deoptimized caller
duke@435 1759 __ get_method(rax);
jrose@1057 1760 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
duke@435 1761 size_of_parameters_offset())));
twisti@1861 1762 __ shll(rax, Interpreter::logStackElementSize);
duke@435 1763 __ restore_locals(); // XXX do we need this?
never@739 1764 __ subptr(r14, rax);
never@739 1765 __ addptr(r14, wordSize);
duke@435 1766 // Save these arguments
duke@435 1767 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
duke@435 1768 Deoptimization::
duke@435 1769 popframe_preserve_args),
duke@435 1770 r15_thread, rax, r14);
duke@435 1771
duke@435 1772 __ remove_activation(vtos, rdx,
duke@435 1773 /* throw_monitor_exception */ false,
duke@435 1774 /* install_monitor_exception */ false,
duke@435 1775 /* notify_jvmdi */ false);
duke@435 1776
duke@435 1777 // Inform deoptimization that it is responsible for restoring
duke@435 1778 // these arguments
duke@435 1779 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
duke@435 1780 JavaThread::popframe_force_deopt_reexecution_bit);
duke@435 1781
duke@435 1782 // Continue in deoptimization handler
duke@435 1783 __ jmp(rdx);
duke@435 1784
duke@435 1785 __ bind(caller_not_deoptimized);
duke@435 1786 }
duke@435 1787
duke@435 1788 __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
duke@435 1789 /* throw_monitor_exception */ false,
duke@435 1790 /* install_monitor_exception */ false,
duke@435 1791 /* notify_jvmdi */ false);
duke@435 1792
duke@435 1793 // Finish with popframe handling
duke@435 1794 // A previous I2C followed by a deoptimization might have moved the
duke@435 1795 // outgoing arguments further up the stack. PopFrame expects the
duke@435 1796 // mutations to those outgoing arguments to be preserved and other
duke@435 1797 // constraints basically require this frame to look exactly as
duke@435 1798 // though it had previously invoked an interpreted activation with
duke@435 1799 // no space between the top of the expression stack (current
duke@435 1800 // last_sp) and the top of stack. Rather than force deopt to
duke@435 1801 // maintain this kind of invariant all the time we call a small
duke@435 1802 // fixup routine to move the mutated arguments onto the top of our
duke@435 1803 // expression stack if necessary.
never@739 1804 __ mov(c_rarg1, rsp);
never@739 1805 __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
duke@435 1806 // PC must point into interpreter here
duke@435 1807 __ set_last_Java_frame(noreg, rbp, __ pc());
duke@435 1808 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
duke@435 1809 __ reset_last_Java_frame(true, true);
duke@435 1810 // Restore the last_sp and null it out
never@739 1811 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
never@739 1812 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
duke@435 1813
duke@435 1814 __ restore_bcp(); // XXX do we need this?
duke@435 1815 __ restore_locals(); // XXX do we need this?
duke@435 1816 // The method data pointer was incremented already during
duke@435 1817 // call profiling. We have to restore the mdp for the current bcp.
duke@435 1818 if (ProfileInterpreter) {
duke@435 1819 __ set_method_data_pointer_for_bcp();
duke@435 1820 }
duke@435 1821
duke@435 1822 // Clear the popframe condition flag
duke@435 1823 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
duke@435 1824 JavaThread::popframe_inactive);
duke@435 1825
duke@435 1826 __ dispatch_next(vtos);
duke@435 1827 // end of PopFrame support
duke@435 1828
duke@435 1829 Interpreter::_remove_activation_entry = __ pc();
duke@435 1830
duke@435 1831 // preserve exception over this code sequence
duke@435 1832 __ pop_ptr(rax);
never@739 1833 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
duke@435 1834 // remove the activation (without doing throws on illegalMonitorExceptions)
duke@435 1835 __ remove_activation(vtos, rdx, false, true, false);
duke@435 1836 // restore exception
never@739 1837 __ movptr(rax, Address(r15_thread, JavaThread::vm_result_offset()));
never@739 1838 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
duke@435 1839 __ verify_oop(rax);
duke@435 1840
duke@435 1841 // In between activations - previous activation type unknown yet
duke@435 1842 // compute continuation point - the continuation point expects the
duke@435 1843 // following registers set up:
duke@435 1844 //
duke@435 1845 // rax: exception
duke@435 1846 // rdx: return address/pc that threw exception
duke@435 1847 // rsp: expression stack of caller
duke@435 1848 // rbp: ebp of caller
never@739 1849 __ push(rax); // save exception
never@739 1850 __ push(rdx); // save return address
duke@435 1851 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
duke@435 1852 SharedRuntime::exception_handler_for_return_address),
twisti@1730 1853 r15_thread, rdx);
never@739 1854 __ mov(rbx, rax); // save exception handler
never@739 1855 __ pop(rdx); // restore return address
never@739 1856 __ pop(rax); // restore exception
duke@435 1857 // Note that an "issuing PC" is actually the next PC after the call
duke@435 1858 __ jmp(rbx); // jump to exception
duke@435 1859 // handler of caller
duke@435 1860 }
duke@435 1861
duke@435 1862
duke@435 1863 //
duke@435 1864 // JVMTI ForceEarlyReturn support
duke@435 1865 //
duke@435 1866 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
duke@435 1867 address entry = __ pc();
duke@435 1868
duke@435 1869 __ restore_bcp();
duke@435 1870 __ restore_locals();
duke@435 1871 __ empty_expression_stack();
duke@435 1872 __ load_earlyret_value(state);
duke@435 1873
never@739 1874 __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
duke@435 1875 Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
duke@435 1876
duke@435 1877 // Clear the earlyret state
duke@435 1878 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
duke@435 1879
duke@435 1880 __ remove_activation(state, rsi,
duke@435 1881 false, /* throw_monitor_exception */
duke@435 1882 false, /* install_monitor_exception */
duke@435 1883 true); /* notify_jvmdi */
duke@435 1884 __ jmp(rsi);
duke@435 1885
duke@435 1886 return entry;
duke@435 1887 } // end of ForceEarlyReturn support
duke@435 1888
duke@435 1889
duke@435 1890 //-----------------------------------------------------------------------------
duke@435 1891 // Helper for vtos entry point generation
duke@435 1892
duke@435 1893 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
duke@435 1894 address& bep,
duke@435 1895 address& cep,
duke@435 1896 address& sep,
duke@435 1897 address& aep,
duke@435 1898 address& iep,
duke@435 1899 address& lep,
duke@435 1900 address& fep,
duke@435 1901 address& dep,
duke@435 1902 address& vep) {
duke@435 1903 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
duke@435 1904 Label L;
duke@435 1905 aep = __ pc(); __ push_ptr(); __ jmp(L);
duke@435 1906 fep = __ pc(); __ push_f(); __ jmp(L);
duke@435 1907 dep = __ pc(); __ push_d(); __ jmp(L);
duke@435 1908 lep = __ pc(); __ push_l(); __ jmp(L);
duke@435 1909 bep = cep = sep =
duke@435 1910 iep = __ pc(); __ push_i();
duke@435 1911 vep = __ pc();
duke@435 1912 __ bind(L);
duke@435 1913 generate_and_dispatch(t);
duke@435 1914 }
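// [Editorial note] The helper above makes every tos-state entry simply
// push the register-cached value and fall into the shared vtos path, so
// one template yields entry points of the shape (illustrative):
//
//   aep: push_ptr; goto vep;   fep: push_f; goto vep;   lep: push_l; ...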
duke@435 1915
duke@435 1916
duke@435 1917 //-----------------------------------------------------------------------------
duke@435 1918 // Generation of individual instructions
duke@435 1919
duke@435 1920 // helpers for generate_and_dispatch
duke@435 1921
duke@435 1922
duke@435 1923 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
duke@435 1924 : TemplateInterpreterGenerator(code) {
duke@435 1925 generate_all(); // down here so it can be "virtual"
duke@435 1926 }
duke@435 1927
duke@435 1928 //-----------------------------------------------------------------------------
duke@435 1929
duke@435 1930 // Non-product code
duke@435 1931 #ifndef PRODUCT
duke@435 1932 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
duke@435 1933 address entry = __ pc();
duke@435 1934
duke@435 1935 __ push(state);
never@739 1936 __ push(c_rarg0);
never@739 1937 __ push(c_rarg1);
never@739 1938 __ push(c_rarg2);
never@739 1939 __ push(c_rarg3);
never@739 1940 __ mov(c_rarg2, rax); // Pass itos
duke@435 1941 #ifdef _WIN64
duke@435 1942 __ movflt(xmm3, xmm0); // Pass ftos
duke@435 1943 #endif
duke@435 1944 __ call_VM(noreg,
duke@435 1945 CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
duke@435 1946 c_rarg1, c_rarg2, c_rarg3);
never@739 1947 __ pop(c_rarg3);
never@739 1948 __ pop(c_rarg2);
never@739 1949 __ pop(c_rarg1);
never@739 1950 __ pop(c_rarg0);
duke@435 1951 __ pop(state);
duke@435 1952 __ ret(0); // return from result handler
duke@435 1953
duke@435 1954 return entry;
duke@435 1955 }
duke@435 1956
duke@435 1957 void TemplateInterpreterGenerator::count_bytecode() {
duke@435 1958 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
duke@435 1959 }
duke@435 1960
duke@435 1961 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
duke@435 1962 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
duke@435 1963 }
duke@435 1964
duke@435 1965 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
duke@435 1966 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
duke@435 1967 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
duke@435 1968 __ orl(rbx,
duke@435 1969 ((int) t->bytecode()) <<
duke@435 1970 BytecodePairHistogram::log2_number_of_codes);
duke@435 1971 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
duke@435 1972 __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
duke@435 1973 __ incrementl(Address(rscratch1, rbx, Address::times_4));
duke@435 1974 }
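// [Editorial example] The pair index above encodes (previous, current)
// bytecodes in a single integer; assuming log2_number_of_codes == 8 for
// illustration, the generated code computes:
//
//   _index = (_index >> 8) | (current_bytecode << 8);
//   _counters[_index]++;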
duke@435 1975
duke@435 1976
duke@435 1977 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
duke@435 1978 // Call a little run-time stub to avoid blow-up for each bytecode.
duke@435 1979 // The runtime stub saves the right registers, depending on
duke@435 1980 // the tosca in-state for the given template.
duke@435 1981
duke@435 1982 assert(Interpreter::trace_code(t->tos_in()) != NULL,
duke@435 1983 "entry must have been generated");
coleenp@2318 1984 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
never@739 1985 __ andptr(rsp, -16); // align stack as required by ABI
duke@435 1986 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
never@739 1987 __ mov(rsp, r12); // restore sp
coleenp@548 1988 __ reinit_heapbase();
duke@435 1989 }
duke@435 1990
duke@435 1991
duke@435 1992 void TemplateInterpreterGenerator::stop_interpreter_at() {
duke@435 1993 Label L;
duke@435 1994 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
duke@435 1995 StopInterpreterAt);
duke@435 1996 __ jcc(Assembler::notEqual, L);
duke@435 1997 __ int3();
duke@435 1998 __ bind(L);
duke@435 1999 }
duke@435 2000 #endif // !PRODUCT
never@739 2001 #endif // ! CC_INTERP
