Mon, 20 Aug 2012 09:58:58 -0700
7190310: Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
Summary: In C2 add software membar after load from Reference.referent field to prevent commoning of loads across safepoints, since the GC can change its value. In C1 always generate the Reference.get() intrinsic.
Reviewed-by: roland, twisti, dholmes, johnc
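A hedged sketch of the failure mode, as a plain C++ analogue (the actual bug involves Java code calling WeakReference.get() in a loop; all names below are illustrative only): if the compiler commons the load of a field that another thread -- here the GC -- may change, a loop that should terminate can spin forever.

struct ExampleReference { void* referent; };   // stands in for Reference.referent
static void example_non_terminating_wait(ExampleReference* r) {
  // Nothing in the loop body appears to change r->referent, so an optimizer
  // may hoist the load out of the loop -- the same commoning across
  // safepoints that this change prevents in C2.
  while (r->referent != 0) { /* spin until the GC clears the referent */ }
}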
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"

#define __ _masm->

#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);  // rax, = maximal rsp for current rbp,
                          // (stack grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx,
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, interpreter returns FP results in xmm0 but they need
  // to end up back on the FPU so it can operate on them.
  if (incoming_state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (incoming_state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                       ConstantPoolCacheEntry::flags_offset()));
  __ andptr(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}
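
// Hedged illustration (plain C++, not part of the interpreter) of the
// xmm0 -> ST0 handoff above: an SSE FP result is spilled to a stack slot and
// reloaded with an x87 load so it ends up on the FP stack, where the template
// interpreter expects FP values. Names below are illustrative only.
static double example_sse_to_x87(float in_xmm0) {
  volatile float slot = in_xmm0;  // movflt(Address(rsp, 0), xmm0)
  return (double)slot;            // fld_s(Address(rsp, 0)): reload the value
}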

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx,: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
  // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmpb(done);
    }
    __ bind(no_mdo);
    // Increment counter in methodOop (we don't need to load it, it's in rcx).
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() +
                                         InvocationCounter::counter_offset());

    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
      __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rax, backedge_counter);                     // load backedge counter

    __ incrementl(rcx, InvocationCounter::count_increment);
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

    __ movl(invocation_counter, rcx);                   // save invocation count
    __ addl(rcx, rax);                                  // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call.
    // BytecodeInterpreter only calls for native methods, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
  }
}
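
// Hedged illustration (plain C++, not part of the interpreter) of the
// arithmetic increment_mask_and_jump performs above. The shift value is
// illustrative only; the real constants live in InvocationCounter.
static bool example_increment_and_check(int* counter, int notify_freq_log) {
  const int count_shift     = 3;                 // status bits sit below the count
  const int count_increment = 1 << count_shift;
  const int mask = ((1 << notify_freq_log) - 1) << count_shift;
  *counter += count_increment;                   // bump the shifted count
  return (*counter & mask) == 0;                 // take the overflow path once
                                                 // every 2^notify_freq_log calls
}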

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx, - method
  // rdx - cpool
  // rbp, - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx, - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx,: methodOop

  // destroyed on exit
  // rax,

  // NOTE: since the additional locals are also always pushed (this was not
  // obvious in generate_method_entry), the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (c++ prev state ).

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax);  // return address must be moved if SP is changed
  __ mov(rsp, rsi);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}
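
// Hedged illustration (plain C++, not part of the interpreter) of the limit
// computation above: the frame fits if rsp stays above the lowest usable
// stack address plus the guard pages we might bang plus this frame's needs.
// All names are illustrative; the real offsets come from frame_x86.hpp.
static bool example_frame_fits(char* sp, char* stack_base, long stack_size,
                               long locals_plus_overhead_bytes,
                               int max_guard_pages, long page_size) {
  char* limit = stack_base - stack_size        // lowest address of the stack
              + max_guard_pages * page_size    // red/yellow/shadow pages
              + locals_plus_overhead_bytes;    // what this frame will use
  return sp > limit;                           // stack grows downward
}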

// Allocate monitor and lock method (asm interpreter)
// rbx, - methodOop
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
    __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
  __ mov(rdx, rsp);                                                     // object address
  __ lock_object(rdx);
}
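
// Hedged sketch (plain C++, illustrative types only) of the lock-object
// selection above: static methods lock the class mirror reached through the
// constant pool holder, instance methods lock the receiver in local 0.
struct ExampleMethod { bool is_static; void* receiver; void* holder_mirror; };
static void* example_lock_object(const ExampleMethod* m) {
  return m->is_static ? m->holder_mirror  // constants()->pool_holder()->java_mirror()
                      : m->receiver;      // local 0
}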

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);                                       // save return address
  __ enter();                                         // save old & set new rbp,

  __ push(rsi);                                       // set sender sp
  __ push((int32_t)NULL_WORD);                        // leave last_sp as null
  __ movptr(rsi, Address(rbx,methodOopDesc::const_offset()));    // get constMethodOop
  __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset()));  // get codebase
  __ push(rbx);                                       // save methodOop
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                     // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
  __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
  __ push(rdx);                                       // set constant pool cache
  __ push(rdi);                                       // set locals pointer
  if (native_call) {
    __ push(0);                                       // no bcp
  } else {
    __ push(rsi);                                     // set bcp
  }
  __ push(0);                                         // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                    // set expression stack bottom
}
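
// Hedged picture (plain C++, illustrative only) of the fixed frame built
// above, lowest address first; the real offsets are the
// frame::interpreter_frame_*_offset constants in frame_x86.hpp.
struct ExampleFixedFrame {
  void* expr_stack_bottom;  // rsp after setup points here
  void* bcp;                // 0 for native methods
  void* locals;             // rdi
  void* cp_cache;
  void* mdp;                // 0 when no methodDataOop
  void* method;             // rbx (methodOop)
  void* last_sp;            // NULL until the next Java call
  void* sender_sp;          // rsi at entry
  void* saved_rbp;          // rbp points here
  void* return_address;     // pushed before enter()
};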

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx,: methodOop
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx,: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
    __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
    __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));

    // rax,: local 0
    // rbx,: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32-bit word size.
    // This is OK for now, since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);                 // get return address
    __ mov(rsp, rsi);            // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;

}
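
// Hedged illustration (plain C++, not part of the interpreter) of the
// bytecode decode above: the first four code bytes of a resolved accessor,
// _aload_0 (0x2a), _getfield (0xb4) and a two-byte index, read as one
// little-endian word <index><0xb4><0x2a>; shifting right by two bytes leaves
// the constant pool cache index (assumed rewritten to native byte order).
static unsigned example_accessor_index(const unsigned char* codes) {
  unsigned word = (unsigned)codes[0]          // 0x2a: _aload_0
                | ((unsigned)codes[1] << 8)   // 0xb4: _getfield
                | ((unsigned)codes[2] << 16)
                | ((unsigned)codes[3] << 24);
  return word >> 16;                          // shrl(rdx, 2*BitsPerByte)
}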

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code below can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  // rbx,: methodOop
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0 (must be preserved across the G1 barrier call)
    //
    // rbx: method (at this point it's scratch)
    // rcx: receiver (at this point it's scratch)
    // rdx: scratch
    // rdi: scratch
    //
    // rsi: sender sp

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    __ push(rsi);

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ movptr(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ get_thread(rcx);
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            rcx /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rsi);                // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // SERIALGC

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
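
// Hedged model (plain C++, illustrative names) of the SATB obligation the
// pre-barrier above discharges: a referent loaded while concurrent marking
// is active must be logged so the marker treats it as live.
struct ExampleSATBThread { bool marking_active; void** queue; int top; };
static void* example_reference_get(ExampleSATBThread* t, void** referent_addr) {
  void* pre_val = *referent_addr;            // load the referent field
  if (t->marking_active && pre_val != 0) {
    t->queue[t->top++] = pre_val;            // g1_write_barrier_pre(pre_val)
  }
  return pre_val;
}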

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx,: methodOop
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must be preserved
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // rbx,: methodOop
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);                                       // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame

  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ movl(rax, access_flags);
        __ testl(rax, JVM_ACC_SYNCHRONIZED);
        __ jcc(Assembler::zero, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ verify_oop(method);
  __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator will blow RBX
  // sometime, so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax,
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, methodOopDesc:: const_offset()));
    __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
    __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }

  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ verify_oop(method);
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  // change thread state
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);

  // result potentially in rdx:rax or ST0

  // Either restore the MXCSR register after returning from the JNI Call
  // or verify that it wasn't changed.
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
    }
    else if (CheckJNICalls) {
      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }

  // Either restore the x87 floating pointer control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }

  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp.

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push(dtos);
    __ bind(L);
  }
  __ push(ltos);

  // change thread state
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if(os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
              handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }

  // restore rsi to have legal interpreter frame,
  // i.e., bci == 0 <=> rsi == code_base()
  // Can't call_VM until bcp is within a reasonable range.
  __ get_method(method);      // method is junk from thread_in_native to now.
  __ verify_oop(method);
  __ movptr(rsi, Address(method,methodOopDesc::const_offset()));   // get constMethodOop
  __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset()));    // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code used in call_VM_base();
    //       i.e., we should use the StubRoutines::forward_exception code. For now this
    //       doesn't work here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro assembler implementation
    { Label unlock;
      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

      __ lea(rdx, monitor);                   // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
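
// Hedged outline (plain C++, illustrative; the real states and fences are in
// JavaThread and the UseMembar/serialize_memory code above) of the thread
// state protocol the native wrapper follows after the JNI call returns.
enum ExampleThreadState { example_in_Java, example_in_native, example_in_native_trans };
static void example_native_transition(volatile int* state,
                                      bool (*safepoint_pending)(),
                                      void (*block_for_safepoint)()) {
  *state = example_in_native_trans;  // back from the JNI call
  // a full fence goes here (membar, or the serialization page write) so the
  // VM thread sees the state change before we read the safepoint state
  if (safepoint_pending()) {
    block_for_safepoint();           // check_special_condition_for_native_trans
  }
  *state = example_in_Java;          // safe to run Java code again
}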

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx,: methodOop
  // rsi: sender sp
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (rbx, methodOopDesc::size_of_locals_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx,: methodOop
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals);       // get size of locals in words
  __ subl(rdx, rcx);                                 // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);                      // initialize local variables
    __ decrement(rdx);                                // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ movl(rax, access_flags);
        __ testl(rax, JVM_ACC_SYNCHRONIZED);
        __ jcc(Assembler::zero, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
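
// Hedged one-liner (plain C++, illustrative) of the locals-clearing loop in
// the entry above: one NULL is pushed per additional local, so locals are
// always zero-initialized before the first bytecode runs.
static void example_clear_locals(long* sp, int additional_locals) {
  while (additional_locals-- > 0) *--sp = 0;  // push((int32_t)NULL_WORD)
}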

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions,
// but the frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry and
// interpretation all in one. These are for trivial methods like accessor,
// empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: methodOop
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address   ] <--- rsp
// [ parameter n      ]
//   ...
// [ parameter 1      ]
// [ expression stack ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately (i.e.,
// the return address is moved to the end of the locals).
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rsi          ]
// [ current rdi        ]
// [ methodOop          ]
// [ saved rbp,         ] <--- rbp,
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- rdi
1503 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
1504 // determine code generation flags
1505 bool synchronized = false;
1506 address entry_point = NULL;
1508 switch (kind) {
1509 case Interpreter::zerolocals : break;
1510 case Interpreter::zerolocals_synchronized: synchronized = true; break;
1511 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
1512 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
1513 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
1514 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
1515 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
1517 case Interpreter::java_lang_math_sin : // fall thru
1518 case Interpreter::java_lang_math_cos : // fall thru
1519 case Interpreter::java_lang_math_tan : // fall thru
1520 case Interpreter::java_lang_math_abs : // fall thru
1521 case Interpreter::java_lang_math_log : // fall thru
1522 case Interpreter::java_lang_math_log10 : // fall thru
1523 case Interpreter::java_lang_math_sqrt : // fall thru
1524 case Interpreter::java_lang_math_pow : // fall thru
1525 case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
1526 case Interpreter::java_lang_ref_reference_get : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
1528 default:
1529 fatal(err_msg("unexpected method kind: %d", kind));
1530 break;
1531 }
1533 if (entry_point) return entry_point;
1535 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
1537 }
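// Method kinds without a specialized entry (zerolocals and its
// synchronized variant) fall through to generate_normal_entry above;
// only the synchronized flag differs between them.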
1539 // These should never be compiled: otherwise the interpreter would prefer
1540 // the compiled version to the (faster) intrinsic version.
1541 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1542 switch (method_kind(m)) {
1543 case Interpreter::java_lang_math_sin : // fall thru
1544 case Interpreter::java_lang_math_cos : // fall thru
1545 case Interpreter::java_lang_math_tan : // fall thru
1546 case Interpreter::java_lang_math_abs : // fall thru
1547 case Interpreter::java_lang_math_log : // fall thru
1548 case Interpreter::java_lang_math_log10 : // fall thru
1549 case Interpreter::java_lang_math_sqrt : // fall thru
1550 case Interpreter::java_lang_math_pow : // fall thru
1551 case Interpreter::java_lang_math_exp :
1552 return false;
1553 default:
1554 return true;
1555 }
1556 }
1558 // How much stack a method activation needs in words.
1559 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1561 const int stub_code = 4; // see generate_call_stub
1562 // Save space for one monitor to get into the interpreted method in case
1563 // the method is synchronized
1564 int monitor_size = method->is_synchronized() ?
1565 1*frame::interpreter_frame_monitor_size() : 0;
1567 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
1568 // be sure to change this if you add/subtract anything to/from the overhead area
1569 const int overhead_size = -frame::interpreter_frame_initial_sp_offset + monitor_size;
1571 const int extra_stack = methodOopDesc::extra_stack_entries();
1572 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
1573 Interpreter::stackElementWords;
1574 return overhead_size + method_stack + stub_code;
1575 }
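// Rough worked example (hypothetical values): a synchronized method with
// max_locals == 4, max_stack == 6 and extra_stack == 0 needs
//   overhead_size                               (frame overhead + 1 monitor)
//   + (4 + 6) * Interpreter::stackElementWords  (locals + expression stack)
//   + 4                                         (stub_code)
// words of stack for the topmost interpreter activation.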
1577 // asm based interpreter deoptimization helpers
1579 int AbstractInterpreter::layout_activation(methodOop method,
1580 int tempcount,
1581 int popframe_extra_args,
1582 int moncount,
1583 int caller_actual_parameters,
1584 int callee_param_count,
1585 int callee_locals,
1586 frame* caller,
1587 frame* interpreter_frame,
1588 bool is_top_frame) {
1589 // Note: This calculation must exactly parallel the frame setup
1590 // in AbstractInterpreterGenerator::generate_method_entry.
1591 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
1592 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1593 // as determined by a previous call to this method.
1594 // It is also guaranteed to be walkable even though it is in a skeletal state.
1595 // NOTE: the returned size is in words, not bytes.
1597 // fixed size of an interpreter frame:
1598 int max_locals = method->max_locals() * Interpreter::stackElementWords;
1599 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1600 Interpreter::stackElementWords;
1602 int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
1604 // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
1605 // Since the callee's parameters are already accounted for, we only need to add
1606 // the extra locals.
1609 int size = overhead +
1610 ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
1611 (moncount*frame::interpreter_frame_monitor_size()) +
1612 tempcount*Interpreter::stackElementWords + popframe_extra_args;
1614 if (interpreter_frame != NULL) {
1615 #ifdef ASSERT
1616 if (!EnableInvokeDynamic)
1617 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
1618 // Probably, since deoptimization doesn't work yet.
1619 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
1620 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
1621 #endif
1623 interpreter_frame->interpreter_frame_set_method(method);
1624 // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
1625 // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
1626 // and sender_sp is fp+8
1627 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
1629 #ifdef ASSERT
1630 if (caller->is_interpreted_frame()) {
1631 assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
1632 }
1633 #endif
1635 interpreter_frame->interpreter_frame_set_locals(locals);
1636 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1637 BasicObjectLock* monbot = montop - moncount;
1638 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1640 // Set last_sp
1641 intptr_t* rsp = (intptr_t*) monbot -
1642 tempcount*Interpreter::stackElementWords -
1643 popframe_extra_args;
1644 interpreter_frame->interpreter_frame_set_last_sp(rsp);
1646 // All frames but the initial (oldest) interpreter frame we fill in have a
1647 // value for sender_sp that allows walking the stack but isn't
1648 // truly correct. Correct the value here.
1650 if (extra_locals != 0 &&
1651 interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
1652 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
1653 }
1654 *interpreter_frame->interpreter_frame_cache_addr() =
1655 method->constants()->cache();
1656 }
1657 return size;
1658 }
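// Deoptimization typically drives layout_activation in two passes: first
// with interpreter_frame == NULL just to compute the size, then again
// with the skeletal frame so method, locals, monitors and last_sp can be
// filled in (the interpreter_frame != NULL branch above).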
1661 //------------------------------------------------------------------------------------------------------------------------
1662 // Exceptions
1664 void TemplateInterpreterGenerator::generate_throw_exception() {
1665 // Entry point in previous activation (i.e., if the caller was interpreted)
1666 Interpreter::_rethrow_exception_entry = __ pc();
1667 const Register thread = rcx;
1669 // Restore sp to interpreter_frame_last_sp even though we are going
1670 // to empty the expression stack for the exception processing.
1671 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1672 // rax,: exception
1673 // rdx: return address/pc that threw exception
1674 __ restore_bcp(); // rsi points to call/send
1675 __ restore_locals();
1677 // Entry point for exceptions thrown within interpreter code
1678 Interpreter::_throw_exception_entry = __ pc();
1679 // expression stack is undefined here
1680 // rax,: exception
1681 // rsi: exception bcp
1682 __ verify_oop(rax);
1684 // expression stack must be empty before entering the VM in case of an exception
1685 __ empty_expression_stack();
1686 __ empty_FPU_stack();
1687 // find exception handler address and preserve exception oop
1688 __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
1689 // rax,: exception handler entry point
1690 // rdx: preserved exception oop
1691 // rsi: bcp for exception handler
1692 __ push_ptr(rdx); // push exception which is now the only value on the stack
1693 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1695 // If the exception is not handled in the current frame, the frame is removed and
1696 // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1697 //
1698 // Note: At this point the bci still refers to the instruction which caused
1699 // the exception and the expression stack is empty. Thus, for any VM calls
1700 // at this point, GC will find a legal oop map (with empty expression stack).
1702 // In current activation
1703 // tos: exception
1704 // rsi: exception bcp
1706 //
1707 // JVMTI PopFrame support
1708 //
1710 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1711 __ empty_expression_stack();
1712 __ empty_FPU_stack();
1713 // Set the popframe_processing bit in pending_popframe_condition indicating that we are
1714 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1715 // popframe handling cycles.
1716 __ get_thread(thread);
1717 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1718 __ orl(rdx, JavaThread::popframe_processing_bit);
1719 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
1721 {
1722 // Check to see whether we are returning to a deoptimized frame.
1723 // (The PopFrame call ensures that the caller of the popped frame is
1724 // either interpreted or compiled and deoptimizes it if compiled.)
1725 // In this case, we can't call dispatch_next() after the frame is
1726 // popped, but instead must save the incoming arguments and restore
1727 // them after deoptimization has occurred.
1728 //
1729 // Note that we don't compare the return PC against the
1730 // deoptimization blob's unpack entry because of the presence of
1731 // adapter frames in C2.
1732 Label caller_not_deoptimized;
1733 __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
1734 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
1735 __ testl(rax, rax);
1736 __ jcc(Assembler::notZero, caller_not_deoptimized);
1738 // Compute size of arguments for saving when returning to deoptimized caller
1739 __ get_method(rax);
1740 __ verify_oop(rax);
1741 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
1742 __ shlptr(rax, Interpreter::logStackElementSize);
1743 __ restore_locals();
1744 __ subptr(rdi, rax);
1745 __ addptr(rdi, wordSize);
1746 // Save these arguments
1747 __ get_thread(thread);
1748 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
1750 __ remove_activation(vtos, rdx,
1751 /* throw_monitor_exception */ false,
1752 /* install_monitor_exception */ false,
1753 /* notify_jvmdi */ false);
1755 // Inform deoptimization that it is responsible for restoring these arguments
1756 __ get_thread(thread);
1757 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
1759 // Continue in deoptimization handler
1760 __ jmp(rdx);
1762 __ bind(caller_not_deoptimized);
1763 }
1765 __ remove_activation(vtos, rdx,
1766 /* throw_monitor_exception */ false,
1767 /* install_monitor_exception */ false,
1768 /* notify_jvmdi */ false);
1770 // Finish with popframe handling
1771 // A previous I2C followed by a deoptimization might have moved the
1772 // outgoing arguments further up the stack. PopFrame expects the
1773 // mutations to those outgoing arguments to be preserved and other
1774 // constraints basically require this frame to look exactly as
1775 // though it had previously invoked an interpreted activation with
1776 // no space between the top of the expression stack (current
1777 // last_sp) and the top of stack. Rather than force deopt to
1778 // maintain this kind of invariant all the time we call a small
1779 // fixup routine to move the mutated arguments onto the top of our
1780 // expression stack if necessary.
1781 __ mov(rax, rsp);
1782 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1783 __ get_thread(thread);
1784 // PC must point into interpreter here
1785 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1786 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1787 __ get_thread(thread);
1788 __ reset_last_Java_frame(thread, true, true);
1789 // Restore the last_sp and null it out
1790 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1791 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1793 __ restore_bcp();
1794 __ restore_locals();
1795 // The method data pointer was incremented already during
1796 // call profiling. We have to restore the mdp for the current bcp.
1797 if (ProfileInterpreter) {
1798 __ set_method_data_pointer_for_bcp();
1799 }
1801 // Clear the popframe condition flag
1802 __ get_thread(thread);
1803 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
1805 __ dispatch_next(vtos);
1806 // end of PopFrame support
1808 Interpreter::_remove_activation_entry = __ pc();
1810 // preserve exception over this code sequence
1811 __ pop_ptr(rax);
1812 __ get_thread(thread);
1813 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1814 // remove the activation (without doing throws on illegalMonitorExceptions)
1815 __ remove_activation(vtos, rdx, false, true, false);
1816 // restore exception
1817 __ get_thread(thread);
1818 __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
1819 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
1820 __ verify_oop(rax);
1822 // In between activations; the type of the previous activation is not yet known.
1823 // Compute the continuation point; the continuation point expects
1824 // the following registers set up:
1825 //
1826 // rax: exception
1827 // rdx: return address/pc that threw exception
1828 // rsp: expression stack of caller
1829 // rbp: rbp, of caller
1830 __ push(rax); // save exception
1831 __ push(rdx); // save return address
1832 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
1833 __ mov(rbx, rax); // save exception handler
1834 __ pop(rdx); // restore return address
1835 __ pop(rax); // restore exception
1836 // Note that an "issuing PC" is actually the next PC after the call
1837 __ jmp(rbx); // jump to exception handler of caller
1838 }
1841 //
1842 // JVMTI ForceEarlyReturn support
1843 //
1844 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1845 address entry = __ pc();
1846 const Register thread = rcx;
1848 __ restore_bcp();
1849 __ restore_locals();
1850 __ empty_expression_stack();
1851 __ empty_FPU_stack();
1852 __ load_earlyret_value(state);
1854 __ get_thread(thread);
1855 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
1856 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1858 // Clear the earlyret state
1859 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1861 __ remove_activation(state, rsi,
1862 false, /* throw_monitor_exception */
1863 false, /* install_monitor_exception */
1864 true); /* notify_jvmdi */
1865 __ jmp(rsi);
1866 return entry;
1867 } // end of ForceEarlyReturn support
1870 //------------------------------------------------------------------------------------------------------------------------
1871 // Helper for vtos entry point generation
1873 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1874 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1875 Label L;
1876 fep = __ pc(); __ push(ftos); __ jmp(L);
1877 dep = __ pc(); __ push(dtos); __ jmp(L);
1878 lep = __ pc(); __ push(ltos); __ jmp(L);
1879 aep = __ pc(); __ push(atos); __ jmp(L);
1880 bep = cep = sep = // fall through
1881 iep = __ pc(); __ push(itos); // fall through
1882 vep = __ pc(); __ bind(L); // fall through
1883 generate_and_dispatch(t);
1884 }
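// Each non-void entry above pushes the incoming tos value and continues
// at the vtos entry point (vep); byte, char and short share the int
// entry since their values arrive in the same register.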
1886 //------------------------------------------------------------------------------------------------------------------------
1887 // Generation of individual instructions
1889 // helpers for generate_and_dispatch
1893 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1894 : TemplateInterpreterGenerator(code) {
1895 generate_all(); // down here so it can be "virtual"
1896 }
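// generate_all() is called from this most-derived constructor rather than
// from the TemplateInterpreterGenerator constructor so that virtual calls
// made during generation dispatch to InterpreterGenerator overrides
// (virtual dispatch inside a base-class constructor would not reach them).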
1898 //------------------------------------------------------------------------------------------------------------------------
1900 // Non-product code
1901 #ifndef PRODUCT
1902 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1903 address entry = __ pc();
1905 // prepare expression stack
1906 __ pop(rcx); // pop return address so expression stack is 'pure'
1907 __ push(state); // save tosca
1909 // pass tosca registers as arguments & call tracer
1910 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
1911 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1912 __ pop(state); // restore tosca
1914 // return
1915 __ jmp(rcx);
1917 return entry;
1918 }
1921 void TemplateInterpreterGenerator::count_bytecode() {
1922 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1923 }
1926 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1927 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1928 }
1931 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1932 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1933 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1934 __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
__ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1935 ExternalAddress table((address) BytecodePairHistogram::_counters);
1936 Address index(noreg, rbx, Address::times_4);
1937 __ incrementl(ArrayAddress(table, index));
1938 }
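// _index keeps the last two bytecodes packed as
// (current << log2_number_of_codes) | previous: the shift drops the
// oldest code, the or merges in the new one, and _counters is indexed by
// the packed pair.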
1941 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1942 // Call a little run-time stub to avoid blow-up for each bytecode.
1943 // The run-time stub saves the right registers, depending on
1944 // the tosca in-state for the given template.
1945 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1946 "entry must have been generated");
1947 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1948 }
1951 void TemplateInterpreterGenerator::stop_interpreter_at() {
1952 Label L;
1953 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1954 StopInterpreterAt);
1955 __ jcc(Assembler::notEqual, L);
1956 __ int3();
1957 __ bind(L);
1958 }
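// When running with -XX:StopInterpreterAt=<n> in a debug build, the int3
// above fires once the global bytecode counter reaches n, dropping into
// the debugger; otherwise the comparison falls through.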
1959 #endif // !PRODUCT
1960 #endif // CC_INTERP