Sat, 12 Oct 2013 12:12:59 +0200
8026054: New type profiling points: type of return values at calls
Summary: x86 interpreter and c1 type profiling for return values at calls
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->
#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);  // rax = maximal rsp for current rbp
                          // (stack grows downward)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // convention: the aberrant index is expected in register rbx
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}
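
// Note: last_sp serves as the marker that rsp was adjusted for an outgoing
// call; NULLing it here re-establishes the "rsp is the current TOS"
// invariant (see the analogous NULLing in the return and deopt entries
// below, where it is also restored as the stack bottom first).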
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, interpreter returns FP results in xmm0 but they need
  // to end up back on the FPU so it can operate on them.
  if (incoming_state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (incoming_state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
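
  // New profiling point (8026054): if the callee returned a reference
  // (incoming_state == atos), record the dynamic type of the return value
  // in the caller's MethodData so c1 can use it later. rbx and rcx are
  // free at this point and serve as mdp and scratch registers.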
  if (incoming_state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                    Address::times_ptr, ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::flags_offset()));
  __ andptr(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}
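
// Sketch of the argument pop above: the low byte of the cache entry's flags
// word encodes the call's parameter size in stack elements, so the
// andptr(rbx, 0xFF) / lea(rsp, ...) pair pops the arguments of the
// just-completed call before dispatching to the next bytecode.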
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}
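
// Note: these handlers are consumed by the native entry below: the address
// returned here is stashed at interpreter_frame_result_handler_offset and
// called after the JNI call returns (see generate_native_entry). That is
// also why the T_OBJECT case reads its result from the frame's oop temp
// slot rather than from a register.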
address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in MethodCounters* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               rcx, false, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter  (rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx);             // save invocation count

    __ movl(rax, backedge_counter);               // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

    __ addl(rcx, rax);                            // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call.
    // The C++ BytecodeInterpreter calls here only for natives, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}
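
// A minimal sketch of the tiered counter update above, assuming the usual
// InvocationCounter layout (count field stored above a few status bits):
//
//   counter += count_increment;                 // bump the count field
//   if ((counter & mask) == 0) goto overflow;   // mask = (2^Tier0InvokeNotifyFreqLog - 1) << count_shift
//
// i.e. increment_mask_and_jump takes the overflow/notification path every
// 2^Tier0InvokeNotifyFreqLog invocations rather than only once.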
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx - method
  // rdx - cpool
  // rbp - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}
void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // destroyed on exit
  // rax

  // NOTE: the additional locals are always pushed as well (this wasn't
  // obvious in generate_method_entry), so the guard works for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (c++ prev state)

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax);  // return address must be moved if SP is changed
  __ mov(rsp, rsi);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}
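
// The check above, written out as plain arithmetic (a sketch, not the
// generated code):
//
//   needed = locals_in_bytes + overhead_size;        // rax after the lea
//   limit  = stack_base - stack_size                 // lowest usable address
//          + needed + max_pages * page_size;         // plus room we may bang
//   if (rsp <= limit) throw StackOverflowError;      // jcc(above) skips the throw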
// Allocate monitor and lock method (asm interpreter)
// rbx - Method*
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
  __ mov(rdx, rsp);                                                     // object address
  __ lock_object(rdx);
}
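
// For reference, the monitor entry carved out above is a BasicObjectLock:
// a displaced-header BasicLock followed by the oop being locked (hence the
// store through obj_offset_in_bytes after rsp is dropped by entry_size).
// For static methods the synchronization object is the class mirror,
// fetched via Method* -> ConstMethod* -> ConstantPool -> pool holder.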
//
// Generate a fixed interpreter frame. The setup is identical for
// interpreted and native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);                                         // save return address
  __ enter();                                           // save old & set new rbp

  __ push(rsi);                                         // set sender sp
  __ push((int32_t)NULL_WORD);                          // leave last_sp as null
  __ movptr(rsi, Address(rbx, Method::const_offset())); // get ConstMethod*
  __ lea(rsi, Address(rsi, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);                                         // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                       // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);                                         // set constant pool cache
  __ push(rdi);                                         // set locals pointer
  if (native_call) {
    __ push(0);                                         // no bcp
  } else {
    __ push(rsi);                                       // set bcp
  }
  __ push(0);                                           // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                      // set expression stack bottom
}
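
// The pushes above build the fixed frame from the saved rbp down to the
// expression stack bottom pointer; see the stack layout diagram ahead of
// generate_method_entry at the end of this file for the resulting picture.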
// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: Method*
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    // (0x2a = aload_0, 0xb4 = getfield; the 16-bit field index sits above them).
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecodes::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are one 32-bit word. This is OK for now,
    // since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);                               // get return address
    __ mov(rsp, rsi);                          // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code below can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  // rbx: Method*
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0 (must be preserved across the G1 barrier call)
    //
    // rbx: method (at this point it's scratch)
    // rcx: receiver (at this point it's scratch)
    // rdx: scratch
    // rdi: scratch
    //
    // rsi: sender sp

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    __ push(rsi);

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ movptr(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ get_thread(rcx);
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            rcx /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rsi);      // get sender sp
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register val = rdx;  // source java byte value
    const Register tbl = rdi;  // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
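
// Expression-stack picture for the loads above (a sketch): the caller has
// pushed the arguments left-to-right, so with the return address on top
// the stack reads
//
//   [ return address ] <-- rsp
//   [ b   (int)      ] <-- rsp + wordSize
//   [ crc (int)      ] <-- rsp + 2*wordSize
//
// which is why the "reversed" offsets pick up b before crc.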
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax; // crc
    const Register buf = rdx; // source java byte array address
    const Register len = rdi; // length

    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, wordSize)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 5*wordSize));   // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 4*wordSize));   // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
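
// Why the CRC offsets differ between the two kinds: on this 32-bit port a
// long argument occupies two expression-stack slots while an array
// reference takes one, so for updateByteBuffer(crc, long buf, off, len)
// the initial CRC sits one slot deeper (5*wordSize) than for
// updateBytes (4*wordSize).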
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must be preserved
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack; the arguments are already on the stack and we only
  // add a handful of words to it.

  // rbx: Method*
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);                                       // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);
  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can blow RBX
  // sometime, so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }
1189 // pass JNIEnv
1190 __ get_thread(thread);
1191 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1192 __ movptr(Address(rsp, 0), t);
1194 // set_last_Java_frame_before_call
1195 // It is enough that the pc()
1196 // points into the right code segment. It does not have to be the correct return pc.
1197 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1199 // change thread state
1200 #ifdef ASSERT
1201 { Label L;
1202 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1203 __ cmpl(t, _thread_in_Java);
1204 __ jcc(Assembler::equal, L);
1205 __ stop("Wrong thread state in native stub");
1206 __ bind(L);
1207 }
1208 #endif
1210 // Change state to native
1211 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1212 __ call(rax);
1214 // result potentially in rdx:rax or ST0
1216 // Verify or restore cpu control state after JNI call
1217 __ restore_cpu_control_state_after_jni();
1219 // save potential result in ST(0) & rdx:rax
1220 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
1221 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1222 // It is safe to do this push because state is _thread_in_native and return address will be found
1223 // via _last_native_pc and not via _last_jave_sp
1225 // NOTE: the order of theses push(es) is known to frame::interpreter_frame_result.
1226 // If the order changes or anything else is added to the stack the code in
1227 // interpreter_frame_result will have to be changed.
1229 { Label L;
1230 Label push_double;
1231 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1232 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1233 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1234 float_handler.addr());
1235 __ jcc(Assembler::equal, push_double);
1236 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1237 double_handler.addr());
1238 __ jcc(Assembler::notEqual, L);
1239 __ bind(push_double);
1240 __ push(dtos);
1241 __ bind(L);
1242 }
1243 __ push(ltos);
1245 // change thread state
1246 __ get_thread(thread);
1247 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1248 if(os::is_MP()) {
1249 if (UseMembar) {
1250 // Force this write out before the read below
1251 __ membar(Assembler::Membar_mask_bits(
1252 Assembler::LoadLoad | Assembler::LoadStore |
1253 Assembler::StoreLoad | Assembler::StoreStore));
1254 } else {
1255 // Write serialization page so VM thread can do a pseudo remote membar.
1256 // We use the current thread pointer to calculate a thread specific
1257 // offset to write to within the page. This minimizes bus traffic
1258 // due to cache line collision.
1259 __ serialize_memory(thread, rcx);
1260 }
1261 }
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
              handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }
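
  // Note on the block above: a JNI method returns a JNI handle (a pointer
  // to an oop slot), so the "unbox" dereferences the handle. Storing the
  // raw oop into the frame's oop temp slot keeps it visible to the GC
  // across the transitions below; the matching T_OBJECT result handler
  // reads it back from there (see generate_result_handler_for).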
  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }

  // restore rsi to have legal interpreter frame,
  // i.e., bci == 0 <=> rsi == code_base()
  // Can't call_VM until bcp is within a reasonable range.
  __ get_method(method);      // method is junk from thread_in_native to now.
  __ movptr(rsi, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rsi, Address(rsi, ConstMethod::codes_offset()));    // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code used in call_VM_base();
    //       i.e., we should use the StubRoutines::forward_exception code. For now this
    //       doesn't work here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro assembler implementation
    { Label unlock;
      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

      __ lea(rdx, monitor);                   // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rsi: sender sp
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals);  // get size of locals in words
  __ subl(rdx, rcx);                            // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);         // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);                // initialize local variables
    __ decrement(rdx);                          // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
1568 //------------------------------------------------------------------------------------------------------------------------
1569 // Entry points
1570 //
1571 // Here we generate the various kinds of entries into the interpreter.
1572 // The two main entry types are generic bytecode methods and native call methods.
1573 // These both come in synchronized and non-synchronized versions but the
1574 // frame layout they create is very similar. The other method entry
1575 // types are special-purpose entries that combine method entry
1576 // and interpretation in one step. These are for trivial methods like
1577 // accessor, empty, or special math methods.
1578 //
1579 // When control flow reaches any of the entry types for the interpreter
1580 // the following holds ->
1581 //
1582 // Arguments:
1583 //
1584 // rbx: Method*
1585 // rcx: receiver
1586 //
1587 //
1588 // Stack layout immediately at entry
1589 //
1590 // [ return address ] <--- rsp
1591 // [ parameter n ]
1592 // ...
1593 // [ parameter 1 ]
1594 // [ expression stack ] (caller's java expression stack)
1596 // Assuming that we don't go to one of the trivial specialized
1597 // entries, the stack will look like below when we are ready to execute
1598 // the first bytecode (or call the native routine). The register usage
1599 // will be as the template based interpreter expects (see interpreter_x86.hpp).
1600 //
1601 // local variables follow incoming parameters immediately; i.e.,
1602 // the return address is moved to the end of the locals.
1603 //
1604 // [ monitor entry ] <--- rsp
1605 // ...
1606 // [ monitor entry ]
1607 // [ expr. stack bottom ]
1608 // [ saved rsi ]
1609 // [ current rdi ]
1610 // [ Method* ]
1611 // [ saved rbp ] <--- rbp
1612 // [ return address ]
1613 // [ local variable m ]
1614 // ...
1615 // [ local variable 1 ]
1616 // [ parameter n ]
1617 // ...
1618 // [ parameter 1 ] <--- rdi
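//
// For example (illustrative, not part of the generated code): an instance
// method int add(int a, int b) has no extra locals (m == n == 3: receiver,
// a, b) and, if not synchronized, no monitor entries, so right before the
// first bytecode the frame is simply
//
// [ expr. stack bottom ] <--- rsp
// [ saved rsi ]
// [ current rdi ]
// [ Method* ]
// [ saved rbp ] <--- rbp
// [ return address ]
// [ b ]
// [ a ]
// [ receiver ] <--- rdi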
1620 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
1621 // determine code generation flags
1622 bool synchronized = false;
1623 address entry_point = NULL;
1624 InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
1626 switch (kind) {
1627 case Interpreter::zerolocals : break;
1628 case Interpreter::zerolocals_synchronized: synchronized = true; break;
1629 case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
1630 case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
1631 case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
1632 case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
1633 case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
1635 case Interpreter::java_lang_math_sin : // fall thru
1636 case Interpreter::java_lang_math_cos : // fall thru
1637 case Interpreter::java_lang_math_tan : // fall thru
1638 case Interpreter::java_lang_math_abs : // fall thru
1639 case Interpreter::java_lang_math_log : // fall thru
1640 case Interpreter::java_lang_math_log10 : // fall thru
1641 case Interpreter::java_lang_math_sqrt : // fall thru
1642 case Interpreter::java_lang_math_pow : // fall thru
1643 case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
1644 case Interpreter::java_lang_ref_reference_get
1645 : entry_point = ig_this->generate_Reference_get_entry(); break;
1646 case Interpreter::java_util_zip_CRC32_update
1647 : entry_point = ig_this->generate_CRC32_update_entry(); break;
1648 case Interpreter::java_util_zip_CRC32_updateBytes
1649 : // fall thru
1650 case Interpreter::java_util_zip_CRC32_updateByteBuffer
1651 : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
1652 default:
1653 fatal(err_msg("unexpected method kind: %d", kind));
1654 break;
1655 }
1657 if (entry_point) return entry_point;
1659 return ig_this->generate_normal_entry(synchronized);
1661 }
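//
// For example (illustrative of the dispatch above): a plain Java method
// comes in as Interpreter::zerolocals and falls through to
// generate_normal_entry(false); a synchronized Java method comes in as
// Interpreter::zerolocals_synchronized and reaches
// generate_normal_entry(true); Math.sqrt comes in as
// Interpreter::java_lang_math_sqrt and gets its own generate_math_entry()
// stub, never reaching the normal entry.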
1663 // These should never be compiled since the interpreter will prefer
1664 // the compiled version to the intrinsic version.
1665 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1666 switch (method_kind(m)) {
1667 case Interpreter::java_lang_math_sin : // fall thru
1668 case Interpreter::java_lang_math_cos : // fall thru
1669 case Interpreter::java_lang_math_tan : // fall thru
1670 case Interpreter::java_lang_math_abs : // fall thru
1671 case Interpreter::java_lang_math_log : // fall thru
1672 case Interpreter::java_lang_math_log10 : // fall thru
1673 case Interpreter::java_lang_math_sqrt : // fall thru
1674 case Interpreter::java_lang_math_pow : // fall thru
1675 case Interpreter::java_lang_math_exp :
1676 return false;
1677 default:
1678 return true;
1679 }
1680 }
1682 // How much stack a method activation needs in words.
1683 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
1685 const int stub_code = 4; // see generate_call_stub
1686 // Save space for one monitor to get into the interpreted method in case
1687 // the method is synchronized
1688 int monitor_size = method->is_synchronized() ?
1689 1*frame::interpreter_frame_monitor_size() : 0;
1691 // total overhead size: entry_size + (saved rbp thru expr stack bottom) + monitor_size.
1692 // be sure to change this if you add/subtract anything to/from the overhead area
1693 const int overhead_size = -frame::interpreter_frame_initial_sp_offset + monitor_size;
1695 const int method_stack = (method->max_locals() + method->max_stack()) *
1696 Interpreter::stackElementWords;
1697 return overhead_size + method_stack + stub_code;
1698 }
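//
// A rough worked example (all values illustrative, not taken from the
// actual frame constants): if -interpreter_frame_initial_sp_offset is
// 8 words and interpreter_frame_monitor_size() is 2 words, then for a
// synchronized method with max_locals == 2 and max_stack == 3 on a
// 32-bit VM (stackElementWords == 1):
//
//   overhead_size = 8 + 2        = 10 words
//   method_stack  = (2 + 3) * 1  =  5 words
//   size          = 10 + 5 + 4   = 19 words  (stub_code == 4)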
1700 // asm based interpreter deoptimization helpers
1702 int AbstractInterpreter::layout_activation(Method* method,
1703 int tempcount,
1704 int popframe_extra_args,
1705 int moncount,
1706 int caller_actual_parameters,
1707 int callee_param_count,
1708 int callee_locals,
1709 frame* caller,
1710 frame* interpreter_frame,
1711 bool is_top_frame,
1712 bool is_bottom_frame) {
1713 // Note: This calculation must exactly parallel the frame setup
1714 // in AbstractInterpreterGenerator::generate_method_entry.
1715 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
1716 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1717 // as determined by a previous call to this method.
1718 // It is also guaranteed to be walkable even though it is in a skeletal state
1719 // NOTE: return size is in words not bytes
1721 // fixed size of an interpreter frame:
1722 int max_locals = method->max_locals() * Interpreter::stackElementWords;
1723 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1724 Interpreter::stackElementWords;
1726 int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
1728 // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
1729 // Since the callee parameters already account for the callee's params we only need to account for
1730 // the extra locals.
1733 int size = overhead +
1734 ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
1735 (moncount*frame::interpreter_frame_monitor_size()) +
1736 tempcount*Interpreter::stackElementWords + popframe_extra_args;
1738 if (interpreter_frame != NULL) {
1739 #ifdef ASSERT
1740 if (!EnableInvokeDynamic)
1741 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
1742 // Probably, since deoptimization doesn't work yet.
1743 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
1744 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
1745 #endif
1747 interpreter_frame->interpreter_frame_set_method(method);
1748 // NOTE the difference between sender_sp and interpreter_frame_sender_sp:
1749 // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp),
1750 // and sender_sp is fp+8.
1751 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
1753 #ifdef ASSERT
1754 if (caller->is_interpreted_frame()) {
1755 assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
1756 }
1757 #endif
1759 interpreter_frame->interpreter_frame_set_locals(locals);
1760 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1761 BasicObjectLock* monbot = montop - moncount;
1762 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1764 // Set last_sp
1765 intptr_t* rsp = (intptr_t*) monbot -
1766 tempcount*Interpreter::stackElementWords -
1767 popframe_extra_args;
1768 interpreter_frame->interpreter_frame_set_last_sp(rsp);
1770 // All frames but the initial (oldest) interpreter frame we fill in have a
1771 // value for sender_sp that allows walking the stack but isn't
1772 // truly correct. Correct the value here.
1774 if (extra_locals != 0 &&
1775 interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
1776 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
1777 }
1778 *interpreter_frame->interpreter_frame_cache_addr() =
1779 method->constants()->cache();
1780 }
1781 return size;
1782 }
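//
// A sketch of the size computation above with hypothetical counts: a
// frame with callee_locals - callee_param_count == 2 extra locals,
// moncount == 1 and tempcount == 3 on a 32-bit VM
// (stackElementWords == 1), assuming a 2-word monitor, needs
//
//   size = overhead + 2 + 2 + 3 + popframe_extra_args  words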
1785 //------------------------------------------------------------------------------------------------------------------------
1786 // Exceptions
1788 void TemplateInterpreterGenerator::generate_throw_exception() {
1789 // Entry point in previous activation (i.e., if the caller was interpreted)
1790 Interpreter::_rethrow_exception_entry = __ pc();
1791 const Register thread = rcx;
1793 // Restore sp to interpreter_frame_last_sp even though we are going
1794 // to empty the expression stack for the exception processing.
1795 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1796 // rax: exception
1797 // rdx: return address/pc that threw exception
1798 __ restore_bcp(); // rsi points to call/send
1799 __ restore_locals();
1801 // Entry point for exceptions thrown within interpreter code
1802 Interpreter::_throw_exception_entry = __ pc();
1803 // expression stack is undefined here
1804 // rax: exception
1805 // rsi: exception bcp
1806 __ verify_oop(rax);
1808 // expression stack must be empty before entering the VM in case of an exception
1809 __ empty_expression_stack();
1810 __ empty_FPU_stack();
1811 // find exception handler address and preserve exception oop
1812 __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
1813 // rax: exception handler entry point
1814 // rdx: preserved exception oop
1815 // rsi: bcp for exception handler
1816 __ push_ptr(rdx); // push exception which is now the only value on the stack
1817 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1819 // If the exception is not handled in the current frame the frame is removed and
1820 // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1821 //
1822 // Note: At this point the bci still refers to the instruction which caused
1823 // the exception and the expression stack is empty. Thus, for any VM calls
1824 // at this point, GC will find a legal oop map (with empty expression stack).
1826 // In current activation
1827 // tos: exception
1828 // rsi: exception bcp
1830 //
1831 // JVMTI PopFrame support
1832 //
1834 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1835 __ empty_expression_stack();
1836 __ empty_FPU_stack();
1837 // Set the popframe_processing bit in pending_popframe_condition indicating that we are
1838 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1839 // popframe handling cycles.
1840 __ get_thread(thread);
1841 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1842 __ orl(rdx, JavaThread::popframe_processing_bit);
1843 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
1845 {
1846 // Check to see whether we are returning to a deoptimized frame.
1847 // (The PopFrame call ensures that the caller of the popped frame is
1848 // either interpreted or compiled and deoptimizes it if compiled.)
1849 // In this case, we can't call dispatch_next() after the frame is
1850 // popped, but instead must save the incoming arguments and restore
1851 // them after deoptimization has occurred.
1852 //
1853 // Note that we don't compare the return PC against the
1854 // deoptimization blob's unpack entry because of the presence of
1855 // adapter frames in C2.
1856 Label caller_not_deoptimized;
1857 __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
1858 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
1859 __ testl(rax, rax);
1860 __ jcc(Assembler::notZero, caller_not_deoptimized);
1862 // Compute size of arguments for saving when returning to deoptimized caller
1863 __ get_method(rax);
1864 __ movptr(rax, Address(rax, Method::const_offset()));
1865 __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
1866 __ shlptr(rax, Interpreter::logStackElementSize);
1867 __ restore_locals();
1868 __ subptr(rdi, rax);
1869 __ addptr(rdi, wordSize);
1870 // Save these arguments
1871 __ get_thread(thread);
1872 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
1874 __ remove_activation(vtos, rdx,
1875 /* throw_monitor_exception */ false,
1876 /* install_monitor_exception */ false,
1877 /* notify_jvmdi */ false);
1879 // Inform deoptimization that it is responsible for restoring these arguments
1880 __ get_thread(thread);
1881 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
1883 // Continue in deoptimization handler
1884 __ jmp(rdx);
1886 __ bind(caller_not_deoptimized);
1887 }
1889 __ remove_activation(vtos, rdx,
1890 /* throw_monitor_exception */ false,
1891 /* install_monitor_exception */ false,
1892 /* notify_jvmdi */ false);
1894 // Finish with popframe handling
1895 // A previous I2C followed by a deoptimization might have moved the
1896 // outgoing arguments further up the stack. PopFrame expects the
1897 // mutations to those outgoing arguments to be preserved and other
1898 // constraints basically require this frame to look exactly as
1899 // though it had previously invoked an interpreted activation with
1900 // no space between the top of the expression stack (current
1901 // last_sp) and the top of stack. Rather than force deopt to
1902 // maintain this kind of invariant all the time we call a small
1903 // fixup routine to move the mutated arguments onto the top of our
1904 // expression stack if necessary.
1905 __ mov(rax, rsp);
1906 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1907 __ get_thread(thread);
1908 // PC must point into interpreter here
1909 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1910 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1911 __ get_thread(thread);
1912 __ reset_last_Java_frame(thread, true, true);
1913 // Restore the last_sp and null it out
1914 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1915 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1917 __ restore_bcp();
1918 __ restore_locals();
1919 // The method data pointer was incremented already during
1920 // call profiling. We have to restore the mdp for the current bcp.
1921 if (ProfileInterpreter) {
1922 __ set_method_data_pointer_for_bcp();
1923 }
1925 // Clear the popframe condition flag
1926 __ get_thread(thread);
1927 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
1929 #if INCLUDE_JVMTI
1930 if (EnableInvokeDynamic) {
1931 Label L_done;
1932 const Register local0 = rdi;
1934 __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
1935 __ jcc(Assembler::notEqual, L_done);
1937 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1938 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1940 __ get_method(rdx);
1941 __ movptr(rax, Address(local0, 0));
1942 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
1944 __ testptr(rax, rax);
1945 __ jcc(Assembler::zero, L_done);
1947 __ movptr(Address(local0, 0), rax); // restore the member name argument into local 0
1948 __ bind(L_done);
1949 }
1950 #endif // INCLUDE_JVMTI
1952 __ dispatch_next(vtos);
1953 // end of PopFrame support
1955 Interpreter::_remove_activation_entry = __ pc();
1957 // preserve exception over this code sequence
1958 __ pop_ptr(rax);
1959 __ get_thread(thread);
1960 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1961 // remove the activation (without doing throws on illegalMonitorExceptions)
1962 __ remove_activation(vtos, rdx, false, true, false);
1963 // restore exception
1964 __ get_thread(thread);
1965 __ get_vm_result(rax, thread);
1967 // In between activations - the type of the previous activation is not yet known
1968 // compute continuation point - the continuation point expects
1969 // the following registers set up:
1970 //
1971 // rax: exception
1972 // rdx: return address/pc that threw exception
1973 // rsp: expression stack of caller
1974 // rbp: rbp of caller
1975 __ push(rax); // save exception
1976 __ push(rdx); // save return address
1977 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
1978 __ mov(rbx, rax); // save exception handler
1979 __ pop(rdx); // restore return address
1980 __ pop(rax); // restore exception
1981 // Note that an "issuing PC" is actually the next PC after the call
1982 __ jmp(rbx); // jump to exception handler of caller
1983 }
1986 //
1987 // JVMTI ForceEarlyReturn support
1988 //
1989 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1990 address entry = __ pc();
1991 const Register thread = rcx;
1993 __ restore_bcp();
1994 __ restore_locals();
1995 __ empty_expression_stack();
1996 __ empty_FPU_stack();
1997 __ load_earlyret_value(state);
1999 __ get_thread(thread);
2000 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
2001 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
2003 // Clear the earlyret state
2004 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
2006 __ remove_activation(state, rsi,
2007 false, /* throw_monitor_exception */
2008 false, /* install_monitor_exception */
2009 true); /* notify_jvmdi */
2010 __ jmp(rsi);
2011 return entry;
2012 } // end of ForceEarlyReturn support
2015 //------------------------------------------------------------------------------------------------------------------------
2016 // Helper for vtos entry point generation
2018 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
2019 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
2020 Label L;
2021 fep = __ pc(); __ push(ftos); __ jmp(L);
2022 dep = __ pc(); __ push(dtos); __ jmp(L);
2023 lep = __ pc(); __ push(ltos); __ jmp(L);
2024 aep = __ pc(); __ push(atos); __ jmp(L);
2025 bep = cep = sep = // fall through
2026 iep = __ pc(); __ push(itos); // fall through
2027 vep = __ pc(); __ bind(L); // fall through
2028 generate_and_dispatch(t);
2029 }
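//
// For instance (illustrative): a vtos template entered with a float in
// the tos cache enters at fep, where push(ftos) flushes the value to the
// java expression stack before jumping to the common code at vep;
// entering at vep directly means the tos cache was already empty.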
2031 //------------------------------------------------------------------------------------------------------------------------
2032 // Generation of individual instructions
2034 // helpers for generate_and_dispatch
2038 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2039 : TemplateInterpreterGenerator(code) {
2040 generate_all(); // down here so it can be "virtual"
2041 }
2043 //------------------------------------------------------------------------------------------------------------------------
2045 // Non-product code
2046 #ifndef PRODUCT
2047 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2048 address entry = __ pc();
2050 // prepare expression stack
2051 __ pop(rcx); // pop return address so expression stack is 'pure'
2052 __ push(state); // save tosca
2054 // pass tosca registers as arguments & call tracer
2055 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
2056 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
2057 __ pop(state); // restore tosca
2059 // return
2060 __ jmp(rcx);
2062 return entry;
2063 }
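//
// The stub above is the target of the non-product bytecode tracing
// support; the expectation (illustrative, debug builds only) is that
// running with -XX:+TraceBytecodes routes every dispatched bytecode
// through SharedRuntime::trace_bytecode via this entry.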
2066 void TemplateInterpreterGenerator::count_bytecode() {
2067 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
2068 }
2071 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2072 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
2073 }
2076 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2077   __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index)); // load previous pair index
2078   __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);                // keep only the last bytecode
2079   __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); // append current bytecode
2080   __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); // store new pair index
2081   ExternalAddress table((address) BytecodePairHistogram::_counters);
2082   Address index(noreg, rbx, Address::times_4);
2083   __ incrementl(ArrayAddress(table, index));
2084 }
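//
// A sketch of the pair index computed above, assuming
// log2_number_of_codes == 8: if the previous bytecode was iload and the
// current one is iadd, then after the shift rbx holds iload's code and
// after the or it holds (iload | iadd << 8), so the increment counts the
// (iload, iadd) pair in _counters.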
2086 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2087 // Call a little run-time stub to avoid blow-up for each bytecode.
2088 // The run-time stub saves the right registers, depending on
2089 // the tosca in-state for the given template.
2090 assert(Interpreter::trace_code(t->tos_in()) != NULL,
2091 "entry must have been generated");
2092 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
2093 }
2096 void TemplateInterpreterGenerator::stop_interpreter_at() {
2097 Label L;
2098 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
2099 StopInterpreterAt);
2100 __ jcc(Assembler::notEqual, L);
2101 __ int3();
2102 __ bind(L);
2103 }
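//
// Illustrative usage (debug builds): -XX:StopInterpreterAt=<n> arms the
// comparison above so that the int3 breakpoint fires when the global
// BytecodeCounter reaches n, stopping the VM at a reproducible bytecode.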
2104 #endif // !PRODUCT
2105 #endif // CC_INTERP