Wed, 03 Apr 2013 11:12:57 -0700
8011102: Clear AVX registers after return from JNI call
Summary: Execute vzeroupper instruction after JNI call and on exits in jit compiled code which use 256bit vectors.
Reviewed-by: roland
/*
 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef SHARK
#include "shark/shark_globals.hpp"
#endif
#ifdef CC_INTERP

// Routine exists to make tracebacks look decent in debugger
// while we are recursed in the frame manager/c++ interpreter.
// We could use an address in the frame manager but having
// frames look natural in the debugger is a plus.
extern "C" void RecursiveInterpreterActivation(interpreterState istate )
{
  //
  ShouldNotReachHere();
}

#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
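// STATE(field_name) builds an Address operand for the given field of the
// BytecodeInterpreter struct (the "interpreterState") that the 'state'
// register points at, e.g. STATE(_stack) == Address(state, offset of _stack).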

Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized
                                      // C++ interpreter entry point; this label holds that entry point.

// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
// so r13 is a better choice.

const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);

// NEEDED for JVMTI?
// address AbstractInterpreter::_remove_activation_preserving_args_entry;

static address unctrap_frame_manager_entry = NULL;

static address deopt_frame_manager_return_atos = NULL;
static address deopt_frame_manager_return_btos = NULL;
static address deopt_frame_manager_return_itos = NULL;
static address deopt_frame_manager_return_ltos = NULL;
static address deopt_frame_manager_return_ftos = NULL;
static address deopt_frame_manager_return_dtos = NULL;
static address deopt_frame_manager_return_vtos = NULL;

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_VOID   : i = 5; break;
    case T_FLOAT  : i = 8; break;
    case T_LONG   : i = 9; break;
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}
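// Note: the indices above are used to select the per-type result handler and
// converter stubs generated below; the assert only checks that they stay
// within AbstractInterpreter::number_of_result_handlers.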

// Is this pc anywhere within code owned by the interpreter?
// This only works for a pc that might possibly be exposed to frame
// walkers. It clearly misses all of the actual c++ interpreter
// implementation.
bool CppInterpreter::contains(address pc) {
  return (_code->contains(pc) ||
          pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
}


address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andl(rax, 0xFFFF);      break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_VOID   : // fall thru
    case T_LONG   : // fall thru
    case T_INT    : /* nothing to do */        break;

    case T_DOUBLE :
    case T_FLOAT  :
      {
        const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
#ifndef _LP64
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
#endif // !_LP64
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, STATE(_oop_temp));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                  // return from result handler
  return entry;
}

// tosca based result to c++ interpreter stack based result.
// Result goes to top of native stack.

#undef EXTEND  // SHOULD NOT BE NEEDED
address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  // A result is in the tosca (abi result) from either a native method call or compiled
  // code. Place this result on the java expression stack so C++ interpreter can use it.
  address entry = __ pc();

  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  __ pop(t);                            // remove return address first
  switch (type) {
    case T_VOID:
      break;
    case T_BOOLEAN:
#ifdef EXTEND
      __ c2bool(rax);
#endif
      __ push(rax);
      break;
    case T_CHAR   :
#ifdef EXTEND
      __ andl(rax, 0xFFFF);
#endif
      __ push(rax);
      break;
    case T_BYTE   :
#ifdef EXTEND
      __ sign_extend_byte (rax);
#endif
      __ push(rax);
      break;
    case T_SHORT  :
#ifdef EXTEND
      __ sign_extend_short(rax);
#endif
      __ push(rax);
      break;
    case T_LONG   :
      __ push(rdx);                     // pushes useless junk on 64bit
      __ push(rax);
      break;
    case T_INT    :
      __ push(rax);
      break;
    case T_FLOAT  :
      // Result is in ST(0)/xmm0
      __ subptr(rsp, wordSize);
      if ( UseSSE < 1) {
        __ fstp_s(Address(rsp, 0));
      } else {
        __ movflt(Address(rsp, 0), xmm0);
      }
      break;
    case T_DOUBLE :
      __ subptr(rsp, 2*wordSize);
      if ( UseSSE < 2 ) {
        __ fstp_d(Address(rsp, 0));
      } else {
        __ movdbl(Address(rsp, 0), xmm0);
      }
      break;
    case T_OBJECT :
      __ verify_oop(rax);               // verify it
      __ push(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ jmp(t);                            // return from result handler
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result on the java expression stack of the caller.
  //
  // The current interpreter activation in rsi/r13 is for the method just returning its
  // result. So we know that the result of this method is on the top of the current
  // execution stack (which is pre-pushed) and will be returned to the top of the caller
  // stack. The top of the caller's stack is the bottom of the locals of the current
  // activation.
  // Because of the way activations are managed by the frame manager the value of rsp is
  // below both the stack top of the current activation and naturally the stack top
  // of the calling activation. This enables this routine to leave the return address
  // to the frame manager on the stack and do a vanilla return.
  //
  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
  // On Return: rsi/r13 - unchanged
  //            rax - new stack top for caller activation (i.e. activation in _prev_link)
  //
  // Can destroy rdx, rcx.
  //
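  // Rough sketch (one reading of the description above) for a one-word
  // result, stack growing down:
  //
  //   rax -> locals[0]         <- result stored here == top of caller's stack
  //          ...current activation's interpreterState / monitors...
  //          STATE(_stack)     <- pre-pushed tos; result read from [_stack + wordSize]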

  address entry = __ pc();
  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  switch (type) {
    case T_VOID:
      __ movptr(rax, STATE(_locals));                         // pop parameters get new stack value
      __ addptr(rax, wordSize);                               // account for prepush before we return
      break;
    case T_FLOAT  :
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));                         // address for result
      __ movl(rdx, Address(rdx, wordSize));                   // get result
      __ movptr(Address(rax, 0), rdx);                        // and store it
      break;
    case T_LONG   :
    case T_DOUBLE :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.

      __ movptr(rax, STATE(_locals));                         // address for result
      __ movptr(rcx, STATE(_stack));
      __ subptr(rax, wordSize);                               // need an additional word besides locals[0]
      __ movptr(rdx, Address(rcx, 2*wordSize));               // get result word (junk in 64bit)
      __ movptr(Address(rax, wordSize), rdx);                 // and store it
      __ movptr(rdx, Address(rcx, wordSize));                 // get result word
      __ movptr(Address(rax, 0), rdx);                        // and store it
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));                         // address for result
      __ movptr(rdx, Address(rdx, wordSize));                 // get result
      __ verify_oop(rdx);                                     // verify it
      __ movptr(Address(rax, 0), rdx);                        // and store it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result in the native abi that the caller expects.
  //
  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
  // and so rather than return result onto caller's java expression stack we return the
  // result in the expected location based on the native abi.
  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
  // On Return: rsi/r13 - unchanged
  // Other registers changed [rax/rdx/ST(0) as needed for the result returned]

  address entry = __ pc();
  switch (type) {
    case T_VOID:
      break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      __ movptr(rdx, STATE(_stack));                          // get top of stack
      __ movl(rax, Address(rdx, wordSize));                   // get result word 1
      break;
    case T_LONG   :
      __ movptr(rdx, STATE(_stack));                          // get top of stack
      __ movptr(rax, Address(rdx, wordSize));                 // get result low word
      NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));)       // get result high word
      break;
    case T_FLOAT  :
      __ movptr(rdx, STATE(_stack));                          // get top of stack
      if ( UseSSE >= 1) {
        __ movflt(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_s(Address(rdx, wordSize));                     // push float result
      }
      break;
    case T_DOUBLE :
      __ movptr(rdx, STATE(_stack));                          // get top of stack
      if ( UseSSE >= 2) {
        __ movdbl(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_d(Address(rdx, wordSize));                     // push double result
      }
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));                          // get top of stack
      __ movptr(rax, Address(rdx, wordSize));                 // get result word 1
      __ verify_oop(rax);                                     // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}

address CppInterpreter::return_entry(TosState state, int length) {
  // make it look good in the debugger
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}

// C++ Interpreter
void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register locals,
                                                                 const Register sender_sp,
                                                                 bool native) {

  // On entry the "locals" argument points to locals[0] (or where it would be
  // if the method has no locals). "state" contains any previous frame manager state which we must save a link
  // to in the newly generated state object. On return "state" is a pointer to the newly allocated
  // state object. We must allocate and initialize a new interpreterState object and the method
  // expression stack. Because the returned result (if any) of the method will be placed on the caller's
  // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must
  // be sure to leave space on the caller's stack so that this result will not overwrite values when
  // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when
  // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in
  // non-product builds and initialize this last local with the previous interpreterState as
  // this makes things look real nice in the debugger.

  // State on entry
  // Assumes locals == &locals[0]
  // Assumes state == any previous frame manager state (assuming call path from c++ interpreter)
  // Assumes rax = return address
  // rcx == senders_sp
  // rbx == method
  // Modifies rcx, rdx, rax
  // Returns:
  // state == address of new interpreterState
  // rsp == bottom of method's expression stack.

  const Address const_offset      (rbx, Method::const_offset());


  // On entry sp is the sender's sp. This includes the space for the arguments
  // that the sender pushed. If the sender pushed no args (a static) and the
  // method returns a long then we need two words on the sender's stack which
  // are not present (although by the time we return and restore the full-size
  // stack the space will be present). If we didn't allocate two words here then
  // when we "push" the result onto the caller's stack we would overwrite the return
  // address and the saved rbp. Not good. So simply allocate 2 words now
  // just to be safe. This is the "static long no_params() method" issue.
  // See Lo.java for a testcase.
  // We don't need this for native calls because they return result in
  // register and the stack is expanded in the caller before we store
  // the results on the stack.
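  // Illustration (hypothetical testcase in the spirit of Lo.java):
  //
  //   static long no_params() { return 0L; }
  //
  // The sender pushed no arguments, so locals[0]/locals[1] would alias the
  // sender's return address and saved rbp; the two words reserved below keep
  // the two-word long result from clobbering them.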

  if (!native) {
#ifdef PRODUCT
    __ subptr(rsp, 2*wordSize);
#else /* PRODUCT */
    __ push((int32_t)NULL_WORD);
    __ push(state);                       // make it look like a real argument
#endif /* PRODUCT */
  }

  // Now that we are assured of space for stack result, setup typical linkage

  __ push(rax);
  __ enter();

  __ mov(rax, state);                     // save current state

  __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
  __ mov(state, rsp);

  // rsi/r13 == state/locals rax == prevstate

  // initialize the "shadow" frame that we use, since the C++ interpreter is not
  // directly recursive. It would be simpler to recurse, but then we couldn't trim
  // the expression stack as we call new methods.
  __ movptr(STATE(_locals), locals);                            // state->_locals = locals()
  __ movptr(STATE(_self_link), state);                          // point to self
  __ movptr(STATE(_prev_link), rax);                            // state->_link = state on entry (NULL or previous state)
  __ movptr(STATE(_sender_sp), sender_sp);                      // state->_sender_sp = sender_sp
#ifdef _LP64
  __ movptr(STATE(_thread), r15_thread);                        // state->_thread = thread
#else
  __ get_thread(rax);                                           // get vm's javathread*
  __ movptr(STATE(_thread), rax);                               // state->_thread = thread
#endif // _LP64
  __ movptr(rdx, Address(rbx, Method::const_offset()));         // get constantMethodOop
  __ lea(rdx, Address(rdx, ConstMethod::codes_offset()));       // get code base
  if (native) {
    __ movptr(STATE(_bcp), (int32_t)NULL_WORD);                 // state->_bcp = NULL
  } else {
    __ movptr(STATE(_bcp), rdx);                                // state->_bcp = codes()
  }
  __ xorptr(rdx, rdx);
  __ movptr(STATE(_oop_temp), rdx);                             // state->_oop_temp = NULL (only really needed for native)
  __ movptr(STATE(_mdx), rdx);                                  // state->_mdx = NULL
  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ movptr(STATE(_constants), rdx);                            // state->_constants = constants()

  __ movptr(STATE(_method), rbx);                               // state->_method = method()
  __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry);   // state->_msg = initial method entry
  __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD);     // state->_result._to_call._callee = NULL


  __ movptr(STATE(_monitor_base), rsp);                         // set monitor block bottom (grows down) this would point to entry [0]
                                                                // entries run from -1..x where &monitor[x] ==

  {
    // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
    // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
    // immediately.

    // synchronize method
    const Address access_flags      (rbx, Method::access_flags_offset());
    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
    Label not_synced;

    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, not_synced);

    // Allocate initial monitor and pre initialize it
    // get synchronization object

    Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(locals, 0));                   // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
    // add space for monitor & lock
    __ subptr(rsp, entry_size);                                           // add space for a monitor entry
    __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
    __ bind(not_synced);
  }

  __ movptr(STATE(_stack_base), rsp);                   // set expression stack base ( == &monitors[-count])
  if (native) {
    __ movptr(STATE(_stack), rsp);                      // set current expression stack tos
    __ movptr(STATE(_stack_limit), rsp);
  } else {
    __ subptr(rsp, wordSize);                           // pre-push stack
    __ movptr(STATE(_stack), rsp);                      // set current expression stack tos

    // compute full expression stack limit

    const int extra_stack = 0; //6815692//Method::extra_stack_words();
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
    __ negptr(rdx);                                     // so we can subtract in next step
    // Allocate expression stack
    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
    __ movptr(STATE(_stack_limit), rsp);
  }

#ifdef _LP64
  // Make sure stack is properly aligned and sized for the abi
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

}

// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {

  const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address backedge_counter  (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset());

  if (ProfileInterpreter) { // %%% Merge this into MethodData*
    __ incrementl(Address(rbx, Method::interpreter_invocation_counter_offset()));
  }
  // Update standard invocation counters
  __ movl(rax, backedge_counter);               // load backedge counter

  __ increment(rcx, InvocationCounter::count_increment);
  __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

  __ movl(invocation_counter, rcx);             // save invocation count
  __ addl(rcx, rax);                            // add both counters

  // profile_method is non-null only for interpreted methods, so
  // profile_method != NULL == !native_call.
  // The C++ interpreter only calls this for native methods, so that code is elided.

  __ cmp32(rcx,
           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
  __ jcc(Assembler::aboveEqual, *overflow);
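  // Note: the compare above is unsigned, so a summed counter that wraps
  // negative still looks >= the limit; this is what makes the overflow test
  // 'sticky' as described in the header comment.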

}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // C++ interpreter on entry
  // rsi/r13 - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi/r13 - previous interpreter state pointer

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (int32_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  // for c++ interpreter can rsi really be munged?
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));                  // restore state
  __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method)));    // restore method
  __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals)));    // get locals pointer

  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // C++ Interpreter
  // rsi/r13: previous interpreter frame state object
  // rdi: &locals[0]
  // rcx: # of locals
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // destroyed on exit
  // rax

  // NOTE: the additional locals are also always pushed (this wasn't obvious in
  // generate_method_entry), so the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (int)sizeof(BytecodeInterpreter);

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  // save rsi == caller's bytecode ptr (c++ previous interp. state)
  // QQQ problem here?? rsi overload????
  __ push(state);

  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);

  NOT_LP64(__ get_thread(thread));

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  // Always give one monitor to allow us to start interp if sync method.
  // Any additional monitors need a check when moving the expression stack
  const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
  const int extra_stack = 0; //6815692//Method::extra_stack_entries();
  __ movptr(rax, Address(rbx, Method::const_offset()));
  __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
  __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
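  // rax == max expression stack (plus one monitor and extra_stack) in bytes,
  // plus the additional locals scaled to bytes, plus the fixed
  // interpreterState overhead: the total space this frame may need.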

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);
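  // rax is now the thread's stack bottom plus the space this frame needs;
  // rsp must stay above it for the frame to fit.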

  // We should have a magic number here for the size of the c++ interpreter frame.
  // We can't actually tell this ahead of time. The debug version size is around 3k,
  // product is 1k and fastdebug is 4k.
  const int slop = 6 * K;

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  // Only need this if we are stack banging which is temporary while
  // we're debugging.
  __ addptr(rax, slop + 2*max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(state);  // get c++ prev state.

  // throw exception return address becomes throwing pc
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(state);

  __ bind(after_frame_check);
}

// Find preallocated monitor and lock method (C++ interpreter)
// rbx - Method*
//
void InterpreterGenerator::lock_method(void) {
  // assumes state == rsi/r13 == pointer to current interpreterState
  // minimally destroys rax, rdx|c_rarg1, rdi
  //
  // synchronize method
  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
  const Address access_flags      (rbx, Method::access_flags_offset());

  const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

  // find initial monitor i.e. monitors[-1]
  __ movptr(monitor, STATE(_monitor_base));             // get monitor bottom limit
  __ subptr(monitor, entry_size);                       // point to initial monitor

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ movptr(rdi, STATE(_locals));                     // prepare to get receiver (assume common case)
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, 0));                    // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
#ifdef ASSERT
  { Label L;
    __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));   // correct object?
    __ jcc(Assembler::equal, L);
    __ stop("wrong synchronization object");
    __ bind(L);
  }
#endif // ASSERT
  // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
  __ lock_object(monitor);
}

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: Method*

  // rsi/r13: senderSP must be preserved for slow path, set SP to it on fast path

  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {

    address entry_point = __ pc();

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
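    // rdx now holds the constant pool cache index scaled to cache-entry words;
    // e.g. a (hypothetical) rewritten index of 5 becomes 5 * 4, matching the
    // 4-word ConstantPoolCacheEntry asserted below.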
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi/r13: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
    Label notObj;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ movptr(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notObj);
#endif // _LP64
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
#ifndef _LP64
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
#endif // _LP64
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32-bit wordsize
    __ movl(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);                           // get return address
    __ mov(rsp, sender_sp_on_entry);       // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    // We will enter the c++ interpreter looking like it was
    // called by the call_stub; this will cause it to return
    // a tosca result to the invoker, which might have been
    // the c++ interpreter itself.

    __ jmp(fast_accessor_slow_entry_path);
    return entry_point;

  } else {
    return NULL;
  }

}

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // We need to have a routine that generates code to:
    //   * load the value in the referent field
    //   * pass that value to the pre-barrier.
    //
    // In the case of G1 this will record the value of the
    // referent in an SATB buffer if marking is active.
    // This will cause concurrent marking to mark the referent
    // field as live.
    Unimplemented();
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// C++ Interpreter stub for calling a native method.
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup but still has the pointer to
// an interpreter state.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rcx: receiver (unused)
  // rsi/r13: previous interpreter state (if called from C++ interpreter); must be
  //          preserved in any case. If called via c1/c2/call_stub rsi/r13 is junk,
  //          but harmless to save/restore.
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // rsi/r13 == state/locals rdi == prevstate
  const Register locals = rdi;

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  __ pop(rax);                                       // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters/locals

  __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));

  // initialize fixed part of activation frame

  // Assumes rax = return address

  // allocate and initialize new interpreterState and method expression stack
  // IN(locals) -> locals
  // IN(state) -> previous frame manager state (NULL from stub/c1/c2)
  // destroys rax, rcx, rdx
  // OUT (state) -> new interpreterState
  // OUT(rsp) -> bottom of methods expression stack

  // save sender_sp
  __ mov(rcx, sender_sp_on_entry);
  // start with NULL previous state
  __ movptr(state, (int32_t)NULL_WORD);
  generate_compute_interpreter_state(state, locals, rcx, true);

#ifdef ASSERT
  { Label L;
    __ movptr(rax, STATE(_stack_base));
#ifdef _LP64
    // duplicate the alignment rsp got after setting stack_base
    __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count

  const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
  NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(unlock_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif


  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;

  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ movl(rax, STATE(_thread));)     // get thread
  __ movbool(do_not_unlock_if_synchronized, false);


  // check for synchronized native methods
  //
  // Note: This must happen *after* invocation counter check, since
  //       when overflow happens, the method should not be locked.
  if (synchronized) {
    // potentially kills rax, rcx, rdx, rdi
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
  const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1
  const Address constMethod       (method, Method::const_offset());
  const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());

  // allocate space for parameters
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);
  __ movptr(t, constMethod);
  __ load_unsigned_short(t, size_of_parameters);
  __ shll(t, 2);
#ifdef _LP64
  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#else
  __ addptr(t, 2*wordSize);                 // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#endif // _LP64

  // get signature handler
  Label pending_exception_present;

  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
    __ movptr(method, STATE(_method));
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, pending_exception_present);
    __ verify_method_ptr(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                       // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

  const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator will blow RBX
  // sometime, so we must reload it after the call.
  __ movptr(from_ptr, STATE(_locals));  // get the from pointer
  __ call(t);
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);

  // result handler is in rax
  // set result handler
  __ movptr(STATE(_result_handler), rax);


  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation object
    __ movptr(STATE(_oop_temp), t);
    // pass handle to mirror
#ifdef _LP64
    __ lea(c_rarg1, STATE(_oop_temp));
#else
    __ lea(t, STATE(_oop_temp));
    __ movptr(Address(rsp, wordSize), t);
#endif // _LP64
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                       // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

  // pass JNIEnv
#ifdef _LP64
  __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
#else
  __ movptr(thread, STATE(_thread));        // get thread
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));

  __ movptr(Address(rsp, 0), t);
#endif // _LP64

#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                       // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.

  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(rax);

  // result potentially in rdx:rax or ST0
  __ movptr(method, STATE(_method));
  NOT_LP64(__ movptr(thread, STATE(_thread));)    // get thread

  // The potential result is in ST(0) & rdx:rax
  // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then
  // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about
  // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would
  // be destroyed.
  // It is safe to do these pushes because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
  { Label Lpush, Lskip;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(STATE(_result_handler), float_handler.addr());
    __ jcc(Assembler::equal, Lpush);
    __ cmpptr(STATE(_result_handler), double_handler.addr());
    __ jcc(Assembler::notEqual, Lskip);
    __ bind(Lpush);
    __ subptr(rsp, 2*wordSize);
    if ( UseSSE < 2 ) {
      __ fstp_d(Address(rsp, 0));
    } else {
      __ movdbl(Address(rsp, 0), xmm0);
    }
    __ bind(Lskip);
  }
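  // Note: the installed result handler address doubles as a return-type tag
  // here; comparing it against the T_FLOAT/T_DOUBLE handlers is how we know
  // an FP value may be live in ST(0)/xmm0 without re-parsing the signature.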

  // save rax:rdx for potential use by result handler.
  __ push(rax);
#ifndef _LP64
  __ push(rdx);
#endif // _LP64

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();
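  // Per this changeset's summary, the call above now also executes vzeroupper
  // (when AVX is in use), clearing the upper 128 bits of YMM registers that
  // native code may have dirtied, so later code avoids SSE/AVX transition
  // penalties.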

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    // Write serialization page so VM thread can do a pseudo remote membar.
    // We use the current thread pointer to calculate a thread specific
    // offset to write to within the page. This minimizes bus traffic
    // due to cache line collision.
    __ serialize_memory(thread, rcx);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    // Safepoints do not block threads running native code; they are expected
    // to self-suspend when leaving the _thread_in_native state. We need to
    // check for pending suspend requests here.
    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers.
    //

    ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                                           thread);
    __ increment(rsp, wordSize);

    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(thread, STATE(_thread));      // get thread

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
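  // (this drops any JNI local references created during the native call)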

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(STATE(_result_handler), oop_handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
#ifndef _LP64
    __ pop(rdx);
#endif // _LP64
    __ pop(rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(STATE(_oop_temp), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(rax);
#ifndef _LP64
    __ push(rdx);
#endif // _LP64
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }


  // QQQ Seems like for native methods we simply return and the caller will see the pending
  // exception and do the right thing. Certainly the interpreter will, don't know about
  // compiled methods.
  // Seems that the answer to the above is no, this is wrong. The old code would see the exception
  // and forward it before doing the unlocking and notifying jvmdi that method has exited.
  // This seems wrong; need to investigate the spec.

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ bind(pending_exception_present);

    // There are potential results on the stack (rax/rdx, ST(0)); we ignore these and simply
    // return and let caller deal with exception. This skips the unlocking here, which
    // seems wrong but seems to be what the asm interpreter did. Can't find this in the spec.
    // Note: must preserve method in rbx
    //

    // remove activation

    __ movptr(t, STATE(_sender_sp));
    __ leave();                              // remove frame anchor
    __ pop(rdi);                             // get return address
    __ movptr(state, STATE(_prev_link));     // get previous state for return
    __ mov(rsp, t);                          // set sp to sender sp
    __ push(rdi);                            // push throwing pc
    // This skips unlocking!! It seems to be what the asm interpreter does but seems
    // very wrong. Not clear if this violates the spec.
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro assembler implementation
    { Label unlock;
      const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
      __ movptr(monitor, STATE(_monitor_base));
      __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize);  // address of initial monitor

      __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(monitor);
      // unlock can blow rbx so restore it for path that needs it below
      __ movptr(method, STATE(_method));
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
#ifndef _LP64
  __ pop(rdx);
#endif // _LP64
  __ pop(rax);
  __ movptr(t, STATE(_result_handler));      // get result handler
  __ call(t);                                // call result handler to convert to tosca form

  // remove activation

  __ movptr(t, STATE(_sender_sp));

  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ movptr(state, STATE(_prev_link));       // get previous state for return (if c++ interpreter was caller)
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
1490 // Generate entries that will put a result type index into rcx
1491 void CppInterpreterGenerator::generate_deopt_handling() {
1493 Label return_from_deopt_common;
1495 // Generate entries that will put a result type index into rcx
1496 // deopt needs to jump to here to enter the interpreter (return a result)
1497 deopt_frame_manager_return_atos = __ pc();
1499 // rax is live here
1500 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index
1501 __ jmp(return_from_deopt_common);
1504 // deopt needs to jump to here to enter the interpreter (return a result)
1505 deopt_frame_manager_return_btos = __ pc();
1507 // rax is live here
1508 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index
1509 __ jmp(return_from_deopt_common);
1511 // deopt needs to jump to here to enter the interpreter (return a result)
1512 deopt_frame_manager_return_itos = __ pc();
1514 // rax is live here
1515 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index
1516 __ jmp(return_from_deopt_common);
1518 // deopt needs to jump to here to enter the interpreter (return a result)
1520 deopt_frame_manager_return_ltos = __ pc();
1521 // rax,rdx are live here
1522 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index
1523 __ jmp(return_from_deopt_common);
1525 // deopt needs to jump to here to enter the interpreter (return a result)
1527 deopt_frame_manager_return_ftos = __ pc();
1528 // st(0) is live here
1529 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
1530 __ jmp(return_from_deopt_common);
1532 // deopt needs to jump to here to enter the interpreter (return a result)
1533 deopt_frame_manager_return_dtos = __ pc();
1535 // st(0) is live here
1536 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
1537 __ jmp(return_from_deopt_common);
1539 // deopt needs to jump to here to enter the interpreter (return a result)
1540 deopt_frame_manager_return_vtos = __ pc();
1542 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));
1544 // Deopt return common
1545 // an index is present in rcx that lets us move any possible result being
1546 // returned to the interpreter's stack
1547 //
1548 // Because we have a full sized interpreter frame on the youngest
1549 // activation the stack is pushed too deep to share the tosca to
1550 // stack converters directly. We shrink the stack to the desired
1551 // amount and then push result and then re-extend the stack.
1552 // We could have the code in size_activation layout a short
1553 // frame for the top activation but that would look different
1554 // than say sparc (which needs a full size activation because
1555 // the register windows are in the way). Really it could be short? QQQ
1556 //
1557 __ bind(return_from_deopt_common);
1559 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1561 // setup rsp so we can push the "result" as needed.
1562 __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed)
1563 __ addptr(rsp, wordSize); // undo prepush
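// The code below indexes the _tosca_to_stack table of converter stubs by the
// basic-type index in rcx and calls the selected stub. As a rough C-style
// sketch (illustrative only, not the emitted code):
//
//   // rcx == AbstractInterpreter::BasicType_as_index(result type)
//   address converter = ((address*)CppInterpreter::_tosca_to_stack)[rcx];
//   converter();   // pushes rax / rdx:rax / st(0) onto the java stack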
1565 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
1566 // Address index(noreg, rcx, Address::times_ptr);
1567 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
1568 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
1569 __ call(rcx); // call result converter
1571 __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
1572 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
1573 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
1574 // result if any on stack already)
1575 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
1576 }
1578 // Generate the code to handle a more_monitors message from the c++ interpreter
1579 void CppInterpreterGenerator::generate_more_monitors() {
1582 Label entry, loop;
1583 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
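// In effect (C-style sketch, illustrative names only): slide the live
// expression stack down by one monitor's worth and open a hole at the bottom:
//
//   rsp          -= entry_size;   // new native stack top
//   _stack       -= entry_size;   // new expression stack top
//   _stack_limit -= entry_size;
//   _stack_base  -= entry_size;   // new bottom; the hole becomes the monitor
//   memmove(new_top, old_top, old_base - old_top);  // copy stack words down
//   new_monitor->obj = NULL;      // zeroed so the slot is recognizably free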
1584 // 1. compute new pointers // rsp: old expression stack top
1585 __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom
1586 __ subptr(rsp, entry_size); // move expression stack top limit
1587 __ subptr(STATE(_stack), entry_size); // update interpreter stack top
1588 __ subptr(STATE(_stack_limit), entry_size); // inform interpreter
1589 __ subptr(rdx, entry_size); // move expression stack bottom
1590 __ movptr(STATE(_stack_base), rdx); // inform interpreter
1591 __ movptr(rcx, STATE(_stack)); // set start value for copy loop
1592 __ jmp(entry);
1593 // 2. move expression stack contents
1594 __ bind(loop);
1595 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
1596 __ movptr(Address(rcx, 0), rbx); // and store it at new location
1597 __ addptr(rcx, wordSize); // advance to next word
1598 __ bind(entry);
1599 __ cmpptr(rcx, rdx); // check if bottom reached
1600 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
1601 // now zero the slot so we can find it.
1602 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
1603 __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
1604 }
1607 // Initial entry to C++ interpreter from the call_stub.
1608 // This entry point is called the frame manager since it handles the generation
1609 // of interpreter activation frames via requests directly from the vm (via call_stub)
1610 // and via requests from the interpreter. The requests from the call_stub happen
1611 // directly thru the entry point. Requests from the interpreter happen via returning
1612 // from the interpreter and examining the message the interpreter has returned to
1613 // the frame manager. The frame manager can take the following requests:
1615 // NO_REQUEST - error, should never happen.
1616 // MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
1617 // allocate a new monitor.
1618 // CALL_METHOD - setup a new activation to call a new method. Very similar to what
1619 // happens during entry via the call stub.
1620 // RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
1621 // (a C-style sketch of this message loop appears just below)
1622 // Arguments:
1623 //
1624 // rbx: Method*
1625 // rcx: receiver - unused (retrieved from stack as needed)
1626 // rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
1627 //
1628 //
1629 // Stack layout at entry
1630 //
1631 // [ return address ] <--- rsp
1632 // [ parameter n ]
1633 // ...
1634 // [ parameter 1 ]
1635 // [ expression stack ]
1636 //
1637 //
1638 // We are free to blow any registers we like because the call_stub which brought us here
1639 // initially has preserved the callee save registers already.
1640 //
1641 //
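// As a non-authoritative sketch, the frame manager boils down to this
// C-style message loop (message names from BytecodeInterpreter):
//
//   for (;;) {
//     BytecodeInterpreter::run(state);             // returns with a message
//     switch (state->_msg) {
//       case more_monitors:      /* grow monitor area, resume     */ break;
//       case call_method:        /* push a new activation, loop   */ break;
//       case return_from_method: /* pop activation or exit        */ break;
//       case do_osr:             /* jump into the OSR nmethod     */ break;
//       case throwing_exception: /* unwind to handler or caller   */ break;
//       default:                 ShouldNotReachHere();
//     }
//   }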
1643 static address interpreter_frame_manager = NULL;
1645 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1647 // rbx: Method*
1648 // rsi/r13: sender sp
1650 // Because we redispatch "recursive" interpreter entries thru this same entry point
1651 // the "input" register usage is a little strange and not what you expect coming
1652 // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter
1653 // state are NULL but on "recursive" dispatches they are what you'd expect.
1654 // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2)
1657 // A single frame manager is plenty as we don't specialize for synchronized. We could and
1658 // the code is pretty much ready. Would need to change the test below and for good measure
1659 // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
1660 // routines. Not clear this is worth it yet.
1662 if (interpreter_frame_manager) return interpreter_frame_manager;
1664 address entry_point = __ pc();
1666 // Fast accessor methods share this entry point.
1667 // This works because the frame manager is in the same codelet
1668 if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
1670 Label dispatch_entry_2;
1671 __ movptr(rcx, sender_sp_on_entry);
1672 __ movptr(state, (int32_t)NULL_WORD); // no current activation
1674 __ jmp(dispatch_entry_2);
1676 const Register locals = rdi;
1678 Label re_dispatch;
1680 __ bind(re_dispatch);
1682 // save sender sp (doesn't include return address)
1683 __ lea(rcx, Address(rsp, wordSize));
1685 __ bind(dispatch_entry_2);
1687 // save sender sp
1688 __ push(rcx);
1690 const Address constMethod (rbx, Method::const_offset());
1691 const Address access_flags (rbx, Method::access_flags_offset());
1692 const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
1693 const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
1695 // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1696 // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
1697 // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
1699 // get parameter size (always needed)
1700 __ movptr(rdx, constMethod);
1701 __ load_unsigned_short(rcx, size_of_parameters);
1703 // rbx: Method*
1704 // rcx: size of parameters
1705 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1707 __ subptr(rdx, rcx); // rdx = no. of additional locals
1709 // see if we've got enough room on the stack for locals plus overhead.
1710 generate_stack_overflow_check(); // C++
1712 // c++ interpreter does not use stack banging or any implicit exceptions
1713 // leave for now to verify that check is proper.
1714 bang_stack_shadow_pages(false);
1718 // compute beginning of parameters (rdi)
1719 __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
1721 // save sender's sp
1722 // __ movl(rcx, rsp);
1724 // get sender's sp
1725 __ pop(rcx);
1727 // get return address
1728 __ pop(rax);
1730 // rdx - # of additional locals
1731 // allocate space for locals
1732 // explicitly initialize locals
1733 {
1734 Label exit, loop;
1735 __ testl(rdx, rdx); // (32bit ok)
1736 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1737 __ bind(loop);
1738 __ push((int32_t)NULL_WORD); // initialize local variables
1739 __ decrement(rdx); // until everything initialized
1740 __ jcc(Assembler::greater, loop);
1741 __ bind(exit);
1742 }
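// Equivalently (illustrative C): for (int i = 0; i < extra_locals; i++) push(0);
// so every local slot beyond the parameters starts out zeroed.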
1745 // Assumes rax = return address
1747 // allocate and initialize new interpreterState and method expression stack
1748 // IN(locals) -> locals
1749 // IN(state) -> any current interpreter activation
1750 // destroys rax, rcx, rdx, rdi
1751 // OUT (state) -> new interpreterState
1752 // OUT(rsp) -> bottom of methods expression stack
1754 generate_compute_interpreter_state(state, locals, rcx, false);
1756 // Call interpreter
1758 Label call_interpreter;
1759 __ bind(call_interpreter);
1761 // c++ interpreter does not use stack banging or any implicit exceptions
1762 // leave for now to verify that check is proper.
1763 bang_stack_shadow_pages(false);
1766 // Call interpreter enter here if message is
1767 // set and we know stack size is valid
1769 Label call_interpreter_2;
1771 __ bind(call_interpreter_2);
1773 {
1774 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1776 #ifdef _LP64
1777 __ mov(c_rarg0, state);
1778 #else
1779 __ push(state); // push arg to interpreter
1780 __ movptr(thread, STATE(_thread));
1781 #endif // _LP64
1783 // We can setup the frame anchor with everything we want at this point
1784 // as we are thread_in_Java and no safepoints can occur until we go to
1785 // vm mode. We do have to clear flags on return from vm but that is it
1786 //
1787 __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
1788 __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
1790 // Call the interpreter
1792 RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
1793 RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
1795 __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
1796 NOT_LP64(__ pop(rax);) // discard parameter to run
1797 //
1798 // state is preserved since it is callee saved
1799 //
1801 // reset_last_Java_frame
1803 NOT_LP64(__ movl(thread, STATE(_thread));)
1804 __ reset_last_Java_frame(thread, true, true);
1805 }
1807 // examine msg from interpreter to determine next action
1809 __ movl(rdx, STATE(_msg)); // Get new message
1811 Label call_method;
1812 Label return_from_interpreted_method;
1813 Label throw_exception;
1814 Label bad_msg;
1815 Label do_OSR;
1817 __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
1818 __ jcc(Assembler::equal, call_method);
1819 __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
1820 __ jcc(Assembler::equal, return_from_interpreted_method);
1821 __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
1822 __ jcc(Assembler::equal, do_OSR);
1823 __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
1824 __ jcc(Assembler::equal, throw_exception);
1825 __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
1826 __ jcc(Assembler::notEqual, bad_msg);
1828 // Allocate more monitor space, shuffle expression stack....
1830 generate_more_monitors();
1832 __ jmp(call_interpreter);
1834 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
1835 unctrap_frame_manager_entry = __ pc();
1836 //
1837 // Load the registers we need.
1838 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1839 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
1840 __ jmp(call_interpreter_2);
1844 //=============================================================================
1845 // Returning from a compiled method into a deopted method. The bytecode at the
1846 // bcp has completed. The result of the bytecode is in the native abi (the tosca
1847 // for the template based interpreter). Any stack space that was used by the
1848 // bytecode that has completed has been removed (e.g. parameters for an invoke)
1849 // so all that we have to do is place any pending result on the expression stack
1850 // and resume execution on the next bytecode.
1853 generate_deopt_handling();
1854 __ jmp(call_interpreter);
1857 // Current frame has caught an exception we need to dispatch to the
1858 // handler. We can get here because a native interpreter frame caught
1859 // an exception, in which case there is no handler and we must rethrow.
1860 // If it is a vanilla interpreted frame then we simply drop into the
1861 // interpreter and let it do the lookup.
1863 Interpreter::_rethrow_exception_entry = __ pc();
1864 // rax: exception
1865 // rdx: return address/pc that threw exception
1867 Label return_with_exception;
1868 Label unwind_and_forward;
1870 // restore state pointer.
1871 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1873 __ movptr(rbx, STATE(_method)); // get method
1874 #ifdef _LP64
1875 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
1876 #else
1877 __ movl(rcx, STATE(_thread)); // get thread
1879 // Store exception where the interpreter will expect it
1880 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
1881 #endif // _LP64
1883 // is current frame vanilla or native?
1885 __ movl(rdx, access_flags);
1886 __ testl(rdx, JVM_ACC_NATIVE);
1887 __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly
1889 // We drop thru to unwind a native interpreted frame with a pending exception
1890 // We jump here for the initial interpreter frame with exception pending
1891 // We unwind the current activation and forward it to our caller.
1893 __ bind(unwind_and_forward);
1895 // unwind rbp, return stack to unextended value and re-push return address
1897 __ movptr(rcx, STATE(_sender_sp));
1898 __ leave();
1899 __ pop(rdx);
1900 __ mov(rsp, rcx);
1901 __ push(rdx);
1902 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1904 // Return point from a call which returns a result in the native abi
1905 // (c1/c2/jni-native). This result must be processed onto the java
1906 // expression stack.
1907 //
1908 // A pending exception may be present, in which case there is no result.
1910 Label resume_interpreter;
1911 Label do_float;
1912 Label do_double;
1913 Label done_conv;
1915 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
1916 if (UseSSE < 2) {
1917 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1918 __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
1919 __ movl(rcx, Address(rbx, Method::result_index_offset()));
1920 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
1921 __ jcc(Assembler::equal, do_float);
1922 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
1923 __ jcc(Assembler::equal, do_double);
1924 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
1925 __ empty_FPU_stack();
1926 #endif // !_LP64 || COMPILER1 || !COMPILER2
1927 __ jmp(done_conv);
1929 __ bind(do_float);
1930 #ifdef COMPILER2
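// with C2 the FPU stack may hold stale values in st(1)..st(7);
// free them, keeping the float result in st(0)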
1931 for (int i = 1; i < 8; i++) {
1932 __ ffree(i);
1933 }
1934 #endif // COMPILER2
1935 __ jmp(done_conv);
1936 __ bind(do_double);
1937 #ifdef COMPILER2
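// as above: free st(1)..st(7), keeping the double result in st(0)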
1938 for (int i = 1; i < 8; i++) {
1939 __ ffree(i);
1940 }
1941 #endif // COMPILER2
1942 __ jmp(done_conv);
1943 } else {
1944 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
1945 __ jmp(done_conv);
1946 }
1948 // Return point to interpreter from compiled/native method
1949 InternalAddress return_from_native_method(__ pc());
1951 __ bind(done_conv);
1954 // Result if any is in tosca. The java expression stack is in the state that the
1955 // calling convention left it (i.e. params may or may not be present)
1956 // Copy the result from tosca and place it on java expression stack.
1958 // Restore rsi/r13 as compiled code may not preserve it
1960 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1962 // restore stack to what we had when we left (in case i2c extended it)
1964 __ movptr(rsp, STATE(_stack));
1965 __ lea(rsp, Address(rsp, wordSize));
1967 // If there is a pending exception then we don't really have a result to process
1969 #ifdef _LP64
1970 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1971 #else
1972 __ movptr(rcx, STATE(_thread)); // get thread
1973 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1974 #endif // _LP64
1975 __ jcc(Assembler::notZero, return_with_exception);
1977 // get method just executed
1978 __ movptr(rbx, STATE(_result._to_call._callee));
1980 // callee left args on top of expression stack, remove them
1981 __ movptr(rcx, constMethod);
1982 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
1984 __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
1986 __ movl(rcx, Address(rbx, Method::result_index_offset()));
1987 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
1988 // Address index(noreg, rax, Address::times_ptr);
1989 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
1990 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
1991 __ call(rcx); // call result converter
1992 __ jmp(resume_interpreter);
1994 // An exception is being caught on return to a vanilla interpreter frame.
1995 // Empty the stack and resume interpreter
1997 __ bind(return_with_exception);
1999 // Exception present, empty stack
2000 __ movptr(rsp, STATE(_stack_base));
2001 __ jmp(resume_interpreter);
2003 // Return from an interpreted method: return a result appropriate to the caller (i.e. "recursive"
2004 // interpreter call, or native) and unwind this interpreter activation.
2005 // All monitors should be unlocked.
2007 __ bind(return_from_interpreted_method);
2009 Label return_to_initial_caller;
2011 __ movptr(rbx, STATE(_method)); // get method just executed
2012 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
2013 __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
2014 __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2)
2016 // Copy result to caller's java stack
2017 ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
2018 // Address index(noreg, rax, Address::times_ptr);
2020 __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
2021 // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
2022 __ call(rax); // call result converter
2024 Label unwind_recursive_activation;
2025 __ bind(unwind_recursive_activation);
2027 // returning to interpreter method from "recursive" interpreter call
2028 // result converter left rax pointing to the top of the java stack for the method we are returning
2029 // to. Now all we must do is unwind the state from the completed call
2031 __ movptr(state, STATE(_prev_link)); // unwind state
2032 __ leave(); // pop the frame
2033 __ mov(rsp, rax); // unwind stack to remove args
2035 // Resume the interpreter. The current frame contains the current interpreter
2036 // state object.
2037 //
2039 __ bind(resume_interpreter);
2041 // state == interpreterState object for method we are resuming
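// Resume protocol, as a C-style sketch (illustrative; fields are those of
// BytecodeInterpreter):
//
//   state->_msg   = method_resume;
//   state->_stack = rsp - wordSize;      // re-prepush: one free slot on top
//   rsp           = state->_stack_limit; // native sp at full stack depth
//   goto call_interpreter_2;             // no stack bang needed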
2043 __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
2044 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
2045 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
2046 // result if any on stack already)
2047 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
2048 __ jmp(call_interpreter_2); // No need to bang
2050 // interpreter returning to native code (call_stub/c1/c2)
2051 // convert result and unwind initial activation
2052 // rax - result index
2054 __ bind(return_to_initial_caller);
2055 ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
2056 // Address index(noreg, rax, Address::times_ptr);
2058 __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
2059 __ call(rax); // call result converter
2061 Label unwind_initial_activation;
2062 __ bind(unwind_initial_activation);
2064 // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0))
2066 /* Current stack picture
2068 [ incoming parameters ]
2069 [ extra locals ]
2070 [ return address to CALL_STUB/C1/C2]
2071 fp -> [ CALL_STUB/C1/C2 fp ]
2072 BytecodeInterpreter object
2073 expression stack
2074 sp ->
2076 */
2078 // return restoring the stack to the original sender_sp value
2080 __ movptr(rcx, STATE(_sender_sp));
2081 __ leave();
2082 __ pop(rdi); // get return address
2083 // set stack to sender's sp
2084 __ mov(rsp, rcx);
2085 __ jmp(rdi); // return to call_stub
2087 // OSR request, adjust return address to make current frame into adapter frame
2088 // and enter OSR nmethod
2090 __ bind(do_OSR);
2092 Label remove_initial_frame;
2094 // We are going to pop this frame. Is there another interpreter frame underneath
2095 // it or is it callstub/compiled?
2097 // Move buffer to the expected parameter location
2098 __ movptr(rcx, STATE(_result._osr._osr_buf));
2100 __ movptr(rax, STATE(_result._osr._osr_entry));
2102 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
2103 __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2)
2105 __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register
2106 __ leave(); // pop the frame
2107 __ mov(rsp, sender_sp_on_entry); // trim any stack expansion
2110 // We know we are calling compiled so push specialized return
2111 // method uses specialized entry, push a return so we look like call stub setup
2112 // this path will handle the fact that the result is returned in registers and not
2113 // on the java stack.
2115 __ pushptr(return_from_native_method.addr());
2117 __ jmp(rax);
2119 __ bind(remove_initial_frame);
2121 __ movptr(rdx, STATE(_sender_sp));
2122 __ leave();
2123 // get real return
2124 __ pop(rsi);
2125 // set stack to sender's sp
2126 __ mov(rsp, rdx);
2127 // repush real return
2128 __ push(rsi);
2129 // Enter OSR nmethod
2130 __ jmp(rax);
2135 // Call a new method. All we do is (temporarily) trim the expression stack
2136 // push a return address to bring us back to here and leap to the new entry.
2138 __ bind(call_method);
2140 // stack points to next free location and not top element on expression stack
2141 // method expects sp to be pointing to topmost element
2143 __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
2144 __ lea(rsp, Address(rsp, wordSize));
2146 __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute
2148 // don't need a return address if reinvoking interpreter
2150 // Make it look like call_stub calling conventions
2152 // Get (potential) receiver
2153 // get size of parameters in words
2154 __ movptr(rcx, constMethod);
2155 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
2157 ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
2158 __ pushptr(recursive.addr()); // make it look good in the debugger
2160 InternalAddress entry(entry_point);
2161 __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
2162 __ jcc(Assembler::equal, re_dispatch); // yes
2164 __ pop(rax); // pop dummy address
2167 // get specialized entry
2168 __ movptr(rax, STATE(_result._to_call._callee_entry_point));
2169 // set sender SP
2170 __ mov(sender_sp_on_entry, rsp);
2172 // method uses specialized entry, push a return so we look like call stub setup
2173 // this path will handle the fact that the result is returned in registers and not
2174 // on the java stack.
2176 __ pushptr(return_from_native_method.addr());
2178 __ jmp(rax);
2180 __ bind(bad_msg);
2181 __ stop("Bad message from interpreter");
2183 // Interpreted method "returned" with an exception; pass it on...
2184 // Pass result, unwind activation and continue/return to interpreter/call_stub
2185 // We handle result (if any) differently based on return to interpreter or call_stub
2187 Label unwind_initial_with_pending_exception;
2189 __ bind(throw_exception);
2190 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call?
2191 __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2)
2192 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
2193 __ addptr(rax, wordSize); // account for prepush before we return
2194 __ jmp(unwind_recursive_activation);
2196 __ bind(unwind_initial_with_pending_exception);
2198 // We will unwind the current (initial) interpreter frame and forward
2199 // the exception to the caller. We must put the exception in the
2200 // expected register and clear pending exception and then forward.
2202 __ jmp(unwind_and_forward);
2204 interpreter_frame_manager = entry_point;
2205 return entry_point;
2206 }
2208 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
2209 // determine code generation flags
2210 bool synchronized = false;
2211 address entry_point = NULL;
2213 switch (kind) {
2214 case Interpreter::zerolocals : break;
2215 case Interpreter::zerolocals_synchronized: synchronized = true; break;
2216 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
2217 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
2218 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
2219 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
2220 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
2221 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
2223 case Interpreter::java_lang_math_sin : // fall thru
2224 case Interpreter::java_lang_math_cos : // fall thru
2225 case Interpreter::java_lang_math_tan : // fall thru
2226 case Interpreter::java_lang_math_abs : // fall thru
2227 case Interpreter::java_lang_math_log : // fall thru
2228 case Interpreter::java_lang_math_log10 : // fall thru
2229 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
2230 case Interpreter::java_lang_ref_reference_get
2231 : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
2232 default : ShouldNotReachHere(); break;
2233 }
2235 if (entry_point) return entry_point;
2237 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
2239 }
2241 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2242 : CppInterpreterGenerator(code) {
2243 generate_all(); // down here so it can be "virtual"
2244 }
2246 // Deoptimization helpers for C++ interpreter
2248 // How much stack a method activation needs in words.
2249 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
2251 const int stub_code = 4; // see generate_call_stub
2252 // Save space for one monitor to get into the interpreted method in case
2253 // the method is synchronized
2254 int monitor_size = method->is_synchronized() ?
2255 1*frame::interpreter_frame_monitor_size() : 0;
2257 // total static overhead size. Account for interpreter state object, return
2258 // address, saved rbp and 2 words for a "static long no_params() method" issue.
2260 const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
2261 ( frame::sender_sp_offset - frame::link_offset) + 2;
2263 const int extra_stack = 0; //6815692//Method::extra_stack_entries();
2264 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
2265 Interpreter::stackElementWords;
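// Worked example (illustrative): a synchronized method with max_locals = 2
// and max_stack = 3 gives method_stack = (2 + 3) * Interpreter::stackElementWords,
// so it needs overhead_size + method_stack + 4 words of stub code plus
// frame::interpreter_frame_monitor_size() words for its monitor.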
2266 return overhead_size + method_stack + stub_code + monitor_size;
2267 }
2269 // returns the activation size.
2270 static int size_activation_helper(int extra_locals_size, int monitor_size) {
2271 return (extra_locals_size + // the additional space for locals
2272 2*BytesPerWord + // return address and saved rbp
2273 2*BytesPerWord + // "static long no_params() method" issue
2274 sizeof(BytecodeInterpreter) + // interpreterState
2275 monitor_size); // monitors
2276 }
2278 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
2279 frame* caller,
2280 frame* current,
2281 Method* method,
2282 intptr_t* locals,
2283 intptr_t* stack,
2284 intptr_t* stack_base,
2285 intptr_t* monitor_base,
2286 intptr_t* frame_bottom,
2287 bool is_top_frame
2288 )
2289 {
2290 // What about any vtable?
2291 //
2292 to_fill->_thread = JavaThread::current();
2293 // This gets filled in later but make it something recognizable for now
2294 to_fill->_bcp = method->code_base();
2295 to_fill->_locals = locals;
2296 to_fill->_constants = method->constants()->cache();
2297 to_fill->_method = method;
2298 to_fill->_mdx = NULL;
2299 to_fill->_stack = stack;
2300 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
2301 to_fill->_msg = deopt_resume2;
2302 } else {
2303 to_fill->_msg = method_resume;
2304 }
2305 to_fill->_result._to_call._bcp_advance = 0;
2306 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
2307 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
2308 to_fill->_prev_link = NULL;
2310 to_fill->_sender_sp = caller->unextended_sp();
2312 if (caller->is_interpreted_frame()) {
2313 interpreterState prev = caller->get_interpreterState();
2314 to_fill->_prev_link = prev;
2315 // *current->register_addr(GR_Iprev_state) = (intptr_t) prev;
2316 // Make the prev callee look proper
2317 prev->_result._to_call._callee = method;
2318 if (*prev->_bcp == Bytecodes::_invokeinterface) {
2319 prev->_result._to_call._bcp_advance = 5;
2320 } else {
2321 prev->_result._to_call._bcp_advance = 3;
2322 }
2323 }
2324 to_fill->_oop_temp = NULL;
2325 to_fill->_stack_base = stack_base;
2326 // Need +1 here because stack_base points to the word just above the first expr stack entry
2327 // and stack_limit is supposed to point to the word just below the last expr stack entry.
2328 // See generate_compute_interpreter_state.
2329 int extra_stack = 0; //6815692//Method::extra_stack_entries();
2330 to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
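// e.g. with max_stack = 2 (and extra_stack = 0): _stack_limit = stack_base - 3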
2331 to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
2333 to_fill->_self_link = to_fill;
2334 assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
2335 "Stack top out of range");
2336 }
2338 int AbstractInterpreter::layout_activation(Method* method,
2339 int tempcount, // current size of the java expression stack in slots
2340 int popframe_extra_args,
2341 int moncount,
2342 int caller_actual_parameters,
2343 int callee_param_count,
2344 int callee_locals,
2345 frame* caller,
2346 frame* interpreter_frame,
2347 bool is_top_frame,
2348 bool is_bottom_frame) {
2350 assert(popframe_extra_args == 0, "FIX ME");
2351 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
2352 // does as far as allocating an interpreter frame.
2353 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
2354 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
2355 // as determined by a previous call to this method.
2356 // It is also guaranteed to be walkable even though it is in a skeletal state
2357 // NOTE: return size is in words not bytes
2358 // NOTE: tempcount is the current size of the java expression stack. For top most
2359 // frames we will allocate a full sized expression stack and not the cut-back
2360 // version that non-top frames have.
2362 // Calculate the amount our frame will be adjusted by the callee. For the top frame
2363 // this is zero.
2365 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
2366 // calculates the extra locals based on itself. Not what the callee does
2367 // to it. So it ignores last_frame_adjust value. Seems suspicious as far
2368 // as getting sender_sp correct.
2370 int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
2371 int monitor_size = sizeof(BasicObjectLock) * moncount;
2373 // First calculate the frame size without any java expression stack
2374 int short_frame_size = size_activation_helper(extra_locals_size,
2375 monitor_size);
2377 // Now with full size expression stack
2378 int extra_stack = 0; //6815692//Method::extra_stack_entries();
2379 int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
2381 // and now with only live portion of the expression stack
2382 short_frame_size = short_frame_size + tempcount * BytesPerWord;
2384 // the size the activation is right now. Only top frame is full size
2385 int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
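// e.g. (illustrative): with max_stack = 4 and tempcount = 1, the top frame
// reserves the full 4-slot expression stack while deeper frames keep only
// the single live slot.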
2387 if (interpreter_frame != NULL) {
2388 #ifdef ASSERT
2389 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
2390 #endif
2392 // MUCHO HACK
2394 intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
2396 /* Now fill in the interpreterState object */
2398 // The state object is the first thing on the frame and easily located
2400 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
2403 // Find the locals pointer. This is rather simple on x86 because there is no
2404 // confusing rounding at the callee to account for. We can trivially locate
2405 // our locals based on the current fp().
2406 // Note: the + 2 is for handling the "static long no_params() method" issue.
2407 // (too bad I don't really remember that issue well...)
2409 intptr_t* locals;
2410 // If the caller is interpreted we need to make sure that locals points to the first
2411 // argument that the caller passed and not in an area where the stack might have been extended.
2412 // because the stack to stack converter needs a proper locals value in order to remove the
2413 // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
2414 // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
2415 // adjust the stack?? HMMM QQQ
2416 //
2417 if (caller->is_interpreted_frame()) {
2418 // locals must agree with the caller because it will be used to set the
2419 // caller's tos when we return.
2420 interpreterState prev = caller->get_interpreterState();
2421 // stack() is prepushed.
2422 locals = prev->stack() + method->size_of_parameters();
2423 // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
2424 if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
2425 // os::breakpoint();
2426 }
2427 } else {
2428 // this is where a c2i would have placed locals (except for the +2)
2429 locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
2430 }
2432 intptr_t* monitor_base = (intptr_t*) cur_state;
2433 intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
2434 /* +1 because stack is always prepushed */
2435 intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
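// For orientation, the pointers just computed give this picture inside the
// skeletal frame (addresses decreasing downward; illustrative):
//
//   fp  -> [ saved rbp                  ]
//          [ BytecodeInterpreter        ] <- cur_state == monitor_base
//          [ moncount monitors          ] <- stack_base
//          [ tempcount + 1 stack slots  ] <- stack (prepushed)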
2438 BytecodeInterpreter::layout_interpreterState(cur_state,
2439 caller,
2440 interpreter_frame,
2441 method,
2442 locals,
2443 stack,
2444 stack_base,
2445 monitor_base,
2446 frame_bottom,
2447 is_top_frame);
2449 // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
2450 }
2451 return frame_size/BytesPerWord;
2452 }
2454 #endif // CC_INTERP (all)