Fri, 25 Jan 2013 10:04:08 -0500
8000692: Remove old KERNEL code
Summary: Removed deprecated kernel VM source code from the HotSpot VM
Reviewed-by: dholmes, acorn
1 /*
2 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "interpreter/bytecodeHistogram.hpp"
28 #include "interpreter/cppInterpreter.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterGenerator.hpp"
31 #include "interpreter/interpreterRuntime.hpp"
32 #include "oops/arrayOop.hpp"
33 #include "oops/methodData.hpp"
34 #include "oops/method.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/jvmtiExport.hpp"
37 #include "prims/jvmtiThreadState.hpp"
38 #include "runtime/arguments.hpp"
39 #include "runtime/deoptimization.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/interfaceSupport.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "runtime/synchronizer.hpp"
45 #include "runtime/timer.hpp"
46 #include "runtime/vframeArray.hpp"
47 #include "utilities/debug.hpp"
48 #ifdef SHARK
49 #include "shark/shark_globals.hpp"
50 #endif
52 #ifdef CC_INTERP
54 // Routine exists to make tracebacks look decent in debugger
55 // while we are recursed in the frame manager/c++ interpreter.
56 // We could use an address in the frame manager but having
57 // frames look natural in the debugger is a plus.
58 extern "C" void RecursiveInterpreterActivation(interpreterState istate )
59 {
60 //
61 ShouldNotReachHere();
62 }
65 #define __ _masm->
66 #define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
68 Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
69 // c++ interpreter entry point; this label holds that entry point.
71 // default registers for state and sender_sp
72 // state and sender_sp are the same on 32bit because we have no choice.
73 // state could be rsi on 64bit but it is an arg reg and not callee save
74 // so r13 is better choice.
76 const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
77 const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);
79 // NEEDED for JVMTI?
80 // address AbstractInterpreter::_remove_activation_preserving_args_entry;
82 static address unctrap_frame_manager_entry = NULL;
84 static address deopt_frame_manager_return_atos = NULL;
85 static address deopt_frame_manager_return_btos = NULL;
86 static address deopt_frame_manager_return_itos = NULL;
87 static address deopt_frame_manager_return_ltos = NULL;
88 static address deopt_frame_manager_return_ftos = NULL;
89 static address deopt_frame_manager_return_dtos = NULL;
90 static address deopt_frame_manager_return_vtos = NULL;
92 int AbstractInterpreter::BasicType_as_index(BasicType type) {
93 int i = 0;
94 switch (type) {
95 case T_BOOLEAN: i = 0; break;
96 case T_CHAR : i = 1; break;
97 case T_BYTE : i = 2; break;
98 case T_SHORT : i = 3; break;
99 case T_INT : i = 4; break;
100 case T_VOID : i = 5; break;
101 case T_FLOAT : i = 8; break;
102 case T_LONG : i = 9; break;
103 case T_DOUBLE : i = 6; break;
104 case T_OBJECT : // fall through
105 case T_ARRAY : i = 7; break;
106 default : ShouldNotReachHere();
107 }
108 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
109 return i;
110 }
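// For reference, the switch above amounts to the following table:
//   T_BOOLEAN->0, T_CHAR->1, T_BYTE->2, T_SHORT->3, T_INT->4,
//   T_VOID->5, T_DOUBLE->6, T_OBJECT/T_ARRAY->7, T_FLOAT->8, T_LONG->9
// (the assert in the function keeps each index below AbstractInterpreter::number_of_result_handlers).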
112 // Is this pc anywhere within code owned by the interpreter?
113 // This only works for a pc that might possibly be exposed to frame
114 // walkers. It clearly misses all of the actual c++ interpreter
115 // implementation.
116 bool CppInterpreter::contains(address pc) {
117 return (_code->contains(pc) ||
118 pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
119 }
122 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
123 address entry = __ pc();
124 switch (type) {
125 case T_BOOLEAN: __ c2bool(rax); break;
126 case T_CHAR : __ andl(rax, 0xFFFF); break;
127 case T_BYTE : __ sign_extend_byte (rax); break;
128 case T_SHORT : __ sign_extend_short(rax); break;
129 case T_VOID : // fall thru
130 case T_LONG : // fall thru
131 case T_INT : /* nothing to do */ break;
133 case T_DOUBLE :
134 case T_FLOAT :
135 {
136 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
137 __ pop(t); // remove return address first
138 // Must return a result for interpreter or compiler. In SSE
139 // mode, results are returned in xmm0 and the FPU stack must
140 // be empty.
141 if (type == T_FLOAT && UseSSE >= 1) {
142 #ifndef _LP64
143 // Load ST0
144 __ fld_d(Address(rsp, 0));
145 // Store as float and empty fpu stack
146 __ fstp_s(Address(rsp, 0));
147 #endif // !_LP64
148 // and reload
149 __ movflt(xmm0, Address(rsp, 0));
150 } else if (type == T_DOUBLE && UseSSE >= 2 ) {
151 __ movdbl(xmm0, Address(rsp, 0));
152 } else {
153 // restore ST0
154 __ fld_d(Address(rsp, 0));
155 }
156 // and pop the temp
157 __ addptr(rsp, 2 * wordSize);
158 __ push(t); // restore return address
159 }
160 break;
161 case T_OBJECT :
162 // retrieve result from frame
163 __ movptr(rax, STATE(_oop_temp));
164 // and verify it
165 __ verify_oop(rax);
166 break;
167 default : ShouldNotReachHere();
168 }
169 __ ret(0); // return from result handler
170 return entry;
171 }
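// Note: these handlers are reached indirectly. The native entry below stores the
// appropriate one (handed back via rax by the signature handler machinery) in
// STATE(_result_handler) and calls it after the JNI call returns, converting the
// raw ABI result (rax/rdx, ST(0) or xmm0) into the canonical tosca form.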
173 // tosca based result to c++ interpreter stack based result.
174 // Result goes to top of native stack.
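// For example, a T_LONG result arrives in rdx:rax and the converter below pushes
// rdx then rax, so the value occupies two expression-stack slots (the rdx slot is
// junk on 64-bit, where rax already holds all 64 bits). Float and double results
// are spilled from ST(0)/xmm0 into slot(s) freshly allocated with subptr(rsp, ...).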
176 #undef EXTEND // SHOULD NOT BE NEEDED
177 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
178 // A result is in the tosca (abi result) from either a native method call or compiled
179 // code. Place this result on the java expression stack so C++ interpreter can use it.
180 address entry = __ pc();
182 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
183 __ pop(t); // remove return address first
184 switch (type) {
185 case T_VOID:
186 break;
187 case T_BOOLEAN:
188 #ifdef EXTEND
189 __ c2bool(rax);
190 #endif
191 __ push(rax);
192 break;
193 case T_CHAR :
194 #ifdef EXTEND
195 __ andl(rax, 0xFFFF);
196 #endif
197 __ push(rax);
198 break;
199 case T_BYTE :
200 #ifdef EXTEND
201 __ sign_extend_byte (rax);
202 #endif
203 __ push(rax);
204 break;
205 case T_SHORT :
206 #ifdef EXTEND
207 __ sign_extend_short(rax);
208 #endif
209 __ push(rax);
210 break;
211 case T_LONG :
212 __ push(rdx); // pushes useless junk on 64bit
213 __ push(rax);
214 break;
215 case T_INT :
216 __ push(rax);
217 break;
218 case T_FLOAT :
219 // Result is in ST(0)/xmm0
220 __ subptr(rsp, wordSize);
221 if ( UseSSE < 1) {
222 __ fstp_s(Address(rsp, 0));
223 } else {
224 __ movflt(Address(rsp, 0), xmm0);
225 }
226 break;
227 case T_DOUBLE :
228 __ subptr(rsp, 2*wordSize);
229 if ( UseSSE < 2 ) {
230 __ fstp_d(Address(rsp, 0));
231 } else {
232 __ movdbl(Address(rsp, 0), xmm0);
233 }
234 break;
235 case T_OBJECT :
236 __ verify_oop(rax); // verify it
237 __ push(rax);
238 break;
239 default : ShouldNotReachHere();
240 }
241 __ jmp(t); // return from result handler
242 return entry;
243 }
245 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
246 // A result is in the java expression stack of the interpreted method that has just
247 // returned. Place this result on the java expression stack of the caller.
248 //
249 // The current interpreter activation in rsi/r13 is for the method just returning its
250 // result. So we know that the result of this method is on the top of the current
251 // execution stack (which is pre-pushed) and will be returned to the top of the caller's
252 // stack. The top of the caller's stack is the bottom of the locals of the current
253 // activation.
254 // Because of the way activations are managed by the frame manager the value of rsp is
255 // below both the stack top of the current activation and naturally the stack top
256 // of the calling activation. This enables this routine to leave the return address
257 // to the frame manager on the stack and do a vanilla return.
258 //
259 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
260 // On Return: rsi/r13 - unchanged
261 // rax - new stack top for caller activation (i.e. activation in _prev_link)
262 //
263 // Can destroy rdx, rcx.
264 //
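// Rough picture for the 1-word case handled below (higher addresses toward the top):
//
//   [ locals[0] of the returning method ]  <- STATE(_locals): result stored here;
//                                             also the caller's new stack top (returned in rax)
//   [ ... rest of the current frame ... ]
//   [ result word ]                        <- STATE(_stack) + wordSize
//   [ pre-pushed slot ]                    <- STATE(_stack)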
266 address entry = __ pc();
267 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
268 switch (type) {
269 case T_VOID:
270 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
271 __ addptr(rax, wordSize); // account for prepush before we return
272 break;
273 case T_FLOAT :
274 case T_BOOLEAN:
275 case T_CHAR :
276 case T_BYTE :
277 case T_SHORT :
278 case T_INT :
279 // 1 word result
280 __ movptr(rdx, STATE(_stack));
281 __ movptr(rax, STATE(_locals)); // address for result
282 __ movl(rdx, Address(rdx, wordSize)); // get result
283 __ movptr(Address(rax, 0), rdx); // and store it
284 break;
285 case T_LONG :
286 case T_DOUBLE :
287 // return top two words on current expression stack to caller's expression stack
288 // The caller's expression stack is adjacent to the current frame manager's interpreterState
289 // except we allocated one extra word for this interpreterState so we won't overwrite it
290 // when we return a two word result.
292 __ movptr(rax, STATE(_locals)); // address for result
293 __ movptr(rcx, STATE(_stack));
294 __ subptr(rax, wordSize); // need an additional word besides locals[0]
295 __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit)
296 __ movptr(Address(rax, wordSize), rdx); // and store it
297 __ movptr(rdx, Address(rcx, wordSize)); // get result word
298 __ movptr(Address(rax, 0), rdx); // and store it
299 break;
300 case T_OBJECT :
301 __ movptr(rdx, STATE(_stack));
302 __ movptr(rax, STATE(_locals)); // address for result
303 __ movptr(rdx, Address(rdx, wordSize)); // get result
304 __ verify_oop(rdx); // verify it
305 __ movptr(Address(rax, 0), rdx); // and store it
306 break;
307 default : ShouldNotReachHere();
308 }
309 __ ret(0);
310 return entry;
311 }
313 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
314 // A result is in the java expression stack of the interpreted method that has just
315 // returned. Place this result in the native abi that the caller expects.
316 //
317 // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
318 // frame manager except in this situation the caller is native code (c1/c2/call_stub)
319 // and so rather than return result onto caller's java expression stack we return the
320 // result in the expected location based on the native abi.
321 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
322 // On Return: rsi/r13 - unchanged
323 // Other registers changed [rax/rdx/ST(0) as needed for the result returned]
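// For the simple 1-word cases this boils down to:
//   rdx = STATE(_stack);            // expression stack top (pre-pushed)
//   rax = *(rdx + wordSize);        // the result word (32-bit load), now where
//                                   // call_stub/c1/c2 expect an int-sized result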
325 address entry = __ pc();
326 switch (type) {
327 case T_VOID:
328 break;
329 case T_BOOLEAN:
330 case T_CHAR :
331 case T_BYTE :
332 case T_SHORT :
333 case T_INT :
334 __ movptr(rdx, STATE(_stack)); // get top of stack
335 __ movl(rax, Address(rdx, wordSize)); // get result word 1
336 break;
337 case T_LONG :
338 __ movptr(rdx, STATE(_stack)); // get top of stack
339 __ movptr(rax, Address(rdx, wordSize)); // get result low word
340 NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word
341 break;
342 case T_FLOAT :
343 __ movptr(rdx, STATE(_stack)); // get top of stack
344 if ( UseSSE >= 1) {
345 __ movflt(xmm0, Address(rdx, wordSize));
346 } else {
347 __ fld_s(Address(rdx, wordSize)); // push float result
348 }
349 break;
350 case T_DOUBLE :
351 __ movptr(rdx, STATE(_stack)); // get top of stack
352 if ( UseSSE > 1) {
353 __ movdbl(xmm0, Address(rdx, wordSize));
354 } else {
355 __ fld_d(Address(rdx, wordSize)); // push double result
356 }
357 break;
358 case T_OBJECT :
359 __ movptr(rdx, STATE(_stack)); // get top of stack
360 __ movptr(rax, Address(rdx, wordSize)); // get result word 1
361 __ verify_oop(rax); // verify it
362 break;
363 default : ShouldNotReachHere();
364 }
365 __ ret(0);
366 return entry;
367 }
369 address CppInterpreter::return_entry(TosState state, int length) {
370 // make it look good in the debugger
371 return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
372 }
374 address CppInterpreter::deopt_entry(TosState state, int length) {
375 address ret = NULL;
376 if (length != 0) {
377 switch (state) {
378 case atos: ret = deopt_frame_manager_return_atos; break;
379 case btos: ret = deopt_frame_manager_return_btos; break;
380 case ctos:
381 case stos:
382 case itos: ret = deopt_frame_manager_return_itos; break;
383 case ltos: ret = deopt_frame_manager_return_ltos; break;
384 case ftos: ret = deopt_frame_manager_return_ftos; break;
385 case dtos: ret = deopt_frame_manager_return_dtos; break;
386 case vtos: ret = deopt_frame_manager_return_vtos; break;
387 }
388 } else {
389 ret = unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap)
390 }
391 assert(ret != NULL, "Not initialized");
392 return ret;
393 }
395 // C++ Interpreter
396 void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
397 const Register locals,
398 const Register sender_sp,
399 bool native) {
401 // On entry the "locals" argument points to locals[0] (or where it would be in case no locals in
402 // a static method). "state" contains any previous frame manager state which we must save a link
403 // to in the newly generated state object. On return "state" is a pointer to the newly allocated
404 // state object. We must allocate and initialize a new interpreterState object and the method
405 // expression stack. Because the returned result (if any) of the method will be placed on the caller's
406 // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must
407 // be sure to leave space on the caller's stack so that this result will not overwrite values when
408 // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when
409 // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in
410 // non-product builds and initialize this last local with the previous interpreterState as
411 // this makes things look real nice in the debugger.
413 // State on entry
414 // Assumes locals == &locals[0]
415 // Assumes state == any previous frame manager state (assuming call path from c++ interpreter)
416 // Assumes rax = return address
417 // rcx == senders_sp
418 // rbx == method
419 // Modifies rcx, rdx, rax
420 // Returns:
421 // state == address of new interpreterState
422 // rsp == bottom of method's expression stack.
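// Approximate layout of the frame built below (stack grows toward lower addresses):
//
//   [ two words of slop / saved state ]      (non-native only, see below)
//   [ return address (from rax) ]
//   [ saved rbp ]                            <- rbp after enter()
//   [ BytecodeInterpreter state object ]     <- "state" register points at its base
//   [ monitor entry, if synchronized ]       <- STATE(_monitor_base); monitors grow down from here
//   [ expression stack ]                     <- STATE(_stack_base) .. STATE(_stack_limit)
//                                            <- rsp on return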
424 const Address const_offset (rbx, Method::const_offset());
427 // On entry sp is the sender's sp. This includes the space for the arguments
428 // that the sender pushed. If the sender pushed no args (a static) and the
429 // caller returns a long then we need two words on the sender's stack which
430 // are not present (although when we return and restore the full size stack the
431 // space will be present). If we didn't allocate two words here then when
432 // we "push" the result of the caller's stack we would overwrite the return
433 // address and the saved rbp. Not good. So simply allocate 2 words now
434 // just to be safe. This is the "static long no_params() method" issue.
435 // See Lo.java for a testcase.
436 // We don't need this for native calls because they return result in
437 // register and the stack is expanded in the caller before we store
438 // the results on the stack.
440 if (!native) {
441 #ifdef PRODUCT
442 __ subptr(rsp, 2*wordSize);
443 #else /* PRODUCT */
444 __ push((int32_t)NULL_WORD);
445 __ push(state); // make it look like a real argument
446 #endif /* PRODUCT */
447 }
449 // Now that we are assured of space for the stack result, set up typical linkage
451 __ push(rax);
452 __ enter();
454 __ mov(rax, state); // save current state
456 __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
457 __ mov(state, rsp);
459 // rsi/r13 == state/locals rax == prevstate
461 // initialize the "shadow" frame that we use, since the C++ interpreter is not directly
462 // recursive. It would be simpler to recurse, but then we couldn't trim the expression
463 // stack as we call new methods.
464 __ movptr(STATE(_locals), locals); // state->_locals = locals()
465 __ movptr(STATE(_self_link), state); // point to self
466 __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
467 __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
468 #ifdef _LP64
469 __ movptr(STATE(_thread), r15_thread); // state->_thread = current thread
470 #else
471 __ get_thread(rax); // get vm's javathread*
472 __ movptr(STATE(_thread), rax); // state->_thread = current thread
473 #endif // _LP64
474 __ movptr(rdx, Address(rbx, Method::const_offset())); // get ConstMethod*
475 __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base
476 if (native) {
477 __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
478 } else {
479 __ movptr(STATE(_bcp), rdx); // state->_bcp = codes()
480 }
481 __ xorptr(rdx, rdx);
482 __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
483 __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
484 __ movptr(rdx, Address(rbx, Method::const_offset()));
485 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
486 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
487 __ movptr(STATE(_constants), rdx); // state->_constants = constants()
489 __ movptr(STATE(_method), rbx); // state->_method = method()
490 __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
491 __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
494 __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
495 // entries run from -1..x where &monitor[x] ==
497 {
498 // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
499 // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
500 // immediately.
502 // synchronize method
503 const Address access_flags (rbx, Method::access_flags_offset());
504 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
505 Label not_synced;
507 __ movl(rax, access_flags);
508 __ testl(rax, JVM_ACC_SYNCHRONIZED);
509 __ jcc(Assembler::zero, not_synced);
511 // Allocate initial monitor and pre initialize it
512 // get synchronization object
514 Label done;
515 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
516 __ movl(rax, access_flags);
517 __ testl(rax, JVM_ACC_STATIC);
518 __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
519 __ jcc(Assembler::zero, done);
520 __ movptr(rax, Address(rbx, Method::const_offset()));
521 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
522 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
523 __ movptr(rax, Address(rax, mirror_offset));
524 __ bind(done);
525 // add space for monitor & lock
526 __ subptr(rsp, entry_size); // add space for a monitor entry
527 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
528 __ bind(not_synced);
529 }
531 __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
532 if (native) {
533 __ movptr(STATE(_stack), rsp); // set current expression stack tos
534 __ movptr(STATE(_stack_limit), rsp);
535 } else {
536 __ subptr(rsp, wordSize); // pre-push stack
537 __ movptr(STATE(_stack), rsp); // set current expression stack tos
539 // compute full expression stack limit
541 const int extra_stack = 0; //6815692//Method::extra_stack_words();
542 __ movptr(rdx, Address(rbx, Method::const_offset()));
543 __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
544 __ negptr(rdx); // so we can subtract in next step
545 // Allocate expression stack
546 __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
547 __ movptr(STATE(_stack_limit), rsp);
548 }
550 #ifdef _LP64
551 // Make sure stack is properly aligned and sized for the abi
552 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
553 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
554 #endif // _LP64
558 }
560 // Helpers for commoning out cases in the various type of method entries.
561 //
563 // increment invocation count & check for overflow
564 //
565 // Note: checking for negative value instead of overflow
566 // so we have a 'sticky' overflow test
567 //
568 // rbx,: method
569 // rcx: invocation counter
570 //
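// In effect the code below does:
//   rcx = invocation_counter + count_increment;   // rcx was (pre-)fetched by the caller
//   rax = backedge_counter & count_mask_value;    // strip the status bits
//   invocation_counter = rcx;                     // store updated count
//   if (rcx + rax >= InterpreterInvocationLimit) goto *overflow;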
571 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
573 const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
574 const Address backedge_counter (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset());
576 if (ProfileInterpreter) { // %%% Merge this into MethodData*
577 __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset()));
578 }
579 // Update standard invocation counters
580 __ movl(rax, backedge_counter); // load backedge counter
582 __ increment(rcx, InvocationCounter::count_increment);
583 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
585 __ movl(invocation_counter, rcx); // save invocation count
586 __ addl(rcx, rax); // add both counters
588 // profile_method is non-NULL only for an interpreted method, so
589 // profile_method != NULL implies !native_call.
590 // The BytecodeInterpreter calls here only for native methods, so the profiling code is elided.
592 __ cmp32(rcx,
593 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
594 __ jcc(Assembler::aboveEqual, *overflow);
596 }
598 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
600 // C++ interpreter on entry
601 // rsi/r13 - new interpreter state pointer
602 // rbp - interpreter frame pointer
603 // rbx - method
605 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
606 // rbx, - method
607 // rcx - rcvr (assuming there is one)
608 // top of stack return address of interpreter caller
609 // rsp - sender_sp
611 // C++ interpreter only
612 // rsi/r13 - previous interpreter state pointer
614 // InterpreterRuntime::frequency_counter_overflow takes one argument
615 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
616 // The call returns the address of the verified entry point for the method or NULL
617 // if the compilation did not complete (either went background or bailed out).
618 __ movptr(rax, (int32_t)false);
619 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
621 // for c++ interpreter can rsi really be munged?
622 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // restore state
623 __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method
624 __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer
626 __ jmp(*do_continue, relocInfo::none);
628 }
630 void InterpreterGenerator::generate_stack_overflow_check(void) {
631 // see if we've got enough room on the stack for locals plus overhead.
632 // the expression stack grows down incrementally, so the normal guard
633 // page mechanism will work for that.
634 //
635 // Registers live on entry:
636 //
637 // Asm interpreter
638 // rdx: number of additional locals this frame needs (what we must check)
639 // rbx,: Method*
641 // C++ Interpreter
642 // rsi/r13: previous interpreter frame state object
643 // rdi: &locals[0]
644 // rcx: # of locals
645 // rdx: number of additional locals this frame needs (what we must check)
646 // rbx: Method*
648 // destroyed on exit
649 // rax,
651 // NOTE: the additional locals are also always pushed (this wasn't obvious in
652 // generate_method_entry), so the guard should work for them too.
653 //
655 // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
656 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
658 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
659 // be sure to change this if you add/subtract anything to/from the overhead area
660 const int overhead_size = (int)sizeof(BytecodeInterpreter);
662 const int page_size = os::vm_page_size();
664 Label after_frame_check;
666 // compute rsp as if this were going to be the last frame on
667 // the stack before the red zone
669 Label after_frame_check_pop;
671 // save rsi == caller's bytecode ptr (c++ previous interp. state)
672 // QQQ problem here?? rsi overload????
673 __ push(state);
675 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);
677 NOT_LP64(__ get_thread(thread));
679 const Address stack_base(thread, Thread::stack_base_offset());
680 const Address stack_size(thread, Thread::stack_size_offset());
682 // locals + overhead, in bytes
683 // Always give one monitor to allow us to start interp if sync method.
684 // Any additional monitors need a check when moving the expression stack
685 const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
686 const int extra_stack = 0; //6815692//Method::extra_stack_entries();
687 __ movptr(rax, Address(rbx, Method::const_offset()));
688 __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
689 __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
690 __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
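// So rax now holds (roughly) the worst-case frame size in bytes:
//   max_stack * stackElementScale() + rdx (additional locals) * stackElementScale()
//     + one_monitor + extra_stack + overhead_size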
692 #ifdef ASSERT
693 Label stack_base_okay, stack_size_okay;
694 // verify that thread stack base is non-zero
695 __ cmpptr(stack_base, (int32_t)0);
696 __ jcc(Assembler::notEqual, stack_base_okay);
697 __ stop("stack base is zero");
698 __ bind(stack_base_okay);
699 // verify that thread stack size is non-zero
700 __ cmpptr(stack_size, (int32_t)0);
701 __ jcc(Assembler::notEqual, stack_size_okay);
702 __ stop("stack size is zero");
703 __ bind(stack_size_okay);
704 #endif
706 // Add stack base to locals and subtract stack size
707 __ addptr(rax, stack_base);
708 __ subptr(rax, stack_size);
710 // We should have a magic number here for the size of the c++ interpreter frame.
711 // We can't actually tell this ahead of time. The debug version size is around 3k
712 // product is 1k and fastdebug is 4k
713 const int slop = 6 * K;
715 // Use the maximum number of pages we might bang.
716 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
717 (StackRedPages+StackYellowPages);
718 // Only need this if we are stack banging which is temporary while
719 // we're debugging.
720 __ addptr(rax, slop + 2*max_pages * page_size);
722 // check against the current stack bottom
723 __ cmpptr(rsp, rax);
724 __ jcc(Assembler::above, after_frame_check_pop);
726 __ pop(state); // get c++ prev state.
728 // throw exception return address becomes throwing pc
729 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
731 // all done with frame size check
732 __ bind(after_frame_check_pop);
733 __ pop(state);
735 __ bind(after_frame_check);
736 }
738 // Find preallocated monitor and lock method (C++ interpreter)
739 // rbx - Method*
740 //
741 void InterpreterGenerator::lock_method(void) {
742 // assumes state == rsi/r13 == pointer to current interpreterState
743 // minimally destroys rax, rdx|c_rarg1, rdi
744 //
745 // synchronize method
746 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
747 const Address access_flags (rbx, Method::access_flags_offset());
749 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
751 // find initial monitor i.e. monitors[-1]
752 __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit
753 __ subptr(monitor, entry_size); // point to initial monitor
755 #ifdef ASSERT
756 { Label L;
757 __ movl(rax, access_flags);
758 __ testl(rax, JVM_ACC_SYNCHRONIZED);
759 __ jcc(Assembler::notZero, L);
760 __ stop("method doesn't need synchronization");
761 __ bind(L);
762 }
763 #endif // ASSERT
764 // get synchronization object
765 { Label done;
766 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
767 __ movl(rax, access_flags);
768 __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
769 __ testl(rax, JVM_ACC_STATIC);
770 __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
771 __ jcc(Assembler::zero, done);
772 __ movptr(rax, Address(rbx, Method::const_offset()));
773 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
774 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
775 __ movptr(rax, Address(rax, mirror_offset));
776 __ bind(done);
777 }
778 #ifdef ASSERT
779 { Label L;
780 __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object?
781 __ jcc(Assembler::equal, L);
782 __ stop("wrong synchronization lobject");
783 __ bind(L);
784 }
785 #endif // ASSERT
786 // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
787 __ lock_object(monitor);
788 }
790 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry)
792 address InterpreterGenerator::generate_accessor_entry(void) {
794 // rbx: Method*
796 // rsi/r13: senderSP must be preserved for slow path, set SP to it on fast path
798 Label xreturn_path;
800 // do fastpath for resolved accessor methods
801 if (UseFastAccessorMethods) {
803 address entry_point = __ pc();
805 Label slow_path;
806 // If we need a safepoint check, generate full interpreter entry.
807 ExternalAddress state(SafepointSynchronize::address_of_state());
808 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
809 SafepointSynchronize::_not_synchronized);
811 __ jcc(Assembler::notEqual, slow_path);
812 // ASM/C++ Interpreter
813 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
814 // Note: We can only use this code if the getfield has been resolved
815 // and if we don't have a null-pointer exception => check for
816 // these conditions first and use slow path if necessary.
817 // rbx,: method
818 // rcx: receiver
819 __ movptr(rax, Address(rsp, wordSize));
821 // check if local 0 != NULL and read field
822 __ testptr(rax, rax);
823 __ jcc(Assembler::zero, slow_path);
825 // read first instruction word and extract bytecode @ 1 and index @ 2
826 __ movptr(rdx, Address(rbx, Method::const_offset()));
827 __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
828 __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
829 // Shift codes right to get the index on the right.
830 // The bytecode fetched looks like <index><0xb4><0x2a>
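// After the shrl below, rdx holds just the 16-bit operand (the constant pool
// cache index); the shll then scales it by the size in words of a
// ConstantPoolCacheEntry so it can be used to index the cache directly.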
831 __ shrl(rdx, 2*BitsPerByte);
832 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
833 __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
835 // rax,: local 0
836 // rbx,: method
837 // rcx: receiver - do not destroy since it is needed for slow path!
838 // rcx: scratch
839 // rdx: constant pool cache index
840 // rdi: constant pool cache
841 // rsi/r13: sender sp
843 // check if getfield has been resolved and read constant pool cache entry
844 // check the validity of the cache entry by testing whether _indices field
845 // contains Bytecode::_getfield in b1 byte.
846 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
847 __ movl(rcx,
848 Address(rdi,
849 rdx,
850 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
851 __ shrl(rcx, 2*BitsPerByte);
852 __ andl(rcx, 0xFF);
853 __ cmpl(rcx, Bytecodes::_getfield);
854 __ jcc(Assembler::notEqual, slow_path);
856 // Note: constant pool entry is not valid before bytecode is resolved
857 __ movptr(rcx,
858 Address(rdi,
859 rdx,
860 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
861 __ movl(rdx,
862 Address(rdi,
863 rdx,
864 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
866 Label notByte, notShort, notChar;
867 const Address field_address (rax, rcx, Address::times_1);
869 // Need to differentiate between igetfield, agetfield, bgetfield etc.
870 // because they are different sizes.
871 // Use the type from the constant pool cache
872 __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
873 // Make sure we don't need to mask rdx after the above shift
874 ConstantPoolCacheEntry::verify_tos_state_shift();
875 #ifdef _LP64
876 Label notObj;
877 __ cmpl(rdx, atos);
878 __ jcc(Assembler::notEqual, notObj);
879 // atos
880 __ movptr(rax, field_address);
881 __ jmp(xreturn_path);
883 __ bind(notObj);
884 #endif // _LP64
885 __ cmpl(rdx, btos);
886 __ jcc(Assembler::notEqual, notByte);
887 __ load_signed_byte(rax, field_address);
888 __ jmp(xreturn_path);
890 __ bind(notByte);
891 __ cmpl(rdx, stos);
892 __ jcc(Assembler::notEqual, notShort);
893 __ load_signed_short(rax, field_address);
894 __ jmp(xreturn_path);
896 __ bind(notShort);
897 __ cmpl(rdx, ctos);
898 __ jcc(Assembler::notEqual, notChar);
899 __ load_unsigned_short(rax, field_address);
900 __ jmp(xreturn_path);
902 __ bind(notChar);
903 #ifdef ASSERT
904 Label okay;
905 #ifndef _LP64
906 __ cmpl(rdx, atos);
907 __ jcc(Assembler::equal, okay);
908 #endif // _LP64
909 __ cmpl(rdx, itos);
910 __ jcc(Assembler::equal, okay);
911 __ stop("what type is this?");
912 __ bind(okay);
913 #endif // ASSERT
914 // All the rest are a 32 bit wordsize
915 __ movl(rax, field_address);
917 __ bind(xreturn_path);
919 // _ireturn/_areturn
920 __ pop(rdi); // get return address
921 __ mov(rsp, sender_sp_on_entry); // set sp to sender sp
922 __ jmp(rdi);
924 // generate a vanilla interpreter entry as the slow path
925 __ bind(slow_path);
926 // We will enter c++ interpreter looking like it was
927 // called by the call_stub this will cause it to return
928 // a tosca result to the invoker which might have been
929 // the c++ interpreter itself.
931 __ jmp(fast_accessor_slow_entry_path);
932 return entry_point;
934 } else {
935 return NULL;
936 }
938 }
940 address InterpreterGenerator::generate_Reference_get_entry(void) {
941 #ifndef SERIALGC
942 if (UseG1GC) {
943 // We need to have a routine that generates code to:
944 // * load the value in the referent field
945 // * pass that value to the pre-barrier.
946 //
947 // In the case of G1 this will record the value of the
948 // referent in an SATB buffer if marking is active.
949 // This will cause concurrent marking to mark the referent
950 // field as live.
951 Unimplemented();
952 }
953 #endif // SERIALGC
955 // If G1 is not enabled then attempt to go through the accessor entry point
956 // Reference.get is an accessor
957 return generate_accessor_entry();
958 }
960 //
961 // C++ Interpreter stub for calling a native method.
962 // This sets up a somewhat different looking stack for calling the native method
963 // than the typical interpreter frame setup but still has the pointer to
964 // an interpreter state.
965 //
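// Roughly, the entry generated below:
//   1. builds an interpreterState and frame via generate_compute_interpreter_state
//   2. bumps the invocation counter and, if synchronized, locks the method
//   3. finds (or has the runtime create) the signature handler and shuffles the
//      arguments into the native ABI
//   4. transitions _thread_in_Java -> _thread_in_native, calls the native function,
//      then transitions back with a safepoint/suspend check
//   5. converts the result through STATE(_result_handler), unlocks if needed,
//      notifies JVMTI, and returns via STATE(_sender_sp)/STATE(_prev_link)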
967 address InterpreterGenerator::generate_native_entry(bool synchronized) {
968 // determine code generation flags
969 bool inc_counter = UseCompiler || CountCompiledCalls;
971 // rbx: Method*
972 // rcx: receiver (unused)
973 // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve
974 // in any case. If called via c1/c2/call_stub, rsi/r13 contains junk (free to use) but it is
975 // harmless to save/restore.
976 address entry_point = __ pc();
978 const Address constMethod (rbx, Method::const_offset());
979 const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
980 const Address access_flags (rbx, Method::access_flags_offset());
981 const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
983 // rsi/r13 == state/locals rdi == prevstate
984 const Register locals = rdi;
986 // get parameter size (always needed)
987 __ movptr(rcx, constMethod);
988 __ load_unsigned_short(rcx, size_of_parameters);
990 // rbx: Method*
991 // rcx: size of parameters
992 __ pop(rax); // get return address
993 // for natives the size of locals is zero
995 // compute beginning of parameters/locals
997 __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
999 // initialize fixed part of activation frame
1001 // Assumes rax = return address
1003 // allocate and initialize new interpreterState and method expression stack
1004 // IN(locals) -> locals
1005 // IN(state) -> previous frame manager state (NULL from stub/c1/c2)
1006 // destroys rax, rcx, rdx
1007 // OUT (state) -> new interpreterState
1008 // OUT(rsp) -> bottom of methods expression stack
1010 // save sender_sp
1011 __ mov(rcx, sender_sp_on_entry);
1012 // start with NULL previous state
1013 __ movptr(state, (int32_t)NULL_WORD);
1014 generate_compute_interpreter_state(state, locals, rcx, true);
1016 #ifdef ASSERT
1017 { Label L;
1018 __ movptr(rax, STATE(_stack_base));
1019 #ifdef _LP64
1020 // duplicate the alignment rsp got after setting stack_base
1021 __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
1022 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
1023 #endif // _LP64
1024 __ cmpptr(rax, rsp);
1025 __ jcc(Assembler::equal, L);
1026 __ stop("broken stack frame setup in interpreter");
1027 __ bind(L);
1028 }
1029 #endif
1031 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
1033 const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
1034 NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
1035 // Since at this point in the method invocation the exception handler
1036 // would try to exit the monitor of synchronized methods which hasn't
1037 // been entered yet, we set the thread local variable
1038 // _do_not_unlock_if_synchronized to true. The remove_activation will
1039 // check this flag.
1041 const Address do_not_unlock_if_synchronized(unlock_thread,
1042 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1043 __ movbool(do_not_unlock_if_synchronized, true);
1045 // make sure method is native & not abstract
1046 #ifdef ASSERT
1047 __ movl(rax, access_flags);
1048 {
1049 Label L;
1050 __ testl(rax, JVM_ACC_NATIVE);
1051 __ jcc(Assembler::notZero, L);
1052 __ stop("tried to execute non-native method as native");
1053 __ bind(L);
1054 }
1055 { Label L;
1056 __ testl(rax, JVM_ACC_ABSTRACT);
1057 __ jcc(Assembler::zero, L);
1058 __ stop("tried to execute abstract method in interpreter");
1059 __ bind(L);
1060 }
1061 #endif
1064 // increment invocation count & check for overflow
1065 Label invocation_counter_overflow;
1066 if (inc_counter) {
1067 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
1068 }
1070 Label continue_after_compile;
1072 __ bind(continue_after_compile);
1074 bang_stack_shadow_pages(true);
1076 // reset the _do_not_unlock_if_synchronized flag
1077 NOT_LP64(__ movl(rax, STATE(_thread));) // get thread
1078 __ movbool(do_not_unlock_if_synchronized, false);
1081 // check for synchronized native methods
1082 //
1083 // Note: This must happen *after* invocation counter check, since
1084 // when overflow happens, the method should not be locked.
1085 if (synchronized) {
1086 // potentially kills rax, rcx, rdx, rdi
1087 lock_method();
1088 } else {
1089 // no synchronization necessary
1090 #ifdef ASSERT
1091 { Label L;
1092 __ movl(rax, access_flags);
1093 __ testl(rax, JVM_ACC_SYNCHRONIZED);
1094 __ jcc(Assembler::zero, L);
1095 __ stop("method needs synchronization");
1096 __ bind(L);
1097 }
1098 #endif
1099 }
1101 // start execution
1103 // jvmti support
1104 __ notify_method_entry();
1106 // work registers
1107 const Register method = rbx;
1108 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
1109 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
1110 const Address constMethod (method, Method::const_offset());
1111 const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
1113 // allocate space for parameters
1114 __ movptr(method, STATE(_method));
1115 __ verify_method_ptr(method);
1116 __ movptr(t, constMethod);
1117 __ load_unsigned_short(t, size_of_parameters);
1118 __ shll(t, 2);
1119 #ifdef _LP64
1120 __ subptr(rsp, t);
1121 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1122 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
1123 #else
1124 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
1125 __ subptr(rsp, t);
1126 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
1127 #endif // _LP64
1129 // get signature handler
1130 Label pending_exception_present;
1132 { Label L;
1133 __ movptr(t, Address(method, Method::signature_handler_offset()));
1134 __ testptr(t, t);
1135 __ jcc(Assembler::notZero, L);
1136 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
1137 __ movptr(method, STATE(_method));
1138 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1139 __ jcc(Assembler::notEqual, pending_exception_present);
1140 __ verify_method_ptr(method);
1141 __ movptr(t, Address(method, Method::signature_handler_offset()));
1142 __ bind(L);
1143 }
1144 #ifdef ASSERT
1145 {
1146 Label L;
1147 __ push(t);
1148 __ get_thread(t); // get vm's javathread*
1149 __ cmpptr(t, STATE(_thread));
1150 __ jcc(Assembler::equal, L);
1151 __ int3();
1152 __ bind(L);
1153 __ pop(t);
1154 }
1155 #endif //
1157 const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
1158 // call signature handler
1159 assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code");
1161 // The generated handlers do not touch RBX (the method oop).
1162 // However, large signatures cannot be cached and are generated
1163 // each time here. The slow-path generator will blow RBX
1164 // sometime, so we must reload it after the call.
1165 __ movptr(from_ptr, STATE(_locals)); // get the from pointer
1166 __ call(t);
1167 __ movptr(method, STATE(_method));
1168 __ verify_method_ptr(method);
1170 // result handler is in rax
1171 // set result handler
1172 __ movptr(STATE(_result_handler), rax);
1175 // get native function entry point
1176 { Label L;
1177 __ movptr(rax, Address(method, Method::native_function_offset()));
1178 __ testptr(rax, rax);
1179 __ jcc(Assembler::notZero, L);
1180 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
1181 __ movptr(method, STATE(_method));
1182 __ verify_method_ptr(method);
1183 __ movptr(rax, Address(method, Method::native_function_offset()));
1184 __ bind(L);
1185 }
1187 // pass mirror handle if static call
1188 { Label L;
1189 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
1190 __ movl(t, Address(method, Method::access_flags_offset()));
1191 __ testl(t, JVM_ACC_STATIC);
1192 __ jcc(Assembler::zero, L);
1193 // get mirror
1194 __ movptr(t, Address(method, Method:: const_offset()));
1195 __ movptr(t, Address(t, ConstMethod::constants_offset()));
1196 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
1197 __ movptr(t, Address(t, mirror_offset));
1198 // copy mirror into activation object
1199 __ movptr(STATE(_oop_temp), t);
1200 // pass handle to mirror
1201 #ifdef _LP64
1202 __ lea(c_rarg1, STATE(_oop_temp));
1203 #else
1204 __ lea(t, STATE(_oop_temp));
1205 __ movptr(Address(rsp, wordSize), t);
1206 #endif // _LP64
1207 __ bind(L);
1208 }
1209 #ifdef ASSERT
1210 {
1211 Label L;
1212 __ push(t);
1213 __ get_thread(t); // get vm's javathread*
1214 __ cmpptr(t, STATE(_thread));
1215 __ jcc(Assembler::equal, L);
1216 __ int3();
1217 __ bind(L);
1218 __ pop(t);
1219 }
1220 #endif //
1222 // pass JNIEnv
1223 #ifdef _LP64
1224 __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
1225 #else
1226 __ movptr(thread, STATE(_thread)); // get thread
1227 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1229 __ movptr(Address(rsp, 0), t);
1230 #endif // _LP64
1232 #ifdef ASSERT
1233 {
1234 Label L;
1235 __ push(t);
1236 __ get_thread(t); // get vm's javathread*
1237 __ cmpptr(t, STATE(_thread));
1238 __ jcc(Assembler::equal, L);
1239 __ int3();
1240 __ bind(L);
1241 __ pop(t);
1242 }
1243 #endif //
1245 #ifdef ASSERT
1246 { Label L;
1247 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1248 __ cmpl(t, _thread_in_Java);
1249 __ jcc(Assembler::equal, L);
1250 __ stop("Wrong thread state in native stub");
1251 __ bind(L);
1252 }
1253 #endif
1255 // Change state to native (we save the return address in the thread, since it might not
1256 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1257 // points into the right code segment. It does not have to be the correct return pc.
1259 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1261 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1263 __ call(rax);
1265 // result potentially in rdx:rax or ST0
1266 __ movptr(method, STATE(_method));
1267 NOT_LP64(__ movptr(thread, STATE(_thread));) // get thread
1269 // The potential result is in ST(0) & rdx:rax
1270 // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then
1271 // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about
1272 // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would
1273 // be destroyed.
1274 // It is safe to do these pushes because state is _thread_in_native and return address will be found
1275 // via _last_native_pc and not via _last_java_sp
1277 // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
1278 { Label Lpush, Lskip;
1279 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1280 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1281 __ cmpptr(STATE(_result_handler), float_handler.addr());
1282 __ jcc(Assembler::equal, Lpush);
1283 __ cmpptr(STATE(_result_handler), double_handler.addr());
1284 __ jcc(Assembler::notEqual, Lskip);
1285 __ bind(Lpush);
1286 __ subptr(rsp, 2*wordSize);
1287 if ( UseSSE < 2 ) {
1288 __ fstp_d(Address(rsp, 0));
1289 } else {
1290 __ movdbl(Address(rsp, 0), xmm0);
1291 }
1292 __ bind(Lskip);
1293 }
1295 // save rax:rdx for potential use by result handler.
1296 __ push(rax);
1297 #ifndef _LP64
1298 __ push(rdx);
1299 #endif // _LP64
1301 // Either restore the MXCSR register after returning from the JNI Call
1302 // or verify that it wasn't changed.
1303 if (VM_Version::supports_sse()) {
1304 if (RestoreMXCSROnJNICalls) {
1305 __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
1306 }
1307 else if (CheckJNICalls ) {
1308 __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
1309 }
1310 }
1312 #ifndef _LP64
1313 // Either restore the x87 floating pointer control word after returning
1314 // from the JNI call or verify that it wasn't changed.
1315 if (CheckJNICalls) {
1316 __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
1317 }
1318 #endif // _LP64
1321 // change thread state
1322 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1323 if(os::is_MP()) {
1324 // Write serialization page so VM thread can do a pseudo remote membar.
1325 // We use the current thread pointer to calculate a thread specific
1326 // offset to write to within the page. This minimizes bus traffic
1327 // due to cache line collision.
1328 __ serialize_memory(thread, rcx);
1329 }
1331 // check for safepoint operation in progress and/or pending suspend requests
1332 { Label Continue;
1334 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
1335 SafepointSynchronize::_not_synchronized);
1337 // Threads running native code are expected to self-suspend
1338 // when leaving the _thread_in_native state, so we need to check for
1339 // pending suspend requests here.
1340 Label L;
1341 __ jcc(Assembler::notEqual, L);
1342 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1343 __ jcc(Assembler::equal, Continue);
1344 __ bind(L);
1346 // Don't use call_VM as it will see a possible pending exception and forward it
1347 // and never return here preventing us from clearing _last_native_pc down below.
1348 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1349 // preserved and correspond to the bcp/locals pointers.
1350 //
1352 ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1353 thread);
1354 __ increment(rsp, wordSize);
1356 __ movptr(method, STATE(_method));
1357 __ verify_method_ptr(method);
1358 __ movptr(thread, STATE(_thread)); // get thread
1360 __ bind(Continue);
1361 }
1363 // change thread state
1364 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1366 __ reset_last_Java_frame(thread, true, true);
1368 // reset handle block
1369 __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
1370 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
1372 // If result was an oop then unbox and save it in the frame
1373 { Label L;
1374 Label no_oop, store_result;
1375 ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
1376 __ cmpptr(STATE(_result_handler), oop_handler.addr());
1377 __ jcc(Assembler::notEqual, no_oop);
1378 #ifndef _LP64
1379 __ pop(rdx);
1380 #endif // _LP64
1381 __ pop(rax);
1382 __ testptr(rax, rax);
1383 __ jcc(Assembler::zero, store_result);
1384 // unbox
1385 __ movptr(rax, Address(rax, 0));
1386 __ bind(store_result);
1387 __ movptr(STATE(_oop_temp), rax);
1388 // keep stack depth as expected by pushing oop which will eventually be discarded
1389 __ push(rax);
1390 #ifndef _LP64
1391 __ push(rdx);
1392 #endif // _LP64
1393 __ bind(no_oop);
1394 }
1396 {
1397 Label no_reguard;
1398 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
1399 __ jcc(Assembler::notEqual, no_reguard);
1401 __ pusha();
1402 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1403 __ popa();
1405 __ bind(no_reguard);
1406 }
1409 // QQQ Seems like for native methods we simply return and the caller will see the pending
1410 // exception and do the right thing. Certainly the interpreter will, don't know about
1411 // compiled methods.
1412 // Seems that the answer to the above is no, this is wrong. The old code would see the exception
1413 // and forward it before doing the unlocking and notifying jvmdi that the method has exited.
1414 // This seems wrong; need to investigate the spec.
1416 // handle exceptions (exception handling will handle unlocking!)
1417 { Label L;
1418 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1419 __ jcc(Assembler::zero, L);
1420 __ bind(pending_exception_present);
1422 // There are potential results on the stack (rax/rdx, ST(0)) we ignore these and simply
1423 // return and let caller deal with exception. This skips the unlocking here which
1424 // seems wrong but seems to be what asm interpreter did. Can't find this in the spec.
1425 // Note: must preserve method in rbx
1426 //
1428 // remove activation
1430 __ movptr(t, STATE(_sender_sp));
1431 __ leave(); // remove frame anchor
1432 __ pop(rdi); // get return address
1433 __ movptr(state, STATE(_prev_link)); // get previous state for return
1434 __ mov(rsp, t); // set sp to sender sp
1435 __ push(rdi); // push throwing pc
1436 // This skips unlocking!! This seems to be what the asm interpreter does but seems
1437 // very wrong. Not clear if this violates the spec.
1438 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1439 __ bind(L);
1440 }
1442 // do unlocking if necessary
1443 { Label L;
1444 __ movl(t, Address(method, Method::access_flags_offset()));
1445 __ testl(t, JVM_ACC_SYNCHRONIZED);
1446 __ jcc(Assembler::zero, L);
1447 // the code below should be shared with interpreter macro assembler implementation
1448 { Label unlock;
1449 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1450 // BasicObjectLock will be first in list, since this is a synchronized method. However, need
1451 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1452 __ movptr(monitor, STATE(_monitor_base));
1453 __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor
1455 __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
1456 __ testptr(t, t);
1457 __ jcc(Assembler::notZero, unlock);
1459 // Entry already unlocked, need to throw exception
1460 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1461 __ should_not_reach_here();
1463 __ bind(unlock);
1464 __ unlock_object(monitor);
1465 // unlock can blow rbx so restore it for path that needs it below
1466 __ movptr(method, STATE(_method));
1467 }
1468 __ bind(L);
1469 }
1471 // jvmti support
1472 // Note: This must happen _after_ handling/throwing any exceptions since
1473 // the exception handler code notifies the runtime of method exits
1474 // too. If this happens before, method entry/exit notifications are
1475 // not properly paired (was bug - gri 11/22/99).
1476 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1478 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
1479 #ifndef _LP64
1480 __ pop(rdx);
1481 #endif // _LP64
1482 __ pop(rax);
1483 __ movptr(t, STATE(_result_handler)); // get result handler
1484 __ call(t); // call result handler to convert to tosca form
1486 // remove activation
1488 __ movptr(t, STATE(_sender_sp));
1490 __ leave(); // remove frame anchor
1491 __ pop(rdi); // get return address
1492 __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller)
1493 __ mov(rsp, t); // set sp to sender sp
1494 __ jmp(rdi);
1496 // invocation counter overflow
1497 if (inc_counter) {
1498 // Handle overflow of counter and compile method
1499 __ bind(invocation_counter_overflow);
1500 generate_counter_overflow(&continue_after_compile);
1501 }
1503 return entry_point;
1504 }
1506 // Generate entries that will put a result type index into rcx
1507 void CppInterpreterGenerator::generate_deopt_handling() {
1509 Label return_from_deopt_common;
1511 // Generate entries that will put a result type index into rcx
1512 // deopt needs to jump to here to enter the interpreter (return a result)
1513 deopt_frame_manager_return_atos = __ pc();
1515 // rax is live here
1516 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index
1517 __ jmp(return_from_deopt_common);
1520 // deopt needs to jump to here to enter the interpreter (return a result)
1521 deopt_frame_manager_return_btos = __ pc();
1523 // rax is live here
1524 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index
1525 __ jmp(return_from_deopt_common);
1527 // deopt needs to jump to here to enter the interpreter (return a result)
1528 deopt_frame_manager_return_itos = __ pc();
1530 // rax is live here
1531 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index
1532 __ jmp(return_from_deopt_common);
1534 // deopt needs to jump to here to enter the interpreter (return a result)
1536 deopt_frame_manager_return_ltos = __ pc();
1537 // rax,rdx are live here
1538 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index
1539 __ jmp(return_from_deopt_common);
1541 // deopt needs to jump to here to enter the interpreter (return a result)
1543 deopt_frame_manager_return_ftos = __ pc();
1544 // st(0) is live here
1545 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
1546 __ jmp(return_from_deopt_common);
1548 // deopt needs to jump to here to enter the interpreter (return a result)
1549 deopt_frame_manager_return_dtos = __ pc();
1551 // st(0) is live here
1552 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
1553 __ jmp(return_from_deopt_common);
1555 // deopt needs to jump to here to enter the interpreter (return a result)
1556 deopt_frame_manager_return_vtos = __ pc();
1558 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));
1560 // Deopt return common
1561 // an index is present in rcx that lets us move any possible result being
1562 // returned to the interpreter's stack
1563 //
1564 // Because we have a full sized interpreter frame on the youngest
1565 // activation the stack is pushed too deep to share the tosca to
1566 // stack converters directly. We shrink the stack to the desired
1567 // amount and then push result and then re-extend the stack.
1568 // We could have the code in size_activation layout a short
1569 // frame for the top activation but that would look different
1570 // than say sparc (which needs a full size activation because
1571 // the windows are in the way). Really it could be short? QQQ
1572 //
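// In outline, the common tail below does (illustrative sketch only, not generated code):
//   rsp = state->_stack + wordSize;               // trim to the live stack, undo the prepush
//   (*CppInterpreter::_tosca_to_stack[rcx])();    // push the tosca result, if any
//   state->_msg   = deopt_resume;
//   state->_stack = --rsp;                        // re-prepush the stack
//   rsp = state->_stack_limit;                    // re-extend to full depth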
1573 __ bind(return_from_deopt_common);
1575 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1577 // setup rsp so we can push the "result" as needed.
1578 __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed)
1579 __ addptr(rsp, wordSize); // undo prepush
1581 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
1582 // Address index(noreg, rcx, Address::times_ptr);
1583 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
1584 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
1585 __ call(rcx); // call result converter
1587 __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
1588 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
1589 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
1590 // result if any on stack already )
1591 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
1592 }
1594 // Generate the code to handle a more_monitors message from the c++ interpreter
1595 void CppInterpreterGenerator::generate_more_monitors() {
1598 Label entry, loop;
1599 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
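// Roughly, the code below slides every live expression stack word down by one
// monitor slot, zeroes the BasicObjectLock::obj field of the freed slot so it
// reads as unused, and reports got_monitors back to the interpreter (sketch only).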
1600 // 1. compute new pointers // rsp: old expression stack top
1601 __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom
1602 __ subptr(rsp, entry_size); // move expression stack top limit
1603 __ subptr(STATE(_stack), entry_size); // update interpreter stack top
1604 __ subptr(STATE(_stack_limit), entry_size); // inform interpreter
1605 __ subptr(rdx, entry_size); // move expression stack bottom
1606 __ movptr(STATE(_stack_base), rdx); // inform interpreter
1607 __ movptr(rcx, STATE(_stack)); // set start value for copy loop
1608 __ jmp(entry);
1609 // 2. move expression stack contents
1610 __ bind(loop);
1611 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
1612 __ movptr(Address(rcx, 0), rbx); // and store it at new location
1613 __ addptr(rcx, wordSize); // advance to next word
1614 __ bind(entry);
1615 __ cmpptr(rcx, rdx); // check if bottom reached
1616 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
1617 // now zero the slot so we can find it.
1618 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
1619 __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
1620 }
1623 // Initial entry to C++ interpreter from the call_stub.
1624 // This entry point is called the frame manager since it handles the generation
1625 // of interpreter activation frames via requests directly from the vm (via call_stub)
1626 // and via requests from the interpreter. The requests from the call_stub happen
1627 // directly thru the entry point. Requests from the interpreter happen via returning
1628 // from the interpreter and examining the message the interpreter has returned to
1629 // the frame manager. The frame manager can take the following requests:
1631 // NO_REQUEST - error, should never happen.
1632 // MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
1633 // allocate a new monitor.
1634 // CALL_METHOD - setup a new activation to call a new method. Very similar to what
1635 // happens during entry via the call stub.
1636 // RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
1637 //
1638 // Arguments:
1639 //
1640 // rbx: Method*
1641 // rcx: receiver - unused (retrieved from stack as needed)
1642 // rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
1643 //
1644 //
1645 // Stack layout at entry
1646 //
1647 // [ return address ] <--- rsp
1648 // [ parameter n ]
1649 // ...
1650 // [ parameter 1 ]
1651 // [ expression stack ]
1652 //
1653 //
1654 // We are free to blow any registers we like because the call_stub which brought us here
1655 // initially has preserved the callee save registers already.
1656 //
1657 //
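// In outline the generated frame manager behaves like the following loop
// (illustrative pseudocode only; the real control flow is the labels below):
//
//   for (;;) {
//     msg = BytecodeInterpreter::run(state);      // or runWithChecks under JVMTI
//     switch (msg) {
//       case more_monitors:       grow the monitor area, re-enter;           break;
//       case call_method:         build the callee activation, re-enter;     break;
//       case return_from_method:  convert the result, unwind this frame;     break;
//       case do_osr:              pop the frame, jump to the OSR nmethod;    break;
//       case throwing_exception:  unwind and/or forward the exception;       break;
//       default:                  bad message, stop;
//     }
//   }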
1659 static address interpreter_frame_manager = NULL;
1661 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1663 // rbx: Method*
1664 // rsi/r13: sender sp
1666 // Because we redispatch "recursive" interpreter entries thru this same entry point
1667 // the "input" register usage is a little strange and not what you expect coming
1668 // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter
1669 // state are NULL but on "recursive" dispatches they are what you'd expect.
1670 // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2)
1673 // A single frame manager is plenty as we don't specialize for synchronized. We could, and
1674 // the code is pretty much ready. Would need to change the test below and for good measure
1675 // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
1676 // routines. Not clear this is worth it yet.
1678 if (interpreter_frame_manager) return interpreter_frame_manager;
1680 address entry_point = __ pc();
1682 // Fast accessor methods share this entry point.
1683 // This works because the frame manager is in the same codelet
1684 if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
1686 Label dispatch_entry_2;
1687 __ movptr(rcx, sender_sp_on_entry);
1688 __ movptr(state, (int32_t)NULL_WORD); // no current activation
1690 __ jmp(dispatch_entry_2);
1692 const Register locals = rdi;
1694 Label re_dispatch;
1696 __ bind(re_dispatch);
1698 // save sender sp (doesn't include return address)
1699 __ lea(rcx, Address(rsp, wordSize));
1701 __ bind(dispatch_entry_2);
1703 // save sender sp
1704 __ push(rcx);
1706 const Address constMethod (rbx, Method::const_offset());
1707 const Address access_flags (rbx, Method::access_flags_offset());
1708 const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
1709 const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
1711 // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1712 // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
1713 // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
1715 // get parameter size (always needed)
1716 __ movptr(rdx, constMethod);
1717 __ load_unsigned_short(rcx, size_of_parameters);
1719 // rbx: Method*
1720 // rcx: size of parameters
1721 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1723 __ subptr(rdx, rcx); // rdx = no. of additional locals
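// For example (illustrative): a method whose parameters occupy 2 slots and whose
// max_locals is 5 leaves rdx == 3, so three extra local slots are pushed and zeroed
// by the initialization loop below.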
1725 // see if we've got enough room on the stack for locals plus overhead.
1726 generate_stack_overflow_check(); // C++
1728 // c++ interpreter does not use stack banging or any implicit exceptions
1729 // leave for now to verify that check is proper.
1730 bang_stack_shadow_pages(false);
1734 // compute beginning of parameters (rdi)
1735 __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
1737 // save sender's sp
1738 // __ movl(rcx, rsp);
1740 // get sender's sp
1741 __ pop(rcx);
1743 // get return address
1744 __ pop(rax);
1746 // rdx - # of additional locals
1747 // allocate space for locals
1748 // explicitly initialize locals
1749 {
1750 Label exit, loop;
1751 __ testl(rdx, rdx); // (32bit ok)
1752 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1753 __ bind(loop);
1754 __ push((int32_t)NULL_WORD); // initialize local variables
1755 __ decrement(rdx); // until everything initialized
1756 __ jcc(Assembler::greater, loop);
1757 __ bind(exit);
1758 }
1761 // Assumes rax = return address
1763 // allocate and initialize new interpreterState and method expression stack
1764 // IN(locals) -> locals
1765 // IN(state) -> any current interpreter activation
1766 // destroys rax, rcx, rdx, rdi
1767 // OUT (state) -> new interpreterState
1768 // OUT(rsp) -> bottom of methods expression stack
1770 generate_compute_interpreter_state(state, locals, rcx, false);
1772 // Call interpreter
1774 Label call_interpreter;
1775 __ bind(call_interpreter);
1777 // c++ interpreter does not use stack banging or any implicit exceptions
1778 // leave for now to verify that check is proper.
1779 bang_stack_shadow_pages(false);
1782 // Call interpreter; enter here if the message is
1783 // set and we know the stack size is valid
1785 Label call_interpreter_2;
1787 __ bind(call_interpreter_2);
1789 {
1790 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1792 #ifdef _LP64
1793 __ mov(c_rarg0, state);
1794 #else
1795 __ push(state); // push arg to interpreter
1796 __ movptr(thread, STATE(_thread));
1797 #endif // _LP64
1799 // We can setup the frame anchor with everything we want at this point
1800 // as we are thread_in_Java and no safepoints can occur until we go to
1801 // vm mode. We do have to clear flags on return from vm but that is it
1802 //
1803 __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
1804 __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
1806 // Call the interpreter
1808 RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
1809 RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
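// The JVMTI-aware run loop is selected once, at code generation time; a VM that
// cannot post interpreter events keeps calling the plain BytecodeInterpreter::run.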
1811 __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
1812 NOT_LP64(__ pop(rax);) // discard parameter to run
1813 //
1814 // state is preserved since it is callee saved
1815 //
1817 // reset_last_Java_frame
1819 NOT_LP64(__ movl(thread, STATE(_thread));)
1820 __ reset_last_Java_frame(thread, true, true);
1821 }
1823 // examine msg from interpreter to determine next action
1825 __ movl(rdx, STATE(_msg)); // Get new message
1827 Label call_method;
1828 Label return_from_interpreted_method;
1829 Label throw_exception;
1830 Label bad_msg;
1831 Label do_OSR;
1833 __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
1834 __ jcc(Assembler::equal, call_method);
1835 __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
1836 __ jcc(Assembler::equal, return_from_interpreted_method);
1837 __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
1838 __ jcc(Assembler::equal, do_OSR);
1839 __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
1840 __ jcc(Assembler::equal, throw_exception);
1841 __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
1842 __ jcc(Assembler::notEqual, bad_msg);
1844 // Allocate more monitor space, shuffle expression stack....
1846 generate_more_monitors();
1848 __ jmp(call_interpreter);
1850 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
1851 unctrap_frame_manager_entry = __ pc();
1852 //
1853 // Load the registers we need.
1854 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1855 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
1856 __ jmp(call_interpreter_2);
1860 //=============================================================================
1861 // Returning from a compiled method into a deopted method. The bytecode at the
1862 // bcp has completed. The result of the bytecode is in the native abi (the tosca
1863 // for the template based interpreter). Any stack space that was used by the
1864 // bytecode that has completed has been removed (e.g. parameters for an invoke)
1865 // so all that we have to do is place any pending result on the expression stack
1866 // and resume execution on the next bytecode.
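// For example (illustrative): a deoptimized invoke of a method returning a long
// re-enters at deopt_frame_manager_return_ltos; the common code then pushes the
// result (rax:rdx on 32-bit) onto the expression stack through the T_LONG
// tosca-to-stack converter before resuming at the following bytecode.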
1869 generate_deopt_handling();
1870 __ jmp(call_interpreter);
1873 // Current frame has caught an exception we need to dispatch to the
1874 // handler. We can get here because a native interpreter frame caught
1875 // an exception, in which case there is no handler and we must rethrow.
1876 // If it is a vanilla interpreted frame then we simply drop into the
1877 // interpreter and let it do the lookup.
1879 Interpreter::_rethrow_exception_entry = __ pc();
1880 // rax: exception
1881 // rdx: return address/pc that threw exception
1883 Label return_with_exception;
1884 Label unwind_and_forward;
1886 // restore state pointer.
1887 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1889 __ movptr(rbx, STATE(_method)); // get method
1890 #ifdef _LP64
1891 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
1892 #else
1893 __ movl(rcx, STATE(_thread)); // get thread
1895 // Store exception where the interpreter will expect it
1896 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
1897 #endif // _LP64
1899 // is current frame vanilla or native?
1901 __ movl(rdx, access_flags);
1902 __ testl(rdx, JVM_ACC_NATIVE);
1903 __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly
1905 // We drop thru to unwind a native interpreted frame with a pending exception
1906 // We jump here for the initial interpreter frame with exception pending
1907 // We unwind the current activation and forward it to our caller.
1909 __ bind(unwind_and_forward);
1911 // unwind rbp, return stack to unextended value and re-push return address
1913 __ movptr(rcx, STATE(_sender_sp));
1914 __ leave();
1915 __ pop(rdx);
1916 __ mov(rsp, rcx);
1917 __ push(rdx);
1918 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1920 // Return point from a call which returns a result in the native abi
1921 // (c1/c2/jni-native). This result must be processed onto the java
1922 // expression stack.
1923 //
1924 // A pending exception may be present in which case there is no result present
1926 Label resume_interpreter;
1927 Label do_float;
1928 Label do_double;
1929 Label done_conv;
1931 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
1932 if (UseSSE < 2) {
1933 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1934 __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
1935 __ movl(rcx, Address(rbx, Method::result_index_offset()));
1936 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
1937 __ jcc(Assembler::equal, do_float);
1938 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
1939 __ jcc(Assembler::equal, do_double);
1940 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
1941 __ empty_FPU_stack();
1942 #endif // !_LP64 || COMPILER1 || !COMPILER2
1943 __ jmp(done_conv);
1945 __ bind(do_float);
1946 #ifdef COMPILER2
1947 for (int i = 1; i < 8; i++) {
1948 __ ffree(i);
1949 }
1950 #endif // COMPILER2
1951 __ jmp(done_conv);
1952 __ bind(do_double);
1953 #ifdef COMPILER2
1954 for (int i = 1; i < 8; i++) {
1955 __ ffree(i);
1956 }
1957 #endif // COMPILER2
1958 __ jmp(done_conv);
1959 } else {
1960 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
1961 __ jmp(done_conv);
1962 }
1964 // Return point to interpreter from compiled/native method
1965 InternalAddress return_from_native_method(__ pc());
1967 __ bind(done_conv);
1970 // Result if any is in tosca. The java expression stack is in the state that the
1971 // calling convention left it (i.e. params may or may not be present)
1972 // Copy the result from tosca and place it on java expression stack.
1974 // Restore rsi/r13 as compiled code may not preserve it
1976 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
1978 // restore stack to what we had when we left (in case i2c extended it)
1980 __ movptr(rsp, STATE(_stack));
1981 __ lea(rsp, Address(rsp, wordSize));
1983 // If there is a pending exception then we don't really have a result to process
1985 #ifdef _LP64
1986 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1987 #else
1988 __ movptr(rcx, STATE(_thread)); // get thread
1989 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1990 #endif // _LP64
1991 __ jcc(Assembler::notZero, return_with_exception);
1993 // get method just executed
1994 __ movptr(rbx, STATE(_result._to_call._callee));
1996 // callee left args on top of expression stack, remove them
1997 __ movptr(rcx, constMethod);
1998 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
2000 __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
2002 __ movl(rcx, Address(rbx, Method::result_index_offset()));
2003 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
2004 // Address index(noreg, rax, Address::times_ptr);
2005 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
2006 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
2007 __ call(rcx); // call result converter
2008 __ jmp(resume_interpreter);
2010 // An exception is being caught on return to a vanilla interpreter frame.
2011 // Empty the stack and resume interpreter
2013 __ bind(return_with_exception);
2015 // Exception present, empty stack
2016 __ movptr(rsp, STATE(_stack_base));
2017 __ jmp(resume_interpreter);
2019 // Return from an interpreted method. We return a result appropriate to the caller (i.e. "recursive"
2020 // interpreter call, or native) and unwind this interpreter activation.
2021 // All monitors should be unlocked.
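// Sketch of the paths below: for a "recursive" interpreter caller, the result type
// index in rax selects a converter from CppInterpreter::_stack_to_stack that copies
// the result from this frame's expression stack onto the caller's before the completed
// activation is unwound; for the initial caller the result is instead converted to
// the native ABI via CppInterpreter::_stack_to_native_abi (illustrative summary).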
2023 __ bind(return_from_interpreted_method);
2025 Label return_to_initial_caller;
2027 __ movptr(rbx, STATE(_method)); // get method just executed
2028 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
2029 __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
2030 __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2)
2032 // Copy result to callers java stack
2033 ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
2034 // Address index(noreg, rax, Address::times_ptr);
2036 __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
2037 // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
2038 __ call(rax); // call result converter
2040 Label unwind_recursive_activation;
2041 __ bind(unwind_recursive_activation);
2043 // returning to interpreter method from "recursive" interpreter call
2044 // result converter left rax pointing to top of the java stack for method we are returning
2045 // to. Now all we must do is unwind the state from the completed call
2047 __ movptr(state, STATE(_prev_link)); // unwind state
2048 __ leave(); // pop the frame
2049 __ mov(rsp, rax); // unwind stack to remove args
2051 // Resume the interpreter. The current frame contains the current interpreter
2052 // state object.
2053 //
2055 __ bind(resume_interpreter);
2057 // state == interpreterState object for method we are resuming
2059 __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
2060 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
2061 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
2062 // result if any on stack already )
2063 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
2064 __ jmp(call_interpreter_2); // No need to bang
2066 // interpreter returning to native code (call_stub/c1/c2)
2067 // convert result and unwind initial activation
2068 // rax - result index
2070 __ bind(return_to_initial_caller);
2071 ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
2072 // Address index(noreg, rax, Address::times_ptr);
2074 __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
2075 __ call(rax); // call result converter
2077 Label unwind_initial_activation;
2078 __ bind(unwind_initial_activation);
2080 // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0))
2082 /* Current stack picture
2084 [ incoming parameters ]
2085 [ extra locals ]
2086 [ return address to CALL_STUB/C1/C2]
2087 fp -> [ CALL_STUB/C1/C2 fp ]
2088 BytecodeInterpreter object
2089 expression stack
2090 sp ->
2092 */
2094 // return restoring the stack to the original sender_sp value
2096 __ movptr(rcx, STATE(_sender_sp));
2097 __ leave();
2098 __ pop(rdi); // get return address
2099 // set stack to sender's sp
2100 __ mov(rsp, rcx);
2101 __ jmp(rdi); // return to call_stub
2103 // OSR request, adjust return address to make current frame into adapter frame
2104 // and enter OSR nmethod
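// Sketch: the OSR buffer and entry point are picked up from _result._osr, this
// interpreter frame is popped, and control jumps to the OSR nmethod.  When the
// caller is another interpreter activation a return to return_from_native_method
// is pushed first so the nmethod's result flows back through the compiled-return
// path above (illustrative summary of the code below).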
2106 __ bind(do_OSR);
2108 Label remove_initial_frame;
2110 // We are going to pop this frame. Is there another interpreter frame underneath
2111 // it or is it callstub/compiled?
2113 // Move buffer to the expected parameter location
2114 __ movptr(rcx, STATE(_result._osr._osr_buf));
2116 __ movptr(rax, STATE(_result._osr._osr_entry));
2118 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
2119 __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2)
2121 __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register
2122 __ leave(); // pop the frame
2123 __ mov(rsp, sender_sp_on_entry); // trim any stack expansion
2126 // We know we are calling compiled so push specialized return
2127 // method uses specialized entry, push a return so we look like call stub setup
2128 // this path will handle fact that result is returned in registers and not
2129 // on the java stack.
2131 __ pushptr(return_from_native_method.addr());
2133 __ jmp(rax);
2135 __ bind(remove_initial_frame);
2137 __ movptr(rdx, STATE(_sender_sp));
2138 __ leave();
2139 // get real return
2140 __ pop(rsi);
2141 // set stack to sender's sp
2142 __ mov(rsp, rdx);
2143 // repush real return
2144 __ push(rsi);
2145 // Enter OSR nmethod
2146 __ jmp(rax);
2151 // Call a new method. All we do is (temporarily) trim the expression stack,
2152 // push a return address to bring us back here, and leap to the new entry.
2154 __ bind(call_method);
2156 // stack points to next free location and not top element on expression stack
2157 // method expects sp to be pointing to topmost element
2159 __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
2160 __ lea(rsp, Address(rsp, wordSize));
2162 __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute
2164 // don't need a return address if reinvoking interpreter
2166 // Make it look like call_stub calling conventions
2168 // Get (potential) receiver
2169 // get size of parameters in words
2170 __ movptr(rcx, constMethod);
2171 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
2173 ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
2174 __ pushptr(recursive.addr()); // make it look good in the debugger
2176 InternalAddress entry(entry_point);
2177 __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
2178 __ jcc(Assembler::equal, re_dispatch); // yes
2180 __ pop(rax); // pop dummy address
2183 // get specialized entry
2184 __ movptr(rax, STATE(_result._to_call._callee_entry_point));
2185 // set sender SP
2186 __ mov(sender_sp_on_entry, rsp);
2188 // method uses specialized entry, push a return so we look like call stub setup
2189 // this path will handle fact that result is returned in registers and not
2190 // on the java stack.
2192 __ pushptr(return_from_native_method.addr());
2194 __ jmp(rax);
2196 __ bind(bad_msg);
2197 __ stop("Bad message from interpreter");
2199 // Interpreted method "returned" with an exception; pass it on...
2200 // Pass result, unwind activation and continue/return to interpreter/call_stub
2201 // We handle result (if any) differently based on return to interpreter or call_stub
2203 Label unwind_initial_with_pending_exception;
2205 __ bind(throw_exception);
2206 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call?
2207 __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2)
2208 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
2209 __ addptr(rax, wordSize); // account for prepush before we return
2210 __ jmp(unwind_recursive_activation);
2212 __ bind(unwind_initial_with_pending_exception);
2214 // We will unwind the current (initial) interpreter frame and forward
2215 // the exception to the caller. We must put the exception in the
2216 // expected register and clear pending exception and then forward.
2218 __ jmp(unwind_and_forward);
2220 interpreter_frame_manager = entry_point;
2221 return entry_point;
2222 }
2224 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
2225 // determine code generation flags
2226 bool synchronized = false;
2227 address entry_point = NULL;
2229 switch (kind) {
2230 case Interpreter::zerolocals : break;
2231 case Interpreter::zerolocals_synchronized: synchronized = true; break;
2232 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
2233 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
2234 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
2235 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
2236 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
2237 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
2239 case Interpreter::java_lang_math_sin : // fall thru
2240 case Interpreter::java_lang_math_cos : // fall thru
2241 case Interpreter::java_lang_math_tan : // fall thru
2242 case Interpreter::java_lang_math_abs : // fall thru
2243 case Interpreter::java_lang_math_log : // fall thru
2244 case Interpreter::java_lang_math_log10 : // fall thru
2245 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
2246 case Interpreter::java_lang_ref_reference_get
2247 : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
2248 default : ShouldNotReachHere(); break;
2249 }
2251 if (entry_point) return entry_point;
2253 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
2255 }
2257 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2258 : CppInterpreterGenerator(code) {
2259 generate_all(); // down here so it can be "virtual"
2260 }
2262 // Deoptimization helpers for C++ interpreter
2264 // How much stack a method activation needs in words.
2265 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
2267 const int stub_code = 4; // see generate_call_stub
2268 // Save space for one monitor to get into the interpreted method in case
2269 // the method is synchronized
2270 int monitor_size = method->is_synchronized() ?
2271 1*frame::interpreter_frame_monitor_size() : 0;
2273 // total static overhead size. Account for interpreter state object, return
2274 // address, saved rbp and 2 words for a "static long no_params() method" issue.
2276 const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
2277 ( frame::sender_sp_offset - frame::link_offset) + 2;
2279 const int extra_stack = 0; //6815692//Method::extra_stack_entries();
2280 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
2281 Interpreter::stackElementWords;
2282 return overhead_size + method_stack + stub_code;
2283 }
2285 // returns the activation size.
2286 static int size_activation_helper(int extra_locals_size, int monitor_size) {
2287 return (extra_locals_size + // the addition space for locals
2288 2*BytesPerWord + // return address and saved rbp
2289 2*BytesPerWord + // "static long no_params() method" issue
2290 sizeof(BytecodeInterpreter) + // interpreterState
2291 monitor_size); // monitors
2292 }
2294 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
2295 frame* caller,
2296 frame* current,
2297 Method* method,
2298 intptr_t* locals,
2299 intptr_t* stack,
2300 intptr_t* stack_base,
2301 intptr_t* monitor_base,
2302 intptr_t* frame_bottom,
2303 bool is_top_frame
2304 )
2305 {
2306 // What about any vtable?
2307 //
2308 to_fill->_thread = JavaThread::current();
2309 // This gets filled in later but make it something recognizable for now
2310 to_fill->_bcp = method->code_base();
2311 to_fill->_locals = locals;
2312 to_fill->_constants = method->constants()->cache();
2313 to_fill->_method = method;
2314 to_fill->_mdx = NULL;
2315 to_fill->_stack = stack;
2316 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
2317 to_fill->_msg = deopt_resume2;
2318 } else {
2319 to_fill->_msg = method_resume;
2320 }
2321 to_fill->_result._to_call._bcp_advance = 0;
2322 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
2323 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
2324 to_fill->_prev_link = NULL;
2326 to_fill->_sender_sp = caller->unextended_sp();
2328 if (caller->is_interpreted_frame()) {
2329 interpreterState prev = caller->get_interpreterState();
2330 to_fill->_prev_link = prev;
2331 // *current->register_addr(GR_Iprev_state) = (intptr_t) prev;
2332 // Make the prev callee look proper
2333 prev->_result._to_call._callee = method;
2334 if (*prev->_bcp == Bytecodes::_invokeinterface) {
2335 prev->_result._to_call._bcp_advance = 5;
2336 } else {
2337 prev->_result._to_call._bcp_advance = 3;
2338 }
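// (invokeinterface is a 5-byte bytecode, while invokevirtual/invokespecial/invokestatic
// are 3 bytes, hence the different bcp advance applied to the caller when the call returns.)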
2339 }
2340 to_fill->_oop_temp = NULL;
2341 to_fill->_stack_base = stack_base;
2342 // Need +1 here because stack_base points to the word just above the first expr stack entry
2343 // and stack_limit is supposed to point to the word just below the last expr stack entry.
2344 // See generate_compute_interpreter_state.
2345 int extra_stack = 0; //6815692//Method::extra_stack_entries();
2346 to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
2347 to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
2349 to_fill->_self_link = to_fill;
2350 assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
2351 "Stack top out of range");
2352 }
2354 int AbstractInterpreter::layout_activation(Method* method,
2355 int tempcount, // current number of words on the java expression stack
2356 int popframe_extra_args,
2357 int moncount,
2358 int caller_actual_parameters,
2359 int callee_param_count,
2360 int callee_locals,
2361 frame* caller,
2362 frame* interpreter_frame,
2363 bool is_top_frame) {
2365 assert(popframe_extra_args == 0, "FIX ME");
2366 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
2367 // does as far as allocating an interpreter frame.
2368 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
2369 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
2370 // as determined by a previous call to this method.
2371 // It is also guaranteed to be walkable even though it is in a skeletal state
2372 // NOTE: return size is in words not bytes
2373 // NOTE: tempcount is the current size of the java expression stack. For top most
2374 // frames we will allocate a full sized expression stack and not the cut-back
2375 // version that non-top frames have.
2377 // Calculate the amount our frame will be adjusted by the callee. For the top frame
2378 // this is zero.
2380 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
2381 // calculates the extra locals based on itself. Not what the callee does
2382 // to it. So it ignores last_frame_adjust value. Seems suspicious as far
2383 // as getting sender_sp correct.
2385 int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
2386 int monitor_size = sizeof(BasicObjectLock) * moncount;
2388 // First calculate the frame size without any java expression stack
2389 int short_frame_size = size_activation_helper(extra_locals_size,
2390 monitor_size);
2392 // Now with full size expression stack
2393 int extra_stack = 0; //6815692//Method::extra_stack_entries();
2394 int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
2396 // and now with only live portion of the expression stack
2397 short_frame_size = short_frame_size + tempcount * BytesPerWord;
2399 // the size the activation is right now. Only top frame is full size
2400 int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
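// Illustrative example: a non-top frame with tempcount == 2 live expression stack
// words is sized for just those two words, whereas the top frame is always sized
// for the full max_stack() expression stack computed above.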
2402 if (interpreter_frame != NULL) {
2403 #ifdef ASSERT
2404 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
2405 #endif
2407 // MUCHO HACK
2409 intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
2411 /* Now fill in the interpreterState object */
2413 // The state object is the first thing on the frame and easily located
2415 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
2418 // Find the locals pointer. This is rather simple on x86 because there is no
2419 // confusing rounding at the callee to account for. We can trivially locate
2420 // our locals based on the current fp().
2421 // Note: the + 2 is for handling the "static long no_params() method" issue.
2422 // (too bad I don't really remember that issue well...)
2424 intptr_t* locals;
2425 // If the caller is interpreted we need to make sure that locals points to the first
2426 // argument that the caller passed and not in an area where the stack might have been extended,
2427 // because the stack to stack converter needs a proper locals value in order to remove the
2428 // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
2429 // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
2430 // adjust the stack?? HMMM QQQ
2431 //
2432 if (caller->is_interpreted_frame()) {
2433 // locals must agree with the caller because it will be used to set the
2434 // caller's tos when we return.
2435 interpreterState prev = caller->get_interpreterState();
2436 // stack() is prepushed.
2437 locals = prev->stack() + method->size_of_parameters();
2438 // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
2439 if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
2440 // os::breakpoint();
2441 }
2442 } else {
2443 // this is where a c2i would have placed locals (except for the +2)
2444 locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
2445 }
2447 intptr_t* monitor_base = (intptr_t*) cur_state;
2448 intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
2449 /* +1 because stack is always prepushed */
2450 intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
2453 BytecodeInterpreter::layout_interpreterState(cur_state,
2454 caller,
2455 interpreter_frame,
2456 method,
2457 locals,
2458 stack,
2459 stack_base,
2460 monitor_base,
2461 frame_bottom,
2462 is_top_frame);
2464 // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
2465 }
2466 return frame_size/BytesPerWord;
2467 }
2469 #endif // CC_INTERP (all)