Fri, 26 Jun 2009 07:26:10 -0700
5057225: Remove useless I2L conversions
Summary: The optimizer should be told to normalize (AndL (ConvI2L x) 0xFF) to (ConvI2L (AndI x 0xFF)), and then the existing matcher rule will work for free.
Reviewed-by: kvn
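For illustration only (not part of the changeset): the pattern arises whenever an int value is widened before being masked with 0xFF. In C++ terms, using a hypothetical helper:

    #include <cstdint>

    int64_t low_byte(int32_t x) {
        // For the equivalent Java expression, C2 builds roughly (AndL (ConvI2L x) 0xFF).
        return (int64_t) x & 0xFF;
    }

Normalizing the graph to (ConvI2L (AndI x 0xFF)) lets the existing matcher rule for the 32-bit AND-with-0xFF form pick this up, typically as a single zero-extending move, instead of keeping a separate I2L conversion node.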
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_templateInterpreter_x86_32.cpp.incl"
28 #define __ _masm->
31 #ifndef CC_INTERP
32 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
33 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize;
34 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
36 //------------------------------------------------------------------------------------------------------------------------
38 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
39 address entry = __ pc();
41 // Note: There should be a minimal interpreter frame set up when stack
42 // overflow occurs since we check explicitly for it now.
43 //
44 #ifdef ASSERT
45 { Label L;
46 __ lea(rax, Address(rbp,
47 frame::interpreter_frame_monitor_block_top_offset * wordSize));
48 __ cmpptr(rax, rsp); // rax, = maximal rsp for current rbp,
49 // (stack grows negative)
50 __ jcc(Assembler::aboveEqual, L); // check if frame is complete
51 __ stop ("interpreter frame not set up");
52 __ bind(L);
53 }
54 #endif // ASSERT
55 // Restore bcp under the assumption that the current frame is still
56 // interpreted
57 __ restore_bcp();
59 // expression stack must be empty before entering the VM if an exception
60 // happened
61 __ empty_expression_stack();
62 __ empty_FPU_stack();
63 // throw exception
64 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
65 return entry;
66 }
68 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
69 address entry = __ pc();
70 // expression stack must be empty before entering the VM if an exception happened
71 __ empty_expression_stack();
72 __ empty_FPU_stack();
73 // setup parameters
74 // ??? convention: expect aberrant index in register rbx,
75 __ lea(rax, ExternalAddress((address)name));
76 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
77 return entry;
78 }
80 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
81 address entry = __ pc();
82 // object is at TOS
83 __ pop(rax);
84 // expression stack must be empty before entering the VM if an exception
85 // happened
86 __ empty_expression_stack();
87 __ empty_FPU_stack();
88 __ call_VM(noreg,
89 CAST_FROM_FN_PTR(address,
90 InterpreterRuntime::throw_ClassCastException),
91 rax);
92 return entry;
93 }
95 // Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
96 // pc at TOS (just for debugging)
97 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
98 address entry = __ pc();
100 __ pop(rbx); // actual failing object is at TOS
101 __ pop(rax); // required type is at TOS+4
103 __ verify_oop(rbx);
104 __ verify_oop(rax);
106 // Various method handle types use interpreter registers as temps.
107 __ restore_bcp();
108 __ restore_locals();
110 // Expression stack must be empty before entering the VM for an exception.
111 __ empty_expression_stack();
112 __ empty_FPU_stack();
113 __ call_VM(noreg,
114 CAST_FROM_FN_PTR(address,
115 InterpreterRuntime::throw_WrongMethodTypeException),
116 // pass required type, failing object (or NULL)
117 rax, rbx);
118 return entry;
119 }
122 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
123 assert(!pass_oop || message == NULL, "either oop or message but not both");
124 address entry = __ pc();
125 if (pass_oop) {
126 // object is at TOS
127 __ pop(rbx);
128 }
129 // expression stack must be empty before entering the VM if an exception happened
130 __ empty_expression_stack();
131 __ empty_FPU_stack();
132 // setup parameters
133 __ lea(rax, ExternalAddress((address)name));
134 if (pass_oop) {
135 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
136 } else {
137 if (message != NULL) {
138 __ lea(rbx, ExternalAddress((address)message));
139 } else {
140 __ movptr(rbx, NULL_WORD);
141 }
142 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
143 }
144 // throw exception
145 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
146 return entry;
147 }
150 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
151 address entry = __ pc();
152 // NULL last_sp until next java call
153 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
154 __ dispatch_next(state);
155 return entry;
156 }
159 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
160 TosState incoming_state = state;
161 if (EnableInvokeDynamic) {
162 if (unbox) {
163 incoming_state = atos;
164 }
165 } else {
166 assert(!unbox, "old behavior");
167 }
169 Label interpreter_entry;
170 address compiled_entry = __ pc();
172 #ifdef COMPILER2
173 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
174 if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
175 for (int i = 1; i < 8; i++) {
176 __ ffree(i);
177 }
178 } else if (UseSSE < 2) {
179 __ empty_FPU_stack();
180 }
181 #endif
182 if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
183 __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
184 } else {
185 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
186 }
188 __ jmp(interpreter_entry, relocInfo::none);
189 // emit a sentinel we can test for when converting an interpreter
190 // entry point to a compiled entry point.
191 __ a_long(Interpreter::return_sentinel);
192 __ a_long((int)compiled_entry);
193 address entry = __ pc();
194 __ bind(interpreter_entry);
196 // In SSE mode, interpreter returns FP results in xmm0 but they need
197 // to end up back on the FPU so it can operate on them.
198 if (incoming_state == ftos && UseSSE >= 1) {
199 __ subptr(rsp, wordSize);
200 __ movflt(Address(rsp, 0), xmm0);
201 __ fld_s(Address(rsp, 0));
202 __ addptr(rsp, wordSize);
203 } else if (incoming_state == dtos && UseSSE >= 2) {
204 __ subptr(rsp, 2*wordSize);
205 __ movdbl(Address(rsp, 0), xmm0);
206 __ fld_d(Address(rsp, 0));
207 __ addptr(rsp, 2*wordSize);
208 }
210 __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");
212 // Restore stack bottom in case i2c adjusted stack
213 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
214 // and NULL it as marker that rsp is now tos until next java call
215 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
217 __ restore_bcp();
218 __ restore_locals();
220 Label L_fail;
222 if (unbox && state != atos) {
223 // cast and unbox
224 BasicType type = as_BasicType(state);
225 if (type == T_BYTE) type = T_BOOLEAN; // FIXME
226 KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
227 __ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
228 __ testl(rax, rax);
229 Label L_got_value, L_get_value;
230 // convert nulls to zeroes (avoid NPEs here)
231 if (!(type == T_FLOAT || type == T_DOUBLE)) {
232 // if rax already contains zero bits, forge ahead
233 __ jcc(Assembler::zero, L_got_value);
234 } else {
235 __ jcc(Assembler::notZero, L_get_value);
236 __ fldz();
237 __ jmp(L_got_value);
238 }
239 __ bind(L_get_value);
240 __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
241 __ jcc(Assembler::notEqual, L_fail);
242 int offset = java_lang_boxing_object::value_offset_in_bytes(type);
243 // Cf. TemplateTable::getfield_or_static
244 switch (type) {
245 case T_BYTE: // fall through:
246 case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break;
247 case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break;
248 case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break;
249 case T_INT: __ movl(rax, Address(rax, offset)); break;
250 case T_FLOAT: __ fld_s(Address(rax, offset)); break;
251 case T_DOUBLE: __ fld_d(Address(rax, offset)); break;
252 // Access to java.lang.Double.value does not need to be atomic:
253 case T_LONG: { __ movl(rdx, Address(rax, offset + 4));
254 __ movl(rax, Address(rax, offset + 0)); } break;
255 default: ShouldNotReachHere();
256 }
257 __ bind(L_got_value);
258 }
260 Label L_got_cache, L_giant_index;
261 if (EnableInvokeDynamic) {
262 __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
263 __ jcc(Assembler::equal, L_giant_index);
264 }
265 __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
266 __ bind(L_got_cache);
267 if (unbox && state == atos) {
268 // insert a casting conversion, to keep verifier sane
269 Label L_ok, L_ok_pops;
270 __ testl(rax, rax);
271 __ jcc(Assembler::zero, L_ok);
272 __ push(rax); // save the object to check
273 __ push(rbx); // save CP cache reference
274 __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
275 __ movl(rbx, Address(rbx, rcx,
276 Address::times_4, constantPoolCacheOopDesc::base_offset() +
277 ConstantPoolCacheEntry::f1_offset()));
278 __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
279 __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
280 __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
281 __ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
282 __ pop(rcx); // pop and discard CP cache
283 __ mov(rbx, rax); // target supertype into rbx for L_fail
284 __ pop(rax); // failed object into rax for L_fail
285 __ jmp(L_fail);
287 __ bind(L_ok_pops);
288 // restore pushed temp regs:
289 __ pop(rbx);
290 __ pop(rax);
291 __ bind(L_ok);
292 }
293 __ movl(rbx, Address(rbx, rcx,
294 Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
295 ConstantPoolCacheEntry::flags_offset()));
296 __ andptr(rbx, 0xFF);
297 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
298 __ dispatch_next(state, step);
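// Illustrative note (not in the original source): the low byte of the flags word
// loaded above holds the callee's parameter size, so the mask-and-lea sequence is
// effectively
//   rsp += (flags & 0xFF) * Interpreter::stackElementScale();
// i.e. the arguments of the call that just returned are popped before dispatching.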
300 // out of the main line of code...
301 if (EnableInvokeDynamic) {
302 __ bind(L_giant_index);
303 __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
304 __ jmp(L_got_cache);
306 if (unbox) {
307 __ bind(L_fail);
308 __ push(rbx); // missed klass (required)
309 __ push(rax); // bad object (actual)
310 __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
311 __ call(rdx);
312 }
313 }
315 return entry;
316 }
319 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
320 address entry = __ pc();
322 // In SSE mode, FP results are in xmm0
323 if (state == ftos && UseSSE > 0) {
324 __ subptr(rsp, wordSize);
325 __ movflt(Address(rsp, 0), xmm0);
326 __ fld_s(Address(rsp, 0));
327 __ addptr(rsp, wordSize);
328 } else if (state == dtos && UseSSE >= 2) {
329 __ subptr(rsp, 2*wordSize);
330 __ movdbl(Address(rsp, 0), xmm0);
331 __ fld_d(Address(rsp, 0));
332 __ addptr(rsp, 2*wordSize);
333 }
335 __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");
337 // The stack is not extended by deopt but we must NULL last_sp as this
338 // entry is like a "return".
339 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
340 __ restore_bcp();
341 __ restore_locals();
342 // handle exceptions
343 { Label L;
344 const Register thread = rcx;
345 __ get_thread(thread);
346 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
347 __ jcc(Assembler::zero, L);
348 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
349 __ should_not_reach_here();
350 __ bind(L);
351 }
352 __ dispatch_next(state, step);
353 return entry;
354 }
357 int AbstractInterpreter::BasicType_as_index(BasicType type) {
358 int i = 0;
359 switch (type) {
360 case T_BOOLEAN: i = 0; break;
361 case T_CHAR : i = 1; break;
362 case T_BYTE : i = 2; break;
363 case T_SHORT : i = 3; break;
364 case T_INT : // fall through
365 case T_LONG : // fall through
366 case T_VOID : i = 4; break;
367 case T_FLOAT : i = 5; break; // have to treat float and double separately for SSE
368 case T_DOUBLE : i = 6; break;
369 case T_OBJECT : // fall through
370 case T_ARRAY : i = 7; break;
371 default : ShouldNotReachHere();
372 }
373 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
374 return i;
375 }
378 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
379 address entry = __ pc();
380 switch (type) {
381 case T_BOOLEAN: __ c2bool(rax); break;
382 case T_CHAR : __ andptr(rax, 0xFFFF); break;
383 case T_BYTE : __ sign_extend_byte (rax); break;
384 case T_SHORT : __ sign_extend_short(rax); break;
385 case T_INT : /* nothing to do */ break;
386 case T_DOUBLE :
387 case T_FLOAT :
388 { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
389 __ pop(t); // remove return address first
390 __ pop_dtos_to_rsp();
391 // Must return a result for interpreter or compiler. In SSE
392 // mode, results are returned in xmm0 and the FPU stack must
393 // be empty.
394 if (type == T_FLOAT && UseSSE >= 1) {
395 // Load ST0
396 __ fld_d(Address(rsp, 0));
397 // Store as float and empty fpu stack
398 __ fstp_s(Address(rsp, 0));
399 // and reload
400 __ movflt(xmm0, Address(rsp, 0));
401 } else if (type == T_DOUBLE && UseSSE >= 2 ) {
402 __ movdbl(xmm0, Address(rsp, 0));
403 } else {
404 // restore ST0
405 __ fld_d(Address(rsp, 0));
406 }
407 // and pop the temp
408 __ addptr(rsp, 2 * wordSize);
409 __ push(t); // restore return address
410 }
411 break;
412 case T_OBJECT :
413 // retrieve result from frame
414 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
415 // and verify it
416 __ verify_oop(rax);
417 break;
418 default : ShouldNotReachHere();
419 }
420 __ ret(0); // return from result handler
421 return entry;
422 }
424 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
425 address entry = __ pc();
426 __ push(state);
427 __ call_VM(noreg, runtime_entry);
428 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
429 return entry;
430 }
433 // Helpers for commoning out cases in the various type of method entries.
434 //
436 // increment invocation count & check for overflow
437 //
438 // Note: checking for negative value instead of overflow
439 // so we have a 'sticky' overflow test
440 //
441 // rbx,: method
442 // rcx: invocation counter
443 //
444 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
446 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
447 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
449 if (ProfileInterpreter) { // %%% Merge this into methodDataOop
450 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
451 }
452 // Update standard invocation counters
453 __ movl(rax, backedge_counter); // load backedge counter
455 __ incrementl(rcx, InvocationCounter::count_increment);
456 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
458 __ movl(invocation_counter, rcx); // save invocation count
459 __ addl(rcx, rax); // add both counters
461 // profile_method is non-NULL only for interpreted methods, so
462 // profile_method != NULL implies !native_call.
463 // The BytecodeInterpreter calls this only for native methods, so that code is elided.
465 if (ProfileInterpreter && profile_method != NULL) {
466 // Test to see if we should create a method data oop
467 __ cmp32(rcx,
468 ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
469 __ jcc(Assembler::less, *profile_method_continue);
471 // if no method data exists, go to profile_method
472 __ test_method_data_pointer(rax, *profile_method);
473 }
475 __ cmp32(rcx,
476 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
477 __ jcc(Assembler::aboveEqual, *overflow);
479 }
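// Illustrative note (not in the original source): in C-like terms the test above is
//   invocation += count_increment;
//   if (invocation + (backedge & count_mask_value) >= InterpreterInvocationLimit)
//     goto overflow;
// i.e. the sum of the invocation and backedge counters, with the status bits masked
// off the backedge counter, is compared against the limit.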
481 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
483 // Asm interpreter on entry
484 // rdi - locals
485 // rsi - bcp
486 // rbx, - method
487 // rdx - cpool
488 // rbp, - interpreter frame
490 // C++ interpreter on entry
491 // rsi - new interpreter state pointer
492 // rbp - interpreter frame pointer
493 // rbx - method
495 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
496 // rbx, - method
497 // rcx - rcvr (assuming there is one)
498 // top of stack return address of interpreter caller
499 // rsp - sender_sp
501 // C++ interpreter only
502 // rsi - previous interpreter state pointer
504 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
506 // InterpreterRuntime::frequency_counter_overflow takes one argument
507 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
508 // The call returns the address of the verified entry point for the method or NULL
509 // if the compilation did not complete (either went background or bailed out).
510 __ movptr(rax, (intptr_t)false);
511 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
513 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
515 // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
516 // and jump to the interpreted entry.
517 __ jmp(*do_continue, relocInfo::none);
519 }
521 void InterpreterGenerator::generate_stack_overflow_check(void) {
522 // see if we've got enough room on the stack for locals plus overhead.
523 // the expression stack grows down incrementally, so the normal guard
524 // page mechanism will work for that.
525 //
526 // Registers live on entry:
527 //
528 // Asm interpreter
529 // rdx: number of additional locals this frame needs (what we must check)
530 // rbx,: methodOop
532 // destroyed on exit
533 // rax,
535 // NOTE: the additional locals are also always pushed (this wasn't obvious in
536 // generate_method_entry), so the guard should work for them too.
537 //
539 // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
540 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
542 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
543 // be sure to change this if you add/subtract anything to/from the overhead area
544 const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;
546 const int page_size = os::vm_page_size();
548 Label after_frame_check;
550 // see if the frame is greater than one page in size. If so,
551 // then we need to verify there is enough stack space remaining
552 // for the additional locals.
553 __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize());
554 __ jcc(Assembler::belowEqual, after_frame_check);
556 // compute rsp as if this were going to be the last frame on
557 // the stack before the red zone
559 Label after_frame_check_pop;
561 __ push(rsi);
563 const Register thread = rsi;
565 __ get_thread(thread);
567 const Address stack_base(thread, Thread::stack_base_offset());
568 const Address stack_size(thread, Thread::stack_size_offset());
570 // locals + overhead, in bytes
571 __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
573 #ifdef ASSERT
574 Label stack_base_okay, stack_size_okay;
575 // verify that thread stack base is non-zero
576 __ cmpptr(stack_base, (int32_t)NULL_WORD);
577 __ jcc(Assembler::notEqual, stack_base_okay);
578 __ stop("stack base is zero");
579 __ bind(stack_base_okay);
580 // verify that thread stack size is non-zero
581 __ cmpptr(stack_size, 0);
582 __ jcc(Assembler::notEqual, stack_size_okay);
583 __ stop("stack size is zero");
584 __ bind(stack_size_okay);
585 #endif
587 // Add stack base to locals and subtract stack size
588 __ addptr(rax, stack_base);
589 __ subptr(rax, stack_size);
591 // Use the maximum number of pages we might bang.
592 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
593 (StackRedPages+StackYellowPages);
594 __ addptr(rax, max_pages * page_size);
596 // check against the current stack bottom
597 __ cmpptr(rsp, rax);
598 __ jcc(Assembler::above, after_frame_check_pop);
600 __ pop(rsi); // get saved bcp / (c++ prev state ).
602 __ pop(rax); // get return address
603 __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
605 // all done with frame size check
606 __ bind(after_frame_check_pop);
607 __ pop(rsi);
609 __ bind(after_frame_check);
610 }
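// Illustrative note (not in the original source): the comparison above reduces to
//   ok = rsp > (stack_base - stack_size)          // lowest address of this thread's stack
//              + max_pages * page_size            // red/yellow/shadow pages we may bang
//              + locals_in_bytes + overhead_size; // what this activation will add
// If it fails, the saved rsi is restored, the return address is popped into rax,
// and control jumps to the shared StackOverflowError entry.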
612 // Allocate monitor and lock method (asm interpreter)
613 // rbx, - methodOop
614 //
615 void InterpreterGenerator::lock_method(void) {
616 // synchronize method
617 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
618 const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
619 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
621 #ifdef ASSERT
622 { Label L;
623 __ movl(rax, access_flags);
624 __ testl(rax, JVM_ACC_SYNCHRONIZED);
625 __ jcc(Assembler::notZero, L);
626 __ stop("method doesn't need synchronization");
627 __ bind(L);
628 }
629 #endif // ASSERT
630 // get synchronization object
631 { Label done;
632 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
633 __ movl(rax, access_flags);
634 __ testl(rax, JVM_ACC_STATIC);
635 __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
636 __ jcc(Assembler::zero, done);
637 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
638 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
639 __ movptr(rax, Address(rax, mirror_offset));
640 __ bind(done);
641 }
642 // add space for monitor & lock
643 __ subptr(rsp, entry_size); // add space for a monitor entry
644 __ movptr(monitor_block_top, rsp); // set new monitor block top
645 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
646 __ mov(rdx, rsp); // object address
647 __ lock_object(rdx);
648 }
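// Illustrative note (not in the original source): a monitor entry is a
// BasicObjectLock, i.e. a slot for the displaced mark word plus the object
// pointer just stored at obj_offset; entry_size covers the whole entry.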
650 //
651 // Generate a fixed interpreter frame. This is identical setup for interpreted methods
652 // and for native methods hence the shared code.
654 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
655 // initialize fixed part of activation frame
656 __ push(rax); // save return address
657 __ enter(); // save old & set new rbp,
660 __ push(rsi); // set sender sp
661 __ push((int32_t)NULL_WORD); // leave last_sp as null
662 __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
663 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
664 __ push(rbx); // save methodOop
665 if (ProfileInterpreter) {
666 Label method_data_continue;
667 __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
668 __ testptr(rdx, rdx);
669 __ jcc(Assembler::zero, method_data_continue);
670 __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
671 __ bind(method_data_continue);
672 __ push(rdx); // set the mdp (method data pointer)
673 } else {
674 __ push(0);
675 }
677 __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
678 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
679 __ push(rdx); // set constant pool cache
680 __ push(rdi); // set locals pointer
681 if (native_call) {
682 __ push(0); // no bcp
683 } else {
684 __ push(rsi); // set bcp
685 }
686 __ push(0); // reserve word for pointer to expression stack bottom
687 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
688 }
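// Illustrative note (not in the original source): the pushes above lay out the
// fixed frame, from high to low addresses, as
//   [ return address ] [ saved rbp ] [ sender sp ] [ last_sp = NULL ]
//   [ methodOop ] [ mdx or 0 ] [ constant pool cache ] [ locals (rdi) ]
//   [ bcp or 0 ] [ expr. stack bottom ] <-- rsp
// Compare also the stack picture given before generate_method_entry below.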
690 // End of helpers
692 //
693 // Various method entries
694 //------------------------------------------------------------------------------------------------------------------------
695 //
696 //
698 // Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)
700 address InterpreterGenerator::generate_accessor_entry(void) {
702 // rbx,: methodOop
703 // rcx: receiver (preserve for slow entry into asm interpreter)
705 // rsi: senderSP must be preserved for slow path, set SP to it on fast path
707 address entry_point = __ pc();
708 Label xreturn_path;
710 // do fastpath for resolved accessor methods
711 if (UseFastAccessorMethods) {
712 Label slow_path;
713 // If we need a safepoint check, generate full interpreter entry.
714 ExternalAddress state(SafepointSynchronize::address_of_state());
715 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
716 SafepointSynchronize::_not_synchronized);
718 __ jcc(Assembler::notEqual, slow_path);
719 // ASM/C++ Interpreter
720 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
721 // Note: We can only use this code if the getfield has been resolved
722 // and if we don't have a null-pointer exception => check for
723 // these conditions first and use slow path if necessary.
724 // rbx,: method
725 // rcx: receiver
726 __ movptr(rax, Address(rsp, wordSize));
728 // check if local 0 != NULL and read field
729 __ testptr(rax, rax);
730 __ jcc(Assembler::zero, slow_path);
732 __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
733 // read first instruction word and extract bytecode @ 1 and index @ 2
734 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
735 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
736 // Shift codes right to get the index on the right.
737 // The bytecode fetched looks like <index><0xb4><0x2a>
738 __ shrl(rdx, 2*BitsPerByte);
739 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
740 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
742 // rax,: local 0
743 // rbx,: method
744 // rcx: receiver - do not destroy since it is needed for slow path!
745 // rcx: scratch
746 // rdx: constant pool cache index
747 // rdi: constant pool cache
748 // rsi: sender sp
750 // check if getfield has been resolved and read constant pool cache entry
751 // check the validity of the cache entry by testing whether the _indices field
752 // contains Bytecodes::_getfield in the b1 byte.
753 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
754 __ movl(rcx,
755 Address(rdi,
756 rdx,
757 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
758 __ shrl(rcx, 2*BitsPerByte);
759 __ andl(rcx, 0xFF);
760 __ cmpl(rcx, Bytecodes::_getfield);
761 __ jcc(Assembler::notEqual, slow_path);
763 // Note: constant pool entry is not valid before bytecode is resolved
764 __ movptr(rcx,
765 Address(rdi,
766 rdx,
767 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
768 __ movl(rdx,
769 Address(rdi,
770 rdx,
771 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
773 Label notByte, notShort, notChar;
774 const Address field_address (rax, rcx, Address::times_1);
776 // Need to differentiate between igetfield, agetfield, bgetfield etc.
777 // because they are different sizes.
778 // Use the type from the constant pool cache
779 __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
780 // Make sure we don't need to mask rdx for tosBits after the above shift
781 ConstantPoolCacheEntry::verify_tosBits();
782 __ cmpl(rdx, btos);
783 __ jcc(Assembler::notEqual, notByte);
784 __ load_signed_byte(rax, field_address);
785 __ jmp(xreturn_path);
787 __ bind(notByte);
788 __ cmpl(rdx, stos);
789 __ jcc(Assembler::notEqual, notShort);
790 __ load_signed_short(rax, field_address);
791 __ jmp(xreturn_path);
793 __ bind(notShort);
794 __ cmpl(rdx, ctos);
795 __ jcc(Assembler::notEqual, notChar);
796 __ load_unsigned_short(rax, field_address);
797 __ jmp(xreturn_path);
799 __ bind(notChar);
800 #ifdef ASSERT
801 Label okay;
802 __ cmpl(rdx, atos);
803 __ jcc(Assembler::equal, okay);
804 __ cmpl(rdx, itos);
805 __ jcc(Assembler::equal, okay);
806 __ stop("what type is this?");
807 __ bind(okay);
808 #endif // ASSERT
809 // All the rest are 32-bit word sized.
810 // This is OK for now, since fast accessors should be going away.
811 __ movptr(rax, field_address);
813 __ bind(xreturn_path);
815 // _ireturn/_areturn
816 __ pop(rdi); // get return address
817 __ mov(rsp, rsi); // set sp to sender sp
818 __ jmp(rdi);
820 // generate a vanilla interpreter entry as the slow path
821 __ bind(slow_path);
823 (void) generate_normal_entry(false);
824 return entry_point;
825 }
826 return NULL;
828 }
830 //
831 // Interpreter stub for calling a native method. (asm interpreter)
832 // This sets up a somewhat different looking stack for calling the native method
833 // than the typical interpreter frame setup.
834 //
836 address InterpreterGenerator::generate_native_entry(bool synchronized) {
837 // determine code generation flags
838 bool inc_counter = UseCompiler || CountCompiledCalls;
840 // rbx,: methodOop
841 // rsi: sender sp
842 // rsi: previous interpreter state (C++ interpreter) must preserve
843 address entry_point = __ pc();
846 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
847 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
848 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
850 // get parameter size (always needed)
851 __ load_unsigned_short(rcx, size_of_parameters);
853 // native calls don't need the stack size check since they have no expression stack
854 // and the arguments are already on the stack; we only add a handful of words
855 // to the stack
857 // rbx,: methodOop
858 // rcx: size of parameters
859 // rsi: sender sp
861 __ pop(rax); // get return address
862 // for natives the size of locals is zero
864 // compute beginning of parameters (rdi)
865 __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
868 // add 2 zero-initialized slots for native calls
869 // NULL result handler
870 __ push((int32_t)NULL_WORD);
871 // NULL oop temp (mirror or jni oop result)
872 __ push((int32_t)NULL_WORD);
874 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
875 // initialize fixed part of activation frame
877 generate_fixed_frame(true);
879 // make sure method is native & not abstract
880 #ifdef ASSERT
881 __ movl(rax, access_flags);
882 {
883 Label L;
884 __ testl(rax, JVM_ACC_NATIVE);
885 __ jcc(Assembler::notZero, L);
886 __ stop("tried to execute non-native method as native");
887 __ bind(L);
888 }
889 { Label L;
890 __ testl(rax, JVM_ACC_ABSTRACT);
891 __ jcc(Assembler::zero, L);
892 __ stop("tried to execute abstract method in interpreter");
893 __ bind(L);
894 }
895 #endif
897 // Since at this point in the method invocation the exception handler
898 // would try to exit the monitor of synchronized methods which hasn't
899 // been entered yet, we set the thread local variable
900 // _do_not_unlock_if_synchronized to true. The remove_activation will
901 // check this flag.
903 __ get_thread(rax);
904 const Address do_not_unlock_if_synchronized(rax,
905 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
906 __ movbool(do_not_unlock_if_synchronized, true);
908 // increment invocation count & check for overflow
909 Label invocation_counter_overflow;
910 if (inc_counter) {
911 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
912 }
914 Label continue_after_compile;
915 __ bind(continue_after_compile);
917 bang_stack_shadow_pages(true);
919 // reset the _do_not_unlock_if_synchronized flag
920 __ get_thread(rax);
921 __ movbool(do_not_unlock_if_synchronized, false);
923 // check for synchronized methods
924 // Must happen AFTER invocation_counter check and stack overflow check,
925 // so the method is not locked if the counter overflows.
926 //
927 if (synchronized) {
928 lock_method();
929 } else {
930 // no synchronization necessary
931 #ifdef ASSERT
932 { Label L;
933 __ movl(rax, access_flags);
934 __ testl(rax, JVM_ACC_SYNCHRONIZED);
935 __ jcc(Assembler::zero, L);
936 __ stop("method needs synchronization");
937 __ bind(L);
938 }
939 #endif
940 }
942 // start execution
943 #ifdef ASSERT
944 { Label L;
945 const Address monitor_block_top (rbp,
946 frame::interpreter_frame_monitor_block_top_offset * wordSize);
947 __ movptr(rax, monitor_block_top);
948 __ cmpptr(rax, rsp);
949 __ jcc(Assembler::equal, L);
950 __ stop("broken stack frame setup in interpreter");
951 __ bind(L);
952 }
953 #endif
955 // jvmti/dtrace support
956 __ notify_method_entry();
958 // work registers
959 const Register method = rbx;
960 const Register thread = rdi;
961 const Register t = rcx;
963 // allocate space for parameters
964 __ get_method(method);
965 __ verify_oop(method);
966 __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
967 __ shlptr(t, Interpreter::logStackElementSize());
968 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
969 __ subptr(rsp, t);
970 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
972 // get signature handler
973 { Label L;
974 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
975 __ testptr(t, t);
976 __ jcc(Assembler::notZero, L);
977 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
978 __ get_method(method);
979 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
980 __ bind(L);
981 }
983 // call signature handler
984 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
985 assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code");
986 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code");
987 // The generated handlers do not touch RBX (the method oop).
988 // However, large signatures cannot be cached and are generated
989 // each time here. The slow-path generator will blow RBX
990 // sometime, so we must reload it after the call.
991 __ call(t);
992 __ get_method(method); // slow path call blows RBX on DevStudio 5.0
994 // result handler is in rax,
995 // set result handler
996 __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
998 // pass mirror handle if static call
999 { Label L;
1000 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
1001 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1002 __ testl(t, JVM_ACC_STATIC);
1003 __ jcc(Assembler::zero, L);
1004 // get mirror
1005 __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
1006 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
1007 __ movptr(t, Address(t, mirror_offset));
1008 // copy mirror into activation frame
1009 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
1010 // pass handle to mirror
1011 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
1012 __ movptr(Address(rsp, wordSize), t);
1013 __ bind(L);
1014 }
1016 // get native function entry point
1017 { Label L;
1018 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1019 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1020 __ cmpptr(rax, unsatisfied.addr());
1021 __ jcc(Assembler::notEqual, L);
1022 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
1023 __ get_method(method);
1024 __ verify_oop(method);
1025 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1026 __ bind(L);
1027 }
1029 // pass JNIEnv
1030 __ get_thread(thread);
1031 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1032 __ movptr(Address(rsp, 0), t);
1034 // set_last_Java_frame_before_call
1035 // It is enough that the pc()
1036 // points into the right code segment. It does not have to be the correct return pc.
1037 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1039 // change thread state
1040 #ifdef ASSERT
1041 { Label L;
1042 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1043 __ cmpl(t, _thread_in_Java);
1044 __ jcc(Assembler::equal, L);
1045 __ stop("Wrong thread state in native stub");
1046 __ bind(L);
1047 }
1048 #endif
1050 // Change state to native
1051 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1052 __ call(rax);
1054 // result potentially in rdx:rax or ST0
1056 // Either restore the MXCSR register after returning from the JNI Call
1057 // or verify that it wasn't changed.
1058 if (VM_Version::supports_sse()) {
1059 if (RestoreMXCSROnJNICalls) {
1060 __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
1061 }
1062 else if (CheckJNICalls ) {
1063 __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
1064 }
1065 }
1067 // Either restore the x87 floating pointer control word after returning
1068 // from the JNI call or verify that it wasn't changed.
1069 if (CheckJNICalls) {
1070 __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
1071 }
1073 // save potential result in ST(0) & rdx:rax
1074 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
1075 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1076 // It is safe to do this push because state is _thread_in_native and return address will be found
1077 // via _last_native_pc and not via _last_java_sp
1079 // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
1080 // If the order changes or anything else is added to the stack the code in
1081 // interpreter_frame_result will have to be changed.
1083 { Label L;
1084 Label push_double;
1085 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1086 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1087 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1088 float_handler.addr());
1089 __ jcc(Assembler::equal, push_double);
1090 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1091 double_handler.addr());
1092 __ jcc(Assembler::notEqual, L);
1093 __ bind(push_double);
1094 __ push(dtos);
1095 __ bind(L);
1096 }
1097 __ push(ltos);
1099 // change thread state
1100 __ get_thread(thread);
1101 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1102 if(os::is_MP()) {
1103 if (UseMembar) {
1104 // Force this write out before the read below
1105 __ membar(Assembler::Membar_mask_bits(
1106 Assembler::LoadLoad | Assembler::LoadStore |
1107 Assembler::StoreLoad | Assembler::StoreStore));
1108 } else {
1109 // Write serialization page so VM thread can do a pseudo remote membar.
1110 // We use the current thread pointer to calculate a thread specific
1111 // offset to write to within the page. This minimizes bus traffic
1112 // due to cache line collision.
1113 __ serialize_memory(thread, rcx);
1114 }
1115 }
1117 if (AlwaysRestoreFPU) {
1118 // Make sure the control word is correct.
1119 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1120 }
1122 // check for safepoint operation in progress and/or pending suspend requests
1123 { Label Continue;
1125 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
1126 SafepointSynchronize::_not_synchronized);
1128 Label L;
1129 __ jcc(Assembler::notEqual, L);
1130 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1131 __ jcc(Assembler::equal, Continue);
1132 __ bind(L);
1134 // Don't use call_VM as it will see a possible pending exception and forward it
1135 // and never return here preventing us from clearing _last_native_pc down below.
1136 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1137 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1138 // by hand.
1139 //
1140 __ push(thread);
1141 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1142 JavaThread::check_special_condition_for_native_trans)));
1143 __ increment(rsp, wordSize);
1144 __ get_thread(thread);
1146 __ bind(Continue);
1147 }
1149 // change thread state
1150 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1152 __ reset_last_Java_frame(thread, true, true);
1154 // reset handle block
1155 __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
1156 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
1158 // If result was an oop then unbox and save it in the frame
1159 { Label L;
1160 Label no_oop, store_result;
1161 ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
1162 __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
1163 handler.addr());
1164 __ jcc(Assembler::notEqual, no_oop);
1165 __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
1166 __ pop(ltos);
1167 __ testptr(rax, rax);
1168 __ jcc(Assembler::zero, store_result);
1169 // unbox
1170 __ movptr(rax, Address(rax, 0));
1171 __ bind(store_result);
1172 __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
1173 // keep stack depth as expected by pushing oop which will eventually be discarded
1174 __ push(ltos);
1175 __ bind(no_oop);
1176 }
1178 {
1179 Label no_reguard;
1180 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
1181 __ jcc(Assembler::notEqual, no_reguard);
1183 __ pusha();
1184 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1185 __ popa();
1187 __ bind(no_reguard);
1188 }
1190 // restore rsi to have a legal interpreter frame,
1191 // i.e., bci == 0 <=> rsi == code_base()
1192 // Can't call_VM until bcp is within a reasonable range.
1193 __ get_method(method); // method is junk from thread_in_native to now.
1194 __ verify_oop(method);
1195 __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
1196 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
1198 // handle exceptions (exception handling will handle unlocking!)
1199 { Label L;
1200 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1201 __ jcc(Assembler::zero, L);
1202 // Note: At some point we may want to unify this with the code used in call_VM_base();
1203 // i.e., we should use the StubRoutines::forward_exception code. For now this
1204 // doesn't work here because the rsp is not correctly set at this point.
1205 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1206 __ should_not_reach_here();
1207 __ bind(L);
1208 }
1210 // do unlocking if necessary
1211 { Label L;
1212 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1213 __ testl(t, JVM_ACC_SYNCHRONIZED);
1214 __ jcc(Assembler::zero, L);
1215 // the code below should be shared with interpreter macro assembler implementation
1216 { Label unlock;
1217 // BasicObjectLock will be first in list, since this is a synchronized method. However, need
1218 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1219 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
1221 __ lea(rdx, monitor); // address of first monitor
1223 __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
1224 __ testptr(t, t);
1225 __ jcc(Assembler::notZero, unlock);
1227 // Entry already unlocked, need to throw exception
1228 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1229 __ should_not_reach_here();
1231 __ bind(unlock);
1232 __ unlock_object(rdx);
1233 }
1234 __ bind(L);
1235 }
1237 // jvmti/dtrace support
1238 // Note: This must happen _after_ handling/throwing any exceptions since
1239 // the exception handler code notifies the runtime of method exits
1240 // too. If this happens before, method entry/exit notifications are
1241 // not properly paired (was bug - gri 11/22/99).
1242 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1244 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
1245 __ pop(ltos);
1246 __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
1247 __ call(t);
1249 // remove activation
1250 __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1251 __ leave(); // remove frame anchor
1252 __ pop(rdi); // get return address
1253 __ mov(rsp, t); // set sp to sender sp
1254 __ jmp(rdi);
1256 if (inc_counter) {
1257 // Handle overflow of counter and compile method
1258 __ bind(invocation_counter_overflow);
1259 generate_counter_overflow(&continue_after_compile);
1260 }
1262 return entry_point;
1263 }
1265 //
1266 // Generic interpreted method entry to (asm) interpreter
1267 //
1268 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1269 // determine code generation flags
1270 bool inc_counter = UseCompiler || CountCompiledCalls;
1272 // rbx,: methodOop
1273 // rsi: sender sp
1274 address entry_point = __ pc();
1277 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
1278 const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
1279 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
1280 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
1282 // get parameter size (always needed)
1283 __ load_unsigned_short(rcx, size_of_parameters);
1285 // rbx,: methodOop
1286 // rcx: size of parameters
1288 // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i )
1290 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1291 __ subl(rdx, rcx); // rdx = no. of additional locals
1293 // see if we've got enough room on the stack for locals plus overhead.
1294 generate_stack_overflow_check();
1296 // get return address
1297 __ pop(rax);
1299 // compute beginning of parameters (rdi)
1300 __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1302 // rdx - # of additional locals
1303 // allocate space for locals
1304 // explicitly initialize locals
1305 {
1306 Label exit, loop;
1307 __ testl(rdx, rdx);
1308 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1309 __ bind(loop);
1310 if (TaggedStackInterpreter) {
1311 __ push((int32_t)NULL_WORD); // push tag
1312 }
1313 __ push((int32_t)NULL_WORD); // initialize local variables
1314 __ decrement(rdx); // until everything initialized
1315 __ jcc(Assembler::greater, loop);
1316 __ bind(exit);
1317 }
1319 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
1320 // initialize fixed part of activation frame
1321 generate_fixed_frame(false);
1323 // make sure method is not native & not abstract
1324 #ifdef ASSERT
1325 __ movl(rax, access_flags);
1326 {
1327 Label L;
1328 __ testl(rax, JVM_ACC_NATIVE);
1329 __ jcc(Assembler::zero, L);
1330 __ stop("tried to execute native method as non-native");
1331 __ bind(L);
1332 }
1333 { Label L;
1334 __ testl(rax, JVM_ACC_ABSTRACT);
1335 __ jcc(Assembler::zero, L);
1336 __ stop("tried to execute abstract method in interpreter");
1337 __ bind(L);
1338 }
1339 #endif
1341 // Since at this point in the method invocation the exception handler
1342 // would try to exit the monitor of synchronized methods which hasn't
1343 // been entered yet, we set the thread local variable
1344 // _do_not_unlock_if_synchronized to true. The remove_activation will
1345 // check this flag.
1347 __ get_thread(rax);
1348 const Address do_not_unlock_if_synchronized(rax,
1349 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1350 __ movbool(do_not_unlock_if_synchronized, true);
1352 // increment invocation count & check for overflow
1353 Label invocation_counter_overflow;
1354 Label profile_method;
1355 Label profile_method_continue;
1356 if (inc_counter) {
1357 generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1358 if (ProfileInterpreter) {
1359 __ bind(profile_method_continue);
1360 }
1361 }
1362 Label continue_after_compile;
1363 __ bind(continue_after_compile);
1365 bang_stack_shadow_pages(false);
1367 // reset the _do_not_unlock_if_synchronized flag
1368 __ get_thread(rax);
1369 __ movbool(do_not_unlock_if_synchronized, false);
1371 // check for synchronized methods
1372 // Must happen AFTER invocation_counter check and stack overflow check,
1373 // so the method is not locked if the counter overflows.
1374 //
1375 if (synchronized) {
1376 // Allocate monitor and lock method
1377 lock_method();
1378 } else {
1379 // no synchronization necessary
1380 #ifdef ASSERT
1381 { Label L;
1382 __ movl(rax, access_flags);
1383 __ testl(rax, JVM_ACC_SYNCHRONIZED);
1384 __ jcc(Assembler::zero, L);
1385 __ stop("method needs synchronization");
1386 __ bind(L);
1387 }
1388 #endif
1389 }
1391 // start execution
1392 #ifdef ASSERT
1393 { Label L;
1394 const Address monitor_block_top (rbp,
1395 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1396 __ movptr(rax, monitor_block_top);
1397 __ cmpptr(rax, rsp);
1398 __ jcc(Assembler::equal, L);
1399 __ stop("broken stack frame setup in interpreter");
1400 __ bind(L);
1401 }
1402 #endif
1404 // jvmti support
1405 __ notify_method_entry();
1407 __ dispatch_next(vtos);
1409 // invocation counter overflow
1410 if (inc_counter) {
1411 if (ProfileInterpreter) {
1412 // We have decided to profile this method in the interpreter
1413 __ bind(profile_method);
1415 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
1417 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
1418 __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
1419 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
1420 __ test_method_data_pointer(rax, profile_method_continue);
1421 __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
1422 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
1423 __ jmp(profile_method_continue);
1424 }
1425 // Handle overflow of counter and compile method
1426 __ bind(invocation_counter_overflow);
1427 generate_counter_overflow(&continue_after_compile);
1428 }
1430 return entry_point;
1431 }
1433 //------------------------------------------------------------------------------------------------------------------------
1434 // Entry points
1435 //
1436 // Here we generate the various kind of entries into the interpreter.
1437 // The two main entry types are generic bytecode methods and native call methods.
1438 // These both come in synchronized and non-synchronized versions but the
1439 // frame layout they create is very similar. The other method entry
1440 // types are really just special-purpose entries that combine entry
1441 // and interpretation all in one. These are for trivial methods like
1442 // accessor, empty, or special math methods.
1443 //
1444 // When control flow reaches any of the entry types for the interpreter
1445 // the following holds ->
1446 //
1447 // Arguments:
1448 //
1449 // rbx,: methodOop
1450 // rcx: receiver
1451 //
1452 //
1453 // Stack layout immediately at entry
1454 //
1455 // [ return address ] <--- rsp
1456 // [ parameter n ]
1457 // ...
1458 // [ parameter 1 ]
1459 // [ expression stack ] (caller's java expression stack)
1461 // Assuming that we don't go to one of the trivial specialized
1462 // entries the stack will look like below when we are ready to execute
1463 // the first bytecode (or call the native routine). The register usage
1464 // will be as the template based interpreter expects (see interpreter_x86.hpp).
1465 //
1466 // local variables follow incoming parameters immediately (i.e.,
1467 // the return address is moved to the end of the locals).
1468 //
1469 // [ monitor entry ] <--- rsp
1470 // ...
1471 // [ monitor entry ]
1472 // [ expr. stack bottom ]
1473 // [ saved rsi ]
1474 // [ current rdi ]
1475 // [ methodOop ]
1476 // [ saved rbp, ] <--- rbp,
1477 // [ return address ]
1478 // [ local variable m ]
1479 // ...
1480 // [ local variable 1 ]
1481 // [ parameter n ]
1482 // ...
1483 // [ parameter 1 ] <--- rdi
1485 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
1486 // determine code generation flags
1487 bool synchronized = false;
1488 address entry_point = NULL;
1490 switch (kind) {
1491 case Interpreter::zerolocals : break;
1492 case Interpreter::zerolocals_synchronized: synchronized = true; break;
1493 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
1494 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
1495 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
1496 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
1497 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
1498 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
1500 case Interpreter::java_lang_math_sin : // fall thru
1501 case Interpreter::java_lang_math_cos : // fall thru
1502 case Interpreter::java_lang_math_tan : // fall thru
1503 case Interpreter::java_lang_math_abs : // fall thru
1504 case Interpreter::java_lang_math_log : // fall thru
1505 case Interpreter::java_lang_math_log10 : // fall thru
1506 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
1507 default : ShouldNotReachHere(); break;
1508 }
1510 if (entry_point) return entry_point;
1512 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
1514 }
1516 // How much stack a method activation needs in words.
1517 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1519 const int stub_code = 4; // see generate_call_stub
1520 // Save space for one monitor to get into the interpreted method in case
1521 // the method is synchronized
1522 int monitor_size = method->is_synchronized() ?
1523 1*frame::interpreter_frame_monitor_size() : 0;
1525 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
1526 // be sure to change this if you add/subtract anything to/from the overhead area
1527 const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
1529 const int extra_stack = methodOopDesc::extra_stack_entries();
1530 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
1531 Interpreter::stackElementWords();
1532 return overhead_size + method_stack + stub_code;
1533 }
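// Illustrative sketch (editorial note, hypothetical numbers): for a synchronized
// method with max_locals == 4, max_stack == 6, one-word stack elements and no
// extra stack entries, this returns
//   (-frame::interpreter_frame_initial_sp_offset + frame::interpreter_frame_monitor_size())
//   + (4 + 6 + 0) * 1 + 4
// words: the fixed frame overhead plus one monitor, the locals and operand stack,
// and the stub_code allowance for generate_call_stub.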
1535 // asm based interpreter deoptimization helpers
1537 int AbstractInterpreter::layout_activation(methodOop method,
1538 int tempcount,
1539 int popframe_extra_args,
1540 int moncount,
1541 int callee_param_count,
1542 int callee_locals,
1543 frame* caller,
1544 frame* interpreter_frame,
1545 bool is_top_frame) {
1546 // Note: This calculation must exactly parallel the frame setup
1547 // in AbstractInterpreterGenerator::generate_method_entry.
1548 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
1549 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1550 // as determined by a previous call to this method.
1551 // It is also guaranteed to be walkable even though it is in a skeletal state.
1552 // NOTE: the returned size is in words, not bytes.
1554 // fixed size of an interpreter frame:
1555 int max_locals = method->max_locals() * Interpreter::stackElementWords();
1556 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1557 Interpreter::stackElementWords();
1559 int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
1561 // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
1562 // Since the callee parameters already account for the callee's params, we only need to account for
1563 // the extra locals.
1566 int size = overhead +
1567 ((callee_locals - callee_param_count)*Interpreter::stackElementWords()) +
1568 (moncount*frame::interpreter_frame_monitor_size()) +
1569 tempcount*Interpreter::stackElementWords() + popframe_extra_args;
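// Restated (editorial note): size = fixed frame overhead
//   + the callee's locals that are not already covered by its parameters
//     (those parameters live on this frame's expression stack)
//   + the monitor area
//   + the live expression stack slots (tempcount) and any popframe extra arguments.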
1571 if (interpreter_frame != NULL) {
1572 #ifdef ASSERT
1573 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
1574 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
1575 #endif
1577 interpreter_frame->interpreter_frame_set_method(method);
1578 // NOTE the difference between sender_sp and interpreter_frame_sender_sp:
1579 // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp),
1580 // while sender_sp is fp+8.
1581 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
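// locals now points at the highest-addressed local slot (local 0 / parameter 1),
// i.e. the "[ parameter 1 ] <--- rdi" slot in the layout comment above.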
1583 interpreter_frame->interpreter_frame_set_locals(locals);
1584 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1585 BasicObjectLock* monbot = montop - moncount;
1586 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1588 // Set last_sp
1589 intptr_t* rsp = (intptr_t*) monbot -
1590 tempcount*Interpreter::stackElementWords() -
1591 popframe_extra_args;
1592 interpreter_frame->interpreter_frame_set_last_sp(rsp);
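// last_sp thus ends up just below the monitor area, leaving room for the live
// expression stack slots (tempcount) and any popframe extra arguments.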
1594 // All frames but the initial (oldest) interpreter frame we fill in have a
1595 // value for sender_sp that allows walking the stack but isn't
1596 // truly correct. Correct the value here.
1598 if (extra_locals != 0 &&
1599 interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
1600 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
1601 }
1602 *interpreter_frame->interpreter_frame_cache_addr() =
1603 method->constants()->cache();
1604 }
1605 return size;
1606 }
1609 //------------------------------------------------------------------------------------------------------------------------
1610 // Exceptions
1612 void TemplateInterpreterGenerator::generate_throw_exception() {
1613 // Entry point in previous activation (i.e., if the caller was interpreted)
1614 Interpreter::_rethrow_exception_entry = __ pc();
1616 // Restore sp to interpreter_frame_last_sp even though we are going
1617 // to empty the expression stack for the exception processing.
1618 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1619 // rax,: exception
1620 // rdx: return address/pc that threw exception
1621 __ restore_bcp(); // rsi points to call/send
1622 __ restore_locals();
1624 // Entry point for exceptions thrown within interpreter code
1625 Interpreter::_throw_exception_entry = __ pc();
1626 // expression stack is undefined here
1627 // rax,: exception
1628 // rsi: exception bcp
1629 __ verify_oop(rax);
1631 // expression stack must be empty before entering the VM in case of an exception
1632 __ empty_expression_stack();
1633 __ empty_FPU_stack();
1634 // find exception handler address and preserve exception oop
1635 __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
1636 // rax,: exception handler entry point
1637 // rdx: preserved exception oop
1638 // rsi: bcp for exception handler
1639 __ push_ptr(rdx); // push exception which is now the only value on the stack
1640 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1642 // If the exception is not handled in the current frame, the frame is removed and
1643 // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1644 //
1645 // Note: At this point the bci still refers to the instruction which caused
1646 // the exception and the expression stack is empty. Thus, for any VM calls
1647 // at this point, GC will find a legal oop map (with empty expression stack).
1649 // In current activation
1650 // tos: exception
1651 // rsi: exception bcp
1653 //
1654 // JVMTI PopFrame support
1655 //
1657 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1658 __ empty_expression_stack();
1659 __ empty_FPU_stack();
1660 // Set the popframe_processing bit in pending_popframe_condition indicating that we are
1661 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1662 // popframe handling cycles.
1663 __ get_thread(rcx);
1664 __ movl(rdx, Address(rcx, JavaThread::popframe_condition_offset()));
1665 __ orl(rdx, JavaThread::popframe_processing_bit);
1666 __ movl(Address(rcx, JavaThread::popframe_condition_offset()), rdx);
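// i.e. a read-modify-write of pending_popframe_condition: load it, or in the
// popframe_processing_bit, and store it back into the JavaThread.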
1668 {
1669 // Check to see whether we are returning to a deoptimized frame.
1670 // (The PopFrame call ensures that the caller of the popped frame is
1671 // either interpreted or compiled and deoptimizes it if compiled.)
1672 // In this case, we can't call dispatch_next() after the frame is
1673 // popped, but instead must save the incoming arguments and restore
1674 // them after deoptimization has occurred.
1675 //
1676 // Note that we don't compare the return PC against the
1677 // deoptimization blob's unpack entry because of the presence of
1678 // adapter frames in C2.
1679 Label caller_not_deoptimized;
1680 __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
1681 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
1682 __ testl(rax, rax);
1683 __ jcc(Assembler::notZero, caller_not_deoptimized);
1685 // Compute size of arguments for saving when returning to deoptimized caller
1686 __ get_method(rax);
1687 __ verify_oop(rax);
1688 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
1689 __ shlptr(rax, Interpreter::logStackElementSize());
1690 __ restore_locals();
1691 __ subptr(rdi, rax);
1692 __ addptr(rdi, wordSize);
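// rdi now points at the lowest-addressed word of the outgoing arguments and rax
// holds their size in bytes; both are handed to popframe_preserve_args below.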
1693 // Save these arguments
1694 __ get_thread(rcx);
1695 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
1697 __ remove_activation(vtos, rdx,
1698 /* throw_monitor_exception */ false,
1699 /* install_monitor_exception */ false,
1700 /* notify_jvmdi */ false);
1702 // Inform deoptimization that it is responsible for restoring these arguments
1703 __ get_thread(rcx);
1704 __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
1706 // Continue in deoptimization handler
1707 __ jmp(rdx);
1709 __ bind(caller_not_deoptimized);
1710 }
1712 __ remove_activation(vtos, rdx,
1713 /* throw_monitor_exception */ false,
1714 /* install_monitor_exception */ false,
1715 /* notify_jvmdi */ false);
1717 // Finish with popframe handling
1718 // A previous I2C followed by a deoptimization might have moved the
1719 // outgoing arguments further up the stack. PopFrame expects the
1720 // mutations to those outgoing arguments to be preserved and other
1721 // constraints basically require this frame to look exactly as
1722 // though it had previously invoked an interpreted activation with
1723 // no space between the top of the expression stack (current
1724 // last_sp) and the top of stack. Rather than force deopt to
1725 // maintain this kind of invariant all the time we call a small
1726 // fixup routine to move the mutated arguments onto the top of our
1727 // expression stack if necessary.
1728 __ mov(rax, rsp);
1729 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1730 __ get_thread(rcx);
1731 // PC must point into interpreter here
1732 __ set_last_Java_frame(rcx, noreg, rbp, __ pc());
1733 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx);
1734 __ get_thread(rcx);
1735 __ reset_last_Java_frame(rcx, true, true);
1736 // Restore the last_sp and null it out
1737 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1738 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1740 __ restore_bcp();
1741 __ restore_locals();
1742 // The method data pointer was incremented already during
1743 // call profiling. We have to restore the mdp for the current bcp.
1744 if (ProfileInterpreter) {
1745 __ set_method_data_pointer_for_bcp();
1746 }
1748 // Clear the popframe condition flag
1749 __ get_thread(rcx);
1750 __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
1752 __ dispatch_next(vtos);
1753 // end of PopFrame support
1755 Interpreter::_remove_activation_entry = __ pc();
1757 // preserve exception over this code sequence
1758 __ pop_ptr(rax);
1759 __ get_thread(rcx);
1760 __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
1761 // remove the activation (without doing throws on illegalMonitorExceptions)
1762 __ remove_activation(vtos, rdx, false, true, false);
1763 // restore exception
1764 __ get_thread(rcx);
1765 __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
1766 __ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
1767 __ verify_oop(rax);
1769 // In between activations - previous activation type unknown yet
1770 // compute continuation point - the continuation point expects
1771 // the following registers set up:
1772 //
1773 // rax,: exception
1774 // rdx: return address/pc that threw exception
1775 // rsp: expression stack of caller
1776 // rbp,: rbp, of caller
1777 __ push(rax); // save exception
1778 __ push(rdx); // save return address
1779 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
1780 __ mov(rbx, rax); // save exception handler
1781 __ pop(rdx); // restore return address
1782 __ pop(rax); // restore exception
1783 // Note that an "issuing PC" is actually the next PC after the call
1784 __ jmp(rbx); // jump to exception handler of caller
1785 }
1788 //
1789 // JVMTI ForceEarlyReturn support
1790 //
1791 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1792 address entry = __ pc();
1794 __ restore_bcp();
1795 __ restore_locals();
1796 __ empty_expression_stack();
1797 __ empty_FPU_stack();
1798 __ load_earlyret_value(state);
1800 __ get_thread(rcx);
1801 __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
1802 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1804 // Clear the earlyret state
1805 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1807 __ remove_activation(state, rsi,
1808 false, /* throw_monitor_exception */
1809 false, /* install_monitor_exception */
1810 true); /* notify_jvmdi */
1811 __ jmp(rsi);
1812 return entry;
1813 } // end of ForceEarlyReturn support
1816 //------------------------------------------------------------------------------------------------------------------------
1817 // Helper for vtos entry point generation
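// Each non-vtos entry point below pushes the value cached in the tos register(s)
// onto the expression stack and falls into the shared vtos entry, so the template
// body only has to be generated once (with a vtos in-state). btos/ctos/stos share
// the itos entry because their values are already int-sized in rax.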
1819 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1820 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1821 Label L;
1822 fep = __ pc(); __ push(ftos); __ jmp(L);
1823 dep = __ pc(); __ push(dtos); __ jmp(L);
1824 lep = __ pc(); __ push(ltos); __ jmp(L);
1825 aep = __ pc(); __ push(atos); __ jmp(L);
1826 bep = cep = sep = // fall through
1827 iep = __ pc(); __ push(itos); // fall through
1828 vep = __ pc(); __ bind(L); // fall through
1829 generate_and_dispatch(t);
1830 }
1832 //------------------------------------------------------------------------------------------------------------------------
1833 // Generation of individual instructions
1835 // helpers for generate_and_dispatch
1839 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1840 : TemplateInterpreterGenerator(code) {
1841 generate_all(); // down here so it can be "virtual"
1842 }
1844 //------------------------------------------------------------------------------------------------------------------------
1846 // Non-product code
1847 #ifndef PRODUCT
1848 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1849 address entry = __ pc();
1851 // prepare expression stack
1852 __ pop(rcx); // pop return address so expression stack is 'pure'
1853 __ push(state); // save tosca
1855 // pass tosca registers as arguments & call tracer
1856 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
1857 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1858 __ pop(state); // restore tosca
1860 // return
1861 __ jmp(rcx);
1863 return entry;
1864 }
1867 void TemplateInterpreterGenerator::count_bytecode() {
1868 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1869 }
1872 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1873 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1874 }
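// Note (editorial): after the shift/or below, rbx packs the previous bytecode into
// the low log2_number_of_codes bits and the current bytecode above them, so each
// counter records one (previous, current) bytecode pair.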
1877 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1878 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1879 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1880 __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1881 ExternalAddress table((address) BytecodePairHistogram::_counters);
1882 Address index(noreg, rbx, Address::times_4);
1883 __ incrementl(ArrayAddress(table, index));
1884 }
1887 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1888 // Call a little run-time stub to avoid blow-up for each bytecode.
1889 // The run-time stub saves the right registers, depending on
1890 // the tosca in-state for the given template.
1891 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1892 "entry must have been generated");
1893 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1894 }
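// Debugging aid (non-product): breaks into the debugger with int3 once the global
// bytecode counter reaches StopInterpreterAt.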
1897 void TemplateInterpreterGenerator::stop_interpreter_at() {
1898 Label L;
1899 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1900 StopInterpreterAt);
1901 __ jcc(Assembler::notEqual, L);
1902 __ int3();
1903 __ bind(L);
1904 }
1905 #endif // !PRODUCT
1906 #endif // CC_INTERP