Sun, 13 Apr 2008 17:43:42 -0400
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold

/*
 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_interpreter_x86_64.cpp.incl"

#define __ _masm->

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ leaq(rax, Address(rbp,
                         frame::interpreter_frame_monitor_block_top_offset *
                         wordSize));
    __ cmpq(rax, rsp); // rax = maximal rsp for current rbp (stack
                       // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ popq(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ popq(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
                                                                int step) {

  // amd64 doesn't need to do anything special about compiled returns
  // to the interpreter, so the sentinel placement and specialized
  // cleanup code that exist on x86 are not needed here.

  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
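  // Pop the caller's arguments using the parameter size recorded in the
  // call site's ConstantPoolCache entry: the load below fetches the entry's
  // flags word, whose low byte holds the parameter size (assumed layout;
  // see cpCacheOop.hpp).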
  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_8,
                       in_bytes(constantPoolCacheOopDesc::base_offset()) +
                       3 * wordSize));
  __ andl(rbx, 0xFF);
  if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
  __ leaq(rsp, Address(rsp, rbx, Address::times_8));
  __ dispatch_next(state, step);
  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  {
    Label L;
    __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}
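
// Note: T_OBJECT and T_ARRAY deliberately share index 9 above; an array
// result is an oop and reuses the T_OBJECT result handler generated below.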

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movq(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test
//
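// (the overflow check below uses an unsigned aboveEqual compare, so a
// counter that has run up into the sign bit still reads as a huge
// unsigned value and keeps taking the overflow branch)
//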
// rbx: method
// ecx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {

  const Address invocation_counter(rbx,
                                   methodOopDesc::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  const Address backedge_counter(rbx,
                                 methodOopDesc::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());

  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    __ incrementl(Address(rbx,
                          methodOopDesc::interpreter_invocation_counter_offset()));
  }
  // Update standard invocation counters
  __ movl(rax, backedge_counter); // load backedge counter

  __ incrementl(rcx, InvocationCounter::count_increment);
  __ andl(rax, InvocationCounter::count_mask_value); // mask out the
                                                     // status bits

  __ movl(invocation_counter, rcx); // save invocation count
  __ addl(rcx, rax); // add both counters

  // profile_method is non-NULL only for interpreted methods, so
  // profile_method != NULL means !native_call

  if (ProfileInterpreter && profile_method != NULL) {
    // Test to see if we should create a method data oop
    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
    __ jcc(Assembler::less, *profile_method_continue);

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(rax, *profile_method);
  }

  __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
  __ jcc(Assembler::aboveEqual, *overflow);
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool  --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  const Address size_of_parameters(rbx,
                                   methodOopDesc::size_of_parameters_offset());

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movq(rbx, Address(rbp, method_offset)); // restore methodOop
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was
// not obvious in generate_method_entry), the guard should work for
// them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: methodOop
//
// Kills:
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set up in
  // generate_method_entry and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ movq(rax, rdx);
  __ shll(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
  __ addq(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpq(stack_base, 0);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpq(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addq(rax, stack_base);
  __ subq(rax, stack_size);

  // add in the red and yellow zone sizes
  __ addq(rax, (StackRedPages + StackYellowPages) * page_size);
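
  // rax is now the lowest address the new frame may extend down to:
  //   stack_base - stack_size + guard zones + (locals + overhead).
  // The frame fits only if the current rsp is still above that point.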

  // check against the current stack bottom
  __ cmpq(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  __ popq(rax); // get return address
  __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: methodOop
//      r14: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rbx, methodOopDesc::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
                              Klass::java_mirror_offset_in_bytes();
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is the frequent case)
    __ movq(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movq(rax, Address(rbx, methodOopDesc::constants_offset()));
    __ movq(rax, Address(rax,
                         constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movq(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testq(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subq(rsp, entry_size); // add space for a monitor entry
  __ movq(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movq(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
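  // The entry just carved out is a BasicObjectLock: a displaced-header
  // word plus the object word stored above; lock_object below fills in
  // the header when it actually acquires the lock.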
  __ movq(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      rax: return address
//      rbx: methodOop
//      r14: pointer to locals
//      r13: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ pushq(rax);            // save return address
  __ enter();               // save old & set new rbp
  __ pushq(r13);            // set sender sp
  __ pushq((int)NULL_WORD); // leave last_sp as null
  __ movq(r13, Address(rbx, methodOopDesc::const_offset()));      // get constMethodOop
  __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
  __ pushq(rbx);            // save methodOop
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movq(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    __ testq(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addq(rdx, in_bytes(methodDataOopDesc::data_offset()));
    __ bind(method_data_continue);
    __ pushq(rdx);          // set the mdp (method data pointer)
  } else {
    __ pushq(0);
  }

  __ movq(rdx, Address(rbx, methodOopDesc::constants_offset()));
  __ movq(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
  __ pushq(rdx); // set constant pool cache
  __ pushq(r14); // set locals pointer
  if (native_call) {
    __ pushq(0); // no bcp
  } else {
    __ pushq(r13); // set bcp
  }
  __ pushq(0); // reserve word for pointer to expression stack bottom
  __ movq(Address(rsp, 0), rsp); // set expression stack bottom
}
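
// The pushes above lay out, top to bottom, exactly the fixed frame pictured
// in the layout comment ahead of generate_method_entry further below.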

// End of helpers

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // r13: sender sp

  address entry_point = __ pc();

  const Address size_of_parameters(rbx, methodOopDesc::
                                        size_of_parameters_offset());
  const Address invocation_counter(rbx, methodOopDesc::
                                        invocation_counter_offset() +
                                        InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_word(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to the stack

  // rbx: methodOop
  // rcx: size of parameters
  // r13: sender sp
  __ popq(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
  __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ pushq((int) NULL);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ pushq((int) NULL);

  if (inc_counter) {
    __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movq(rax, monitor_block_top);
    __ cmpq(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r11;

  // allocate space for parameters
  __ get_method(method);
  __ verify_oop(method);
  __ load_unsigned_word(t,
                        Address(method,
                                methodOopDesc::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize());

  __ subq(rsp, t);
  __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andq(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ testq(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movq(Address(rbp,
                  (frame::interpreter_frame_result_handler_offset) * wordSize),
          rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
                              Klass::java_mirror_offset_in_bytes();
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movq(t, Address(method, methodOopDesc::constants_offset()));
    __ movq(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movq(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
            t);
    // pass handle to mirror
    __ leaq(c_rarg1,
            Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpq(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ verify_oop(method);
    __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ leaq(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Depending on runtime options, either restore the MXCSR
  // register after returning from the JNI call or verify that
  // it wasn't changed during -Xcheck:jni.
  if (RestoreMXCSROnJNICalls) {
    __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
  }
  else if (CheckJNICalls) {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
  }

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result,
  // which uses it to extract the result of a method call. If the order of
  // these pushes changes or anything else is added to the stack, then the
  // code in interpreter_frame_result must also change.

  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here, preventing us from
    // clearing _last_native_pc down below. We can't use call_VM_leaf
    // either, as it will check to see if r13 & r14 are preserved and
    // correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ movq(c_rarg0, r15_thread);
    __ movq(r12, rsp); // remember sp
    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andq(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ movq(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movq(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
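  // clearing the block's top pointer effectively frees any JNI handles
  // the native code allocated in it during the call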

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpq(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testq(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movq(rax, Address(rax, 0));
    __ bind(store_result);
    __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pushaq(); // XXX only save smashed registers
    __ movq(r12, rsp); // remember sp
    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andq(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ movq(rsp, r12); // restore sp
    __ popaq(); // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. We also can't call_VM until the bcp has been restored.
  // We need the bcp for throwing the exception below, so get it now.
  __ get_method(method);
  __ verify_oop(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movq(r13, Address(method, methodOopDesc::const_offset()));   // get constMethodOop
  __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // monitor expected in c_rarg1 for slow unlock path
      __ leaq(c_rarg1, monitor); // address of first monitor

      __ movq(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testq(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rax (ltos) and xmm0 (dtos), then call the
  // result handler to convert and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ movq(t, Address(rbp,
                     (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movq(t, Address(rbp,
                     frame::interpreter_frame_sender_sp_offset *
                     wordSize));   // get sender sp
  __ leave();                      // remove frame anchor
  __ popq(rdi);                    // get return address
  __ movq(rsp, t);                 // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // r13: sender sp
  address entry_point = __ pc();

  const Address size_of_parameters(rbx,
                                   methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals(rbx, methodOopDesc::size_of_locals_offset());
  const Address invocation_counter(rbx,
                                   methodOopDesc::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  const Address access_flags(rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_word(rcx, size_of_parameters);

  // rbx: methodOop
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_word(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ popq(rax);

  // compute beginning of parameters (r14)
  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
  __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    if (TaggedStackInterpreter) __ pushq((int) NULL); // push tag
    __ pushq((int) NULL); // initialize local variables
    __ decrementl(rdx);   // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // (pre-)fetch invocation count
  if (inc_counter) {
    __ movl(rcx, invocation_counter);
  }
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movq(rax, monitor_block_top);
    __ cmpq(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
                 r13, true);

      __ movq(rbx, Address(rbp, method_offset)); // restore methodOop
      __ movq(rax, Address(rbx,
                           in_bytes(methodOopDesc::method_data_offset())));
      __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
              rax);
      __ test_method_data_pointer(rax, profile_method_continue);
      __ addq(rax, in_bytes(methodDataOopDesc::data_offset()));
      __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
              rax);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
//      rbx: methodOop
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ methodOop          ]
// [ saved rbp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                             break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
  case Interpreter::java_lang_math_sin     :                                                                             break;
  case Interpreter::java_lang_math_cos     :                                                                             break;
  case Interpreter::java_lang_math_tan     :                                                                             break;
  case Interpreter::java_lang_math_abs     :                                                                             break;
  case Interpreter::java_lang_math_log     :                                                                             break;
  case Interpreter::java_lang_math_log10   :                                                                             break;
  case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
  default                                  : ShouldNotReachHere();                                                       break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->
         generate_normal_entry(synchronized);
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords();
  return (overhead_size + method_stack + stub_code);
}

int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame != NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the
  // right size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords();
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords();

  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_param_count) * Interpreter::stackElementWords() +
             moncount * frame::interpreter_frame_monitor_size() +
             tempcount * Interpreter::stackElementWords() + popframe_extra_args;
  if (interpreter_frame != NULL) {
#ifdef ASSERT
    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
           "Frame not properly walkable");
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference in using sender_sp and
    // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
    // the original sp of the caller (the unextended_sp) and
    // sender_sp is fp+16 XXX
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* esp = (intptr_t*) monbot -
                    tempcount * Interpreter::stackElementWords() -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(esp);

    // All frames but the initial (oldest) interpreter frame we fill in have
    // a value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.
    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() ==
        interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                         extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();     // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase(); // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ movq(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movq(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize());
    __ restore_locals(); // XXX do we need this?
    __ subq(r14, rax);
    __ addq(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ movq(c_rarg1, rsp);
  __ movq(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movq(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ movq(rax, Address(r15_thread, JavaThread::vm_result_offset()));
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ verify_oop(rax);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ pushq(rax); // save exception
  __ pushq(rdx); // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rdx);
  __ movq(rbx, rax); // save exception handler
  __ popq(rdx);      // restore return address
  __ popq(rax);      // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx); // jump to exception
               // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movq(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
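//
// A bytecode with a vtos in-state can be reached with the previous
// bytecode's result still live in tosca, so each TosState gets its own
// entry point that first pushes that value onto the expression stack
// before dispatching. btos, ctos, and stos share the itos entry below,
// since sub-word values already live in rax as ints.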

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ jmp(L);
  fep = __ pc();  __ push_f();    __ jmp(L);
  dep = __ pc();  __ push_d();    __ jmp(L);
  lep = __ pc();  __ push_l();    __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ pushq(c_rarg0);
  __ pushq(c_rarg1);
  __ pushq(c_rarg2);
  __ pushq(c_rarg3);
  __ movq(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0);  // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ popq(c_rarg3);
  __ popq(c_rarg2);
  __ popq(c_rarg1);
  __ popq(c_rarg0);
  __ pop(state);
  __ ret(0); // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ movq(r12, rsp); // remember sp
  __ andq(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ movq(rsp, r12); // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT