Wed, 13 Mar 2013 09:44:45 +0100
8009761: Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
Summary: deoptimization doesn't set up callee frames so that they restore caller frames correctly.
Reviewed-by: kvn
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
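
  // The two shifts below split the 64-bit long in G1 into the 32-bit halves
  // the interpreter expects: srl(G1, 0, O1) zero-extends the low word into
  // O1, and srlx(G1, 32, O0) moves the high word into O0.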
  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, ConstantPoolCache::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                            // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                             // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba_short(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
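    // For T_BOOLEAN, subcc(G0, O0, G0) computes 0 - O0 and sets the carry
    // (borrow) flag exactly when O0 is nonzero; addc(G0, 0, Itos_i) then
    // materializes that carry bit, normalizing any nonzero result to 1.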
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)      // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
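// The overflow test is 'sticky' because the counter only grows: once it
// reaches the limit, every subsequent compare against the limit also
// branches to the overflow path, so a crossing can never be missed
// between two checks.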
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in Method*
    __ bind(no_mdo);
    Address invocation_counter(Lmethod,
                               in_bytes(Method::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G3_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ increment_invocation_counter(O0, G3_scratch);
    if (ProfileInterpreter) {  // %%% Merge this into MethodData*
      Address interpreter_invocation_counter(Lmethod, in_bytes(Method::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G3_scratch);
      __ inc(G3_scratch);
      __ st(G3_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
  }
}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the Klass*
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );
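
  // Rscratch now holds (stack base - stack size) + guard area, i.e. the
  // lowest usable stack address just above the red/yellow guard pages.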

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // the stack will overflow, throw an exception

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//Method::extra_stack_words() +   // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr( constMethod, Otmp1 );
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr( constMethod, Gframe_size );
    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );
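
  // Gframe_size is negated because SPARC save adds its operand to SP and
  // the stack grows toward lower addresses; the single save instruction
  // both shifts the register window and allocates the new frame.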

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go to the slow path
    __ br_null_short(Otos_i, Assembler::pn, slow_path);


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
    __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
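
    // Net effect of the two shifts: G1_scratch now holds the constant pool
    // cache index from bytecode bytes 2-3 already scaled by the cache entry
    // size in bytes, saving a separate mask-and-scale sequence.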

    // get constant pool cache
    __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
    // Make sure we don't need to mask G1_scratch after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go to the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  // This flag has two effects, one is to force an unwind in the topmost
  // interpreter frame and not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Lmethod, Method:: const_offset(), O1);
    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
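
  // The add above executes in the call's delay slot, so O0 already holds
  // the JNIEnv* (thread pointer plus jni_environment offset) when the
  // native method receives control.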

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef __LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* __LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1 we can't tell if we're returning to interpreted
  // or compiled so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1
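
  // In the 32-bit convention O0 holds the high word and O1 the low word;
  // the three instructions above pack them into the single 64-bit G1
  // register that compiled (C2) callers read longs from.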

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod       (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod,   Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was setup before zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
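
  // The loop walks O2 upward from just below the last local to the first
  // non-parameter local, NULLing each slot so GC never sees stale values;
  // the store sits in the annulled delay slot, so it executes only when
  // the branch is taken and is skipped on loop exit.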

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
1533 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
1535 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
1536 // expression stack, the callee will have callee_extra_locals (so we can account for
1537 // frame extension) and monitor_size for monitors. Basically we need to calculate
1538 // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
1539 //
1540 //
1541 // The big complication here is that we must ensure that the stack stays properly
1542 // aligned. This would be even uglier if the monitor size were not a multiple of
1543 // the stack's required alignment. We are given that the sp (fp) is already aligned
1544 // by the caller, so we must ensure that it is properly aligned for our callee.
1545 //
1546 const int rounded_vm_local_words =
1547 round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1548 // callee_extra_locals and max_stack are element counts, not sizes in words.
1549 const int locals_size =
1550 round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
1551 const int max_stack_words = max_stack * Interpreter::stackElementWords;
1552 return (round_to((max_stack_words
1553 //6815692//+ Method::extra_stack_words()
1554 + rounded_vm_local_words
1555 + frame::memory_parameter_word_sp_offset), WordsPerLong)
1556 // already rounded
1557 + locals_size + monitor_size);
1558 }
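// Editorial worked example of size_activation_helper() (the constants are
// hypothetical; real values are platform-dependent). Assuming
// WordsPerLong == 2 and Interpreter::stackElementWords == 1:
//
//   callee_extra_locals = 3   =>  locals_size     = round_to(3, 2) = 4 words
//   max_stack           = 5   =>  max_stack_words = 5 words
//   result = round_to(5 + rounded_vm_local_words
//                       + frame::memory_parameter_word_sp_offset, 2)
//            + 4 + monitor_size
//
// The expression-stack/vm-locals/memory-parameter area is rounded as a unit,
// while locals_size is rounded here and monitor_size is expected to arrive
// already long-aligned (the caller asserts this).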
1560 // How much stack a top interpreter activation of the given method needs, in words.
1561 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
1563 // See call_stub code
1564 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
1565 WordsPerLong); // 7 + register save area
1567 // Save space for one monitor to get into the interpreted method in case
1568 // the method is synchronized
1569 int monitor_size = method->is_synchronized() ?
1570 1*frame::interpreter_frame_monitor_size() : 0;
1571 return size_activation_helper(method->max_locals(), method->max_stack(),
1572 monitor_size) + call_stub_size;
1573 }
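// Hypothetical usage sketch (editorial, not code from this file): a caller
// checking for stack headroom before entering the interpreter might do
//
//   int words = AbstractInterpreter::size_top_interpreter_activation(m);
//   bool fits = words * wordSize <= remaining_stack_bytes;
//
// where m and remaining_stack_bytes are supplied by that caller.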
1575 int AbstractInterpreter::layout_activation(Method* method,
1576 int tempcount,
1577 int popframe_extra_args,
1578 int moncount,
1579 int caller_actual_parameters,
1580 int callee_param_count,
1581 int callee_local_count,
1582 frame* caller,
1583 frame* interpreter_frame,
1584 bool is_top_frame,
1585 bool is_bottom_frame) {
1586 // Note: This calculation must exactly parallel the frame setup
1587 // in InterpreterGenerator::generate_fixed_frame.
1588 // If interpreter_frame != NULL, set up the following variables:
1589 // - Lmethod
1590 // - Llocals
1591 // - Lmonitors (to the indicated number of monitors)
1592 // - Lesp (to the indicated number of temps)
1593 // On entry, caller describes the caller of the frame we are about to lay out.
1594 // We are guaranteed that we will be able to fill in a new interpreter frame as
1595 // its callee (i.e. the stack space is allocated and the amount was determined
1596 // by an earlier call to this method with interpreter_frame == NULL).
1597 // On return, interpreter_frame (if not NULL) describes the interpreter frame we just laid out.
1599 int monitor_size = moncount * frame::interpreter_frame_monitor_size();
1600 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1602 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
1603 //
1604 // Note: if you look closely, this appears to be doing something quite different
1605 // from generate_fixed_frame. What is happening is this. On sparc we have to do
1606 // this dance with interpreter_sp_adjustment because the window save area would
1607 // appear just below the bottom (tos) of the caller's java expression stack. Because
1608 // the interpreter wants the locals to be completely contiguous, generate_fixed_frame
1609 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
1610 // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
1611 // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
1612 // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
1613 // because the oldest frame would have adjusted its caller's frame, and yet that frame
1614 // already exists and isn't part of this array of frames we are unpacking. So at first
1615 // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
1616 // after it calculates all of the frames' on_stack_size()'s, will figure out the
1617 // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
1618 // add up. It might seem simpler to account for the adjustment here (and remove the
1619 // callee... parameters here). However, this would mean that this routine would have to take
1620 // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
1621 // and run the calling loop in the reverse order. It would also appear to mean making
1622 // this code aware of the interactions when that initial caller frame was an osr or
1623 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
1624 // there is no sense in messing with working code.
1625 //
1625 //
1627 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1628 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1630 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1631 monitor_size);
1633 if (interpreter_frame != NULL) {
1634 // The skeleton frame must already look like an interpreter frame
1635 // even if not fully filled out.
1636 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1638 intptr_t* fp = interpreter_frame->fp();
1640 JavaThread* thread = JavaThread::current();
1641 RegisterMap map(thread, false);
1642 // More verification that skeleton frame is properly walkable
1643 assert(fp == caller->sp(), "fp must match");
1645 intptr_t* montop = fp - rounded_vm_local_words;
1647 // preallocate monitors (cf. __ add_monitor_to_stack)
1648 intptr_t* monitors = montop - monitor_size;
1650 // preallocate stack space
1651 intptr_t* esp = monitors - 1 -
1652 (tempcount * Interpreter::stackElementWords) -
1653 popframe_extra_args;
1655 int local_words = method->max_locals() * Interpreter::stackElementWords;
1656 NEEDS_CLEANUP;
1657 intptr_t* locals;
1658 if (caller->is_interpreted_frame()) {
1659 // Can force the locals area to end up properly overlapping the top of the expression stack.
1660 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1661 // Note that this computation means we replace size_of_parameters() values from the caller
1662 // interpreter frame's expression stack with our argument locals
1663 int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
1664 locals = Lesp_ptr + parm_words;
1665 int delta = local_words - parm_words;
1666 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
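// Editorial example: with local_words = 6 and parm_words = 2 the callee needs
// delta = 4 extra words of locals, so computed_sp_adjustment =
// round_to(4, WordsPerLong). The callee's I5_savedSP (set just below) then
// records the caller's SP extended by that amount, mirroring the SP bump
// generate_fixed_frame performs at method entry.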
1667 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
1668 if (!is_bottom_frame) {
1669 // Llast_SP is set below for the current frame to SP (with the
1670 // extra space for the callee's locals). Here we adjust
1671 // Llast_SP for the caller's frame, removing the extra space
1672 // for the current method's locals.
1673 *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
1674 } else {
1675 assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
1676 }
1677 } else {
1678 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1679 // Don't have Lesp available; lay out locals block in the caller
1680 // adjacent to the register window save area.
1681 //
1682 // Compiled frames do not allocate a varargs area, which is why this if
1683 // statement is needed.
1684 //
1685 if (caller->is_compiled_frame()) {
1686 locals = fp + frame::register_save_words + local_words - 1;
1687 } else {
1688 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1689 }
1690 if (!caller->is_entry_frame()) {
1691 // Caller wants its own SP back
1692 int caller_frame_size = caller->cb()->frame_size();
1693 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1694 }
1695 }
1696 if (TraceDeoptimization) {
1697 if (caller->is_entry_frame()) {
1698 // make sure I5_savedSP and the entry frame's notion of saved SP
1699 // agree. This assertion duplicates a check in the entry frame code
1700 // but catches the failure earlier.
1701 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
1702 "would change caller's SP");
1703 }
1704 if (caller->is_entry_frame()) {
1705 tty->print("entry ");
1706 }
1707 if (caller->is_compiled_frame()) {
1708 tty->print("compiled ");
1709 if (caller->is_deoptimized_frame()) {
1710 tty->print("(deopt) ");
1711 }
1712 }
1713 if (caller->is_interpreted_frame()) {
1714 tty->print("interpreted ");
1715 }
1716 tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)caller->sp());
1717 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->sp(), (intptr_t)(caller->sp() + 16));
1718 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)(caller->fp() + 16));
1719 tty->print_cr("interpreter fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)interpreter_frame->sp());
1720 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->sp(), (intptr_t)(interpreter_frame->sp() + 16));
1721 tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)(interpreter_frame->fp() + 16));
1722 tty->print_cr("Llocals = " INTPTR_FORMAT, (intptr_t)locals);
1723 tty->print_cr("Lesp = " INTPTR_FORMAT, (intptr_t)esp);
1724 tty->print_cr("Lmonitors = " INTPTR_FORMAT, (intptr_t)monitors);
1725 }
1727 if (method->max_locals() > 0) {
1728 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1729 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1730 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1731 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1732 }
1733 #ifdef _LP64
1734 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1735 #endif
1737 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
1738 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
1739 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
1740 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
1741 // Llast_SP will be the same as SP, as there is no adapter space
1742 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1743 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1744 #ifdef FAST_DISPATCH
1745 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1746 #endif
1749 #ifdef ASSERT
1750 BasicObjectLock* mp = (BasicObjectLock*)monitors;
1752 assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1753 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
1754 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
1755 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1756 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1758 // check bounds
1759 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1760 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1761 assert(lo < monitors && montop <= hi, "monitors in bounds");
1762 assert(lo <= esp && esp < monitors, "esp in bounds");
1763 #endif // ASSERT
1764 }
1766 return raw_frame_size;
1767 }
1769 //----------------------------------------------------------------------------------------------------
1770 // Exceptions
1771 void TemplateInterpreterGenerator::generate_throw_exception() {
1773 // Entry point in previous activation (i.e., if the caller was interpreted)
1774 Interpreter::_rethrow_exception_entry = __ pc();
1775 // O0: exception
1777 // entry point for exceptions thrown within interpreter code
1778 Interpreter::_throw_exception_entry = __ pc();
1779 __ verify_thread();
1780 // expression stack is undefined here
1781 // O0: exception, i.e. Oexception
1782 // Lbcp: exception bcx
1783 __ verify_oop(Oexception);
1786 // expression stack must be empty before entering the VM in case of an exception
1787 __ empty_expression_stack();
1788 // find exception handler address and preserve exception oop
1789 // call C routine to find handler and jump to it
1790 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1791 __ push_ptr(O1); // push exception for exception handler bytecodes
1793 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1794 __ delayed()->nop();
1797 // if the exception is not handled in the current frame
1798 // the frame is removed and the exception is rethrown
1799 // (i.e. exception continuation is _rethrow_exception)
1800 //
1801 // Note: At this point the bci is still the bci of the instruction which caused
1802 // the exception, and the expression stack is empty. Thus, for any VM calls
1803 // at this point, GC will find a legal oop map (with empty expression stack).
1805 // in current activation
1806 // tos: exception
1807 // Lbcp: exception bcp
1809 //
1810 // JVMTI PopFrame support
1811 //
1813 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1814 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1815 // Set the popframe_processing bit in popframe_condition indicating that we are
1816 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1817 // popframe handling cycles.
1819 __ ld(popframe_condition_addr, G3_scratch);
1820 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1821 __ stw(G3_scratch, popframe_condition_addr);
1823 // Empty the expression stack, as in normal exception handling
1824 __ empty_expression_stack();
1825 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1827 {
1828 // Check to see whether we are returning to a deoptimized frame.
1829 // (The PopFrame call ensures that the caller of the popped frame is
1830 // either interpreted or compiled and deoptimizes it if compiled.)
1831 // In this case, we can't call dispatch_next() after the frame is
1832 // popped, but instead must save the incoming arguments and restore
1833 // them after deoptimization has occurred.
1834 //
1835 // Note that we don't compare the return PC against the
1836 // deoptimization blob's unpack entry because of the presence of
1837 // adapter frames in C2.
1838 Label caller_not_deoptimized;
1839 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1840 __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
1842 const Register Gtmp1 = G3_scratch;
1843 const Register Gtmp2 = G1_scratch;
1844 const Register RconstMethod = Gtmp1;
1845 const Address constMethod(Lmethod, Method::const_offset());
1846 const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
1848 // Compute size of arguments for saving when returning to deoptimized caller
1849 __ ld_ptr(constMethod, RconstMethod);
1850 __ lduh(size_of_parameters, Gtmp1);
1851 __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1852 __ sub(Llocals, Gtmp1, Gtmp2);
1853 __ add(Gtmp2, wordSize, Gtmp2);
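// Editorial example (assuming Interpreter::stackElementSize == wordSize):
// for size_of_parameters == 3, Gtmp1 becomes 3 * wordSize bytes and
// Gtmp2 = Llocals - 3*wordSize + wordSize, i.e. the lowest-addressed
// argument word; together they delimit the argument block saved just below.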
1854 // Save these arguments
1855 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1856 // Inform deoptimization that it is responsible for restoring these arguments
1857 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1858 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1859 __ st(Gtmp1, popframe_condition_addr);
1861 // Return from the current method
1862 // The caller's SP was adjusted upon method entry to accommodate
1863 // the callee's non-argument locals. Undo that adjustment.
1864 __ ret();
1865 __ delayed()->restore(I5_savedSP, G0, SP);
1867 __ bind(caller_not_deoptimized);
1868 }
1870 // Clear the popframe condition flag
1871 __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1873 // Get out of the current method (how this is done depends on the particular compiler calling
1874 // convention that the interpreter currently follows)
1875 // The caller's SP was adjusted upon method entry to accommodate
1876 // the callee's non-argument locals. Undo that adjustment.
1877 __ restore(I5_savedSP, G0, SP);
1878 // The method data pointer was incremented already during
1879 // call profiling. We have to restore the mdp for the current bcp.
1880 if (ProfileInterpreter) {
1881 __ set_method_data_pointer_for_bcp();
1882 }
1883 // Resume bytecode interpretation at the current bcp
1884 __ dispatch_next(vtos);
1885 // end of JVMTI PopFrame support
1887 Interpreter::_remove_activation_entry = __ pc();
1889 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1890 __ pop_ptr(Oexception); // get exception
1892 // Intel has the following comment:
1893 //// remove the activation (without doing throws on illegalMonitorExceptions)
1894 // They remove the activation without checking for bad monitor state.
1895 // %%% We should make sure this is the right semantics before implementing.
1897 __ set_vm_result(Oexception);
1898 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1900 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1902 __ get_vm_result(Oexception);
1903 __ verify_oop(Oexception);
1905 const int return_reg_adjustment = frame::pc_return_offset;
1906 Address issuing_pc_addr(I7, return_reg_adjustment);
1908 // We are done with this activation frame; find out where to go next.
1909 // The continuation point will be an exception handler, which expects
1910 // the following registers set up:
1911 //
1912 // Oexception: exception
1913 // Oissuing_pc: the local call that threw exception
1914 // Other On: garbage
1915 // In/Ln: the contents of the caller's register window
1916 //
1917 // We do the required restore at the last possible moment, because we
1918 // need to preserve some state across a runtime call.
1919 // (Remember that the caller activation is unknown--it might not be
1920 // interpreted, so things like Lscratch are useless in the caller.)
1922 // Although the Intel version uses call_C, we can use the more
1923 // compact call_VM. (The only real difference on SPARC is a
1924 // harmlessly ignored [re]set_last_Java_frame, compared with
1925 // the Intel code which lacks this.)
1926 __ mov(Oexception, Oexception->after_save()); // get exception in I0 so it will be on O0 after restore
1927 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
1928 __ super_call_VM_leaf(L7_thread_cache,
1929 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1930 G2_thread, Oissuing_pc->after_save());
1932 // The caller's SP was adjusted upon method entry to accommodate
1933 // the callee's non-argument locals. Undo that adjustment.
1934 __ JMP(O0, 0); // return exception handler in caller
1935 __ delayed()->restore(I5_savedSP, G0, SP);
1937 // (same old exception object is already in Oexception; see above)
1938 // Note that an "issuing PC" is actually the next PC after the call
1939 }
1942 //
1943 // JVMTI ForceEarlyReturn support
1944 //
1946 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1947 address entry = __ pc();
1949 __ empty_expression_stack();
1950 __ load_earlyret_value(state);
1952 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1953 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1955 // Clear the earlyret state
1956 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1958 __ remove_activation(state,
1959 /* throw_monitor_exception */ false,
1960 /* install_monitor_exception */ false);
1962 // The caller's SP was adjusted upon method entry to accommodate
1963 // the callee's non-argument locals. Undo that adjustment.
1964 __ ret(); // return to caller
1965 __ delayed()->restore(I5_savedSP, G0, SP);
1967 return entry;
1968 } // end of JVMTI ForceEarlyReturn support
1971 //------------------------------------------------------------------------------------------------------------------------
1972 // Helper for vtos entry point generation
1974 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1975 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1976 Label L;
1977 aep = __ pc(); __ push_ptr(); __ ba_short(L);
1978 fep = __ pc(); __ push_f(); __ ba_short(L);
1979 dep = __ pc(); __ push_d(); __ ba_short(L);
1980 lep = __ pc(); __ push_l(); __ ba_short(L);
1981 iep = __ pc(); __ push_i();
1982 bep = cep = sep = iep; // there aren't any
1983 vep = __ pc(); __ bind(L); // fall through
1984 generate_and_dispatch(t);
1985 }
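// Editorial sketch of the entry points above: a vtos template only truly
// needs vep; the other entry points adapt a value arriving in a tos register
// by pushing it first. Control arriving at aep, for example, pushes the
// object reference (push_ptr) and branches to L, which is bound at vep;
// bep/cep/sep alias iep because sub-int values are carried as ints.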
1987 // --------------------------------------------------------------------------------
1990 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1991 : TemplateInterpreterGenerator(code) {
1992 generate_all(); // down here so it can be "virtual"
1993 }
1995 // --------------------------------------------------------------------------------
1997 // Non-product code
1998 #ifndef PRODUCT
1999 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2000 address entry = __ pc();
2002 __ push(state);
2003 __ mov(O7, Lscratch); // protect return address within interpreter
2005 // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
2006 __ mov( Otos_l2, G3_scratch );
2007 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
2008 __ mov(Lscratch, O7); // restore return address
2009 __ pop(state);
2010 __ retl();
2011 __ delayed()->nop();
2013 return entry;
2014 }
2017 // helpers for generate_and_dispatch
2019 void TemplateInterpreterGenerator::count_bytecode() {
2020 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
2021 }
2024 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2025 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
2026 }
2029 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2030 AddressLiteral index (&BytecodePairHistogram::_index);
2031 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
2033 // get index, shift out old bytecode, bring in new bytecode, and store it
2034 // _index = (_index >> log2_number_of_codes) |
2035 // (bytecode << log2_number_of_codes);
2037 __ load_contents(index, G4_scratch);
2038 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
2039 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
2040 __ or3( G3_scratch, G4_scratch, G4_scratch );
2041 __ store_contents(G4_scratch, index, G3_scratch);
2043 // bump bucket contents
2044 // _counters[_index] ++;
2046 __ set(counters, G3_scratch); // loads into G3_scratch
2047 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
2048 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
2049 __ ld (G3_scratch, 0, G4_scratch);
2050 __ inc (G4_scratch);
2051 __ st (G4_scratch, 0, G3_scratch);
2052 }
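// Editorial worked example of the index update above, assuming
// log2_number_of_codes == 8: executing bytecode B right after bytecode A
// leaves _index = A | (B << 8), so the low bits identify the previous
// bytecode and _counters[_index] counts occurrences of the (A, B) pair.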
2055 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2056 // Call a little run-time stub to avoid blow-up for each bytecode.
2057 // The run-time stub saves the right registers, depending on
2058 // the tosca in-state for the given template.
2059 address entry = Interpreter::trace_code(t->tos_in());
2060 guarantee(entry != NULL, "entry must have been generated");
2061 __ call(entry, relocInfo::none);
2062 __ delayed()->nop();
2063 }
2066 void TemplateInterpreterGenerator::stop_interpreter_at() {
2067 AddressLiteral counter(&BytecodeCounter::_counter_value);
2068 __ load_contents(counter, G3_scratch);
2069 AddressLiteral stop_at(&StopInterpreterAt);
2070 __ load_ptr_contents(stop_at, G4_scratch);
2071 __ cmp(G3_scratch, G4_scratch);
2072 __ breakpoint_trap(Assembler::equal, Assembler::icc);
2073 }
2074 #endif // not PRODUCT
2075 #endif // !CC_INTERP