Tue, 03 Aug 2010 08:13:38 -0400
6953477: Increase portability and flexibility of building Hotspot
Summary: A collection of portability improvements including shared code support for PPC, ARM platforms, software floating point, cross compilation support and improvements in error crash detail.
Reviewed-by: phh, never, coleenp, dholmes
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateInterpreter_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------
void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}
void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}
// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             G5_method_type,    // required
             G3_method_handle); // actual
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // stupid shuffling here.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first, which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2
  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    __ delayed()->nop();
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                            // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                             // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba(false, L_got_cache);
    __ delayed()->nop();
  }

  return entry;
}
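
// Worked example of the argument pop at the end of the entry above
// (illustrative only; exact sizes depend on the build): a callee taking
// three argument words has 3 in the low byte of the cache entry's flags, so
//   size = (flags & 0xFF) << Interpreter::logStackElementSize
// which on a 64-bit build (shift of 3) gives 24 bytes, and
//   Lesp += 24
// pops exactly the three pushed arguments from the expression stack.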
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                        break;
    case T_VOID   : /* nothing to do */                        break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );    break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );    break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)               // marker for disassembly
  return entry;
}
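
// The T_BOOLEAN conversion above is a branch-free "!= 0" test (an
// illustrative note, not generated code):
//   subcc(G0, O0, G0)   computes 0 - O0, setting the carry flag iff O0 != 0
//                       (the subtraction borrows for any nonzero O0);
//   addc(G0, 0, Itos_i) then materializes 0 + 0 + carry, i.e. 1 or 0.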
address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}
//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Update standard invocation counters
  __ increment_invocation_counter(O0, G3_scratch);
  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
    __ ld(interpreter_invocation_counter, G3_scratch);
    __ inc(G3_scratch);
    __ st(G3_scratch, interpreter_invocation_counter);
  }

  if (ProfileInterpreter && profile_method != NULL) {
    // Test to see if we should create a method data oop
    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
    __ sethi(profile_limit, G3_scratch);
    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
    __ delayed()->nop();

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(*profile_method);
  }

  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
  __ sethi(invocation_limit, G3_scratch);
  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();
}
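
// A minimal sketch of the 'sticky' overflow test mentioned above
// (illustrative only, not generated code): the counter only ever increments,
// so the unsigned compare
//
//   if ((juint)counter >= (juint)InterpreterInvocationLimit)  goto overflow;
//
// keeps reporting overflow on every later check once the limit is first
// crossed (a counter that has wrapped into the sign bit also compares as a
// huge unsigned value), rather than firing only at the exact crossing point.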
// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}
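
// Net effect of the code above (a rough sketch, not generated code):
//
//   O0 = is_static ? method->constants()->pool_holder()->java_mirror()
//                  : locals[0];                      // the receiver
//
// i.e. a static method locks its class mirror while an instance method
// locks the receiver; O0 is then stored into the freshly pushed monitor.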
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );
  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}
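
// In effect the check above computes (illustrative pseudocode only):
//
//   limit = (stack_base - stack_size)                       // lowest stack address
//         + (StackRedPages + StackYellowPages) * page_size  // skip the guard zone
//         + frame_size;
//   if (SP <= limit)  throw StackOverflowError;
//
// Frames of at most one page are not checked explicitly; touching the
// guard pages themselves is what catches those.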
//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +  // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}
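
// Rough summary of the sizes computed above (illustrative only; the
// generated code works in words and then shifts to bytes):
//
//   native:      Gframe_size  = round_to(mem_param_words + extra_space, WordsPerLong)
//   interpreted: Gframe_size  = round_to(max_stack + extra_space, WordsPerLong)
//                Glocals_size = round_to(size_of_locals - size_of_parameters, WordsPerLong)
//
// The extra (non-parameter) locals are carved out of the caller's frame
// (SP -= Glocals_size) and the save instruction then allocates Gframe_size
// bytes below that for the new window and expression stack.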

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry)

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;

  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if (UseFastAccessorMethods && !UseCompressedOops) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);                // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
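
    // The shift pair above extracts the 16-bit index and scales it in one
    // go. Roughly (illustrative; the entry size depends on the build):
    //
    //   (insn_word << 16) >> (16 - log2(entry_size_in_bytes))
    //       == index_at_2 * entry_size_in_bytes
    //
    // e.g. index 7 with a 16-byte cache entry yields 0x70, the byte offset
    // of entry 7, ready to be added to the cache base below.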

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                       // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
      JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  // The flag forces an unwind in the topmost interpreter frame while
  // suppressing the unlock during that unwind.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need; these values will be dead at the JNI call and
  // therefore not GC-volatile like the values in the current
  // frame (Lmethod in particular).

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);        // Calculate negative of current frame size
  __ save(SP, O3, SP);       // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // Call the signature handler. It will move the args properly since Llocals
  // in the current frame matches that in the outer frame.

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }
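
  // Outline of the state transition above (illustrative pseudocode only):
  //
  //   thread->state = _thread_in_native_trans;   // publish before reading
  //   membar / serialization page write;         // order the store vs. load
  //   if (safepoint_in_progress || thread->suspend_flags != 0)
  //     check_special_condition_for_native_trans(thread);  // may block
  //   thread->state = _thread_in_Java;           // done just below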

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);  // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1. We can't tell if we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);    // Shift bits into high G1
  __ srl (O1, 0, O1);     // Zero extend O1
  __ or3 (O1, G1, G1);    // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod,   methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for
  // vanilla java entries. It could only be false for the specialized
  // entries like accessor or empty, which have no extra locals, so the
  // testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already
  // over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
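
  // Net effect of the loop above (a sketch, assuming one word per stack
  // element): with n parameter words and m total locals, the stores cover
  // locals[n] .. locals[m-1], exactly the non-parameter locals, walking from
  // the lowest-addressed local upward. The st_ptr sits in the annulled delay
  // slot, so it executes only on taken (i.e. in-range) iterations.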

  const Address do_not_unlock_if_synchronized(G2_thread,
      JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);

#ifdef ASSERT
      __ tst(O0);
      __ breakpoint_trap(Assembler::notEqual);
#endif

      __ set_method_data_pointer();

      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions, but
// the frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the methodOop of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the
  // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller, so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                   //6815692//+ methodOopDesc::extra_stack_words()
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}
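
// Worked example (illustrative numbers only, not the actual platform
// constants): with max_stack = 4, stackElementWords = 1,
// rounded_vm_local_words = 8, memory_parameter_word_sp_offset = 23, one
// 2-word monitor and no extra locals, the helper returns
//   round_to(4 + 8 + 23, WordsPerLong) + 0 + 2  =  36 + 2  =  38 words.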

// How much stack a method top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {

  // See call_stub code
  int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
                                WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size   = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}

int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int callee_param_count,
                                           int callee_local_count,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.
  // If f!=NULL, set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame f (if not NULL) on entry is a description of the caller of the frame
  // we are about to lay out. We are guaranteed that we will be able to fill in a
  // new interpreter frame as its callee (i.e. the stack space is allocated and
  // the amount was determined by an earlier call to this method with f == NULL).
  // On return, f (if not NULL) will describe the interpreter frame we just laid out.
1520 int monitor_size = moncount * frame::interpreter_frame_monitor_size();
1521 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1523 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
1524 //
1525 // Note: if you look closely this appears to be doing something quite different
1526 // from generate_fixed_frame. What is happening is this. On sparc we have to do
1527 // this dance with interpreter_sp_adjustment because the window save area would
1528 // appear just below the bottom (tos) of the caller's java expression stack. Because
1529 // the interpreter wants the locals to be completely contiguous, generate_fixed_frame
1530 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
1531 // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
1532 // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
1533 // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
1534 // because the oldest frame would have adjusted its caller's frame and yet that frame
1535 // already exists and isn't part of this array of frames we are unpacking. So at first
1536 // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
1537 // after it calculates all of the frames' on_stack_size()'s, will figure out the
1538 // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
1539 // add up. It does seem like it would be simpler to account for the adjustment here (and remove the
1540 // callee... parameters). However, this would mean that this routine would have to take
1541 // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
1542 // and run the calling loop in the reverse order. This would also appear to mean making
1543 // this code aware of what the interactions are when that initial caller frame was an osr or
1544 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
1545 // there is no sense in messing with working code.
1546 //
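// A rough sketch of the situation described above (stack grows downward,
// illustrative only, not to scale):
//
//   caller's frame
//     ...
//     caller's expression stack
//     parameter words (tos)          <- reused in place as the callee's
//                                       first locals
//     "extra locals" extension       <- caller's sp moved down by
//                                       (max_locals - parameter_size)
//   ------------------------------- <- callee's register window save area
//     callee's interpreter frame ...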
1548 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1549 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1551 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1552 monitor_size);
1554 if (interpreter_frame != NULL) {
1555 // The skeleton frame must already look like an interpreter frame
1556 // even if not fully filled out.
1557 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1559 intptr_t* fp = interpreter_frame->fp();
1561 JavaThread* thread = JavaThread::current();
1562 RegisterMap map(thread, false);
1563 // More verification that skeleton frame is properly walkable
1564 assert(fp == caller->sp(), "fp must match");
1566 intptr_t* montop = fp - rounded_vm_local_words;
1568 // preallocate monitors (cf. __ add_monitor_to_stack)
1569 intptr_t* monitors = montop - monitor_size;
1571 // preallocate stack space
1572 intptr_t* esp = monitors - 1 -
1573 (tempcount * Interpreter::stackElementWords) -
1574 popframe_extra_args;
1576 int local_words = method->max_locals() * Interpreter::stackElementWords;
1577 int parm_words = method->size_of_parameters() * Interpreter::stackElementWords;
1578 NEEDS_CLEANUP;
1579 intptr_t* locals;
1580 if (caller->is_interpreted_frame()) {
1581 // Can force the locals area to end up properly overlapping the top of the expression stack.
1582 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1583 // Note that this computation means we replace size_of_parameters() values from the caller
1584 // interpreter frame's expression stack with our argument locals
1585 locals = Lesp_ptr + parm_words;
1586 int delta = local_words - parm_words;
1587 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
1588 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
1589 } else {
1590 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1591 // Don't have Lesp available; lay out locals block in the caller
1592 // adjacent to the register window save area.
1593 //
1594 // Compiled frames do not allocate a varargs area which is why this if
1595 // statement is needed.
1596 //
1597 if (caller->is_compiled_frame()) {
1598 locals = fp + frame::register_save_words + local_words - 1;
1599 } else {
1600 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1601 }
1602 if (!caller->is_entry_frame()) {
1603 // Caller wants his own SP back
1604 int caller_frame_size = caller->cb()->frame_size();
1605 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1606 }
1607 }
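// A hedged example for the interpreted-caller branch above (assuming
// Interpreter::stackElementWords == 1 and WordsPerLong == 2): a method with
// 5 locals and 2 parameters gives local_words = 5, parm_words = 2, so
// delta = 3 and computed_sp_adjustment = round_to(3, 2) = 4 words.
// I5_savedSP then records fp + 4 words (biased), i.e. the caller's sp from
// before it was extended for the extra locals, so the callee's return can
// undo the extension.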
1608 if (TraceDeoptimization) {
1609 if (caller->is_entry_frame()) {
1610 // make sure I5_savedSP and the entry frame's notion of saved SP
1611 // agree. This assertion duplicates a check in entry frame code
1612 // but catches the failure earlier.
1613 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
1614 "would change callers SP");
1615 }
1616 if (caller->is_entry_frame()) {
1617 tty->print("entry ");
1618 }
1619 if (caller->is_compiled_frame()) {
1620 tty->print("compiled ");
1621 if (caller->is_deoptimized_frame()) {
1622 tty->print("(deopt) ");
1623 }
1624 }
1625 if (caller->is_interpreted_frame()) {
1626 tty->print("interpreted ");
1627 }
1628 tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
1629 tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
1630 tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
1631 tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
1632 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
1633 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
1634 tty->print_cr("Llocals = 0x%x", locals);
1635 tty->print_cr("Lesp = 0x%x", esp);
1636 tty->print_cr("Lmonitors = 0x%x", monitors);
1637 }
1639 if (method->max_locals() > 0) {
1640 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1641 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1642 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1643 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1644 }
1645 #ifdef _LP64
1646 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1647 #endif
1649 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
1650 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
1651 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
1652 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
1653 // Llast_SP will be the same as SP, as there is no adapter space
1654 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1655 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1656 #ifdef FAST_DISPATCH
1657 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1658 #endif
1661 #ifdef ASSERT
1662 BasicObjectLock* mp = (BasicObjectLock*)monitors;
1664 assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1665 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
1666 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
1667 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1668 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1670 // check bounds
1671 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1672 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1673 assert(lo < monitors && montop <= hi, "monitors in bounds");
1674 assert(lo <= esp && esp < monitors, "esp in bounds");
1675 #endif // ASSERT
1676 }
1678 return raw_frame_size;
1679 }
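// A minimal usage sketch (hypothetical caller and variable names, not part
// of this file): deoptimization first calls layout_activation with
// interpreter_frame == NULL to size the skeleton frame, then calls it again
// to populate the frame it allocated:
//
//   int size = AbstractInterpreter::layout_activation(
//                  method, temps, 0 /* popframe_extra_args */, moncount,
//                  callee_params, callee_locals, &caller_frame,
//                  NULL /* just compute size */, is_top_frame);
//   // ... allocate a skeleton interpreter frame of 'size' words ...
//   AbstractInterpreter::layout_activation(
//                  method, temps, 0, moncount, callee_params, callee_locals,
//                  &caller_frame, &skeleton_frame, is_top_frame);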
1681 //----------------------------------------------------------------------------------------------------
1682 // Exceptions
1683 void TemplateInterpreterGenerator::generate_throw_exception() {
1685 // Entry point in previous activation (i.e., if the caller was interpreted)
1686 Interpreter::_rethrow_exception_entry = __ pc();
1687 // O0: exception
1689 // entry point for exceptions thrown within interpreter code
1690 Interpreter::_throw_exception_entry = __ pc();
1691 __ verify_thread();
1692 // expression stack is undefined here
1693 // O0: exception, i.e. Oexception
1694 // Lbcp: exception bcx
1695 __ verify_oop(Oexception);
1698 // expression stack must be empty before entering the VM in case of an exception
1699 __ empty_expression_stack();
1700 // find exception handler address and preserve exception oop
1701 // call C routine to find handler and jump to it
1702 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1703 __ push_ptr(O1); // push exception for exception handler bytecodes
1705 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1706 __ delayed()->nop();
1709 // if the exception is not handled in the current frame
1710 // the frame is removed and the exception is rethrown
1711 // (i.e. exception continuation is _rethrow_exception)
1712 //
1713 // Note: At this point the bcp still points at the instruction which caused
1714 // the exception, and the expression stack is empty. Thus, for any VM calls
1715 // at this point, GC will find a legal oop map (with empty expression stack).
1717 // in current activation
1718 // tos: exception
1719 // Lbcp: exception bcp
1721 //
1722 // JVMTI PopFrame support
1723 //
1725 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1726 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1727 // Set the popframe_processing bit in popframe_condition indicating that we are
1728 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1729 // popframe handling cycles.
1731 __ ld(popframe_condition_addr, G3_scratch);
1732 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1733 __ stw(G3_scratch, popframe_condition_addr);
1735 // Empty the expression stack, as in normal exception handling
1736 __ empty_expression_stack();
1737 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1739 {
1740 // Check to see whether we are returning to a deoptimized frame.
1741 // (The PopFrame call ensures that the caller of the popped frame is
1742 // either interpreted or compiled and deoptimizes it if compiled.)
1743 // In this case, we can't call dispatch_next() after the frame is
1744 // popped, but instead must save the incoming arguments and restore
1745 // them after deoptimization has occurred.
1746 //
1747 // Note that we don't compare the return PC against the
1748 // deoptimization blob's unpack entry because of the presence of
1749 // adapter frames in C2.
1750 Label caller_not_deoptimized;
1751 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1752 __ tst(O0);
1753 __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
1754 __ delayed()->nop();
1756 const Register Gtmp1 = G3_scratch;
1757 const Register Gtmp2 = G1_scratch;
1759 // Compute size of arguments for saving when returning to deoptimized caller
1760 __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
1761 __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1762 __ sub(Llocals, Gtmp1, Gtmp2);
1763 __ add(Gtmp2, wordSize, Gtmp2);
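// A hedged example of the address arithmetic above (assuming _LP64, so
// Interpreter::logStackElementSize == 3 and wordSize == 8): for a method
// with 3 parameters, Gtmp1 = 3 << 3 = 24 bytes, and Gtmp2 = Llocals - 24 + 8
// points at the lowest-addressed parameter slot, so [Gtmp2, Gtmp2 + Gtmp1)
// covers exactly the incoming arguments to be preserved.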
1764 // Save these arguments
1765 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1766 // Inform deoptimization that it is responsible for restoring these arguments
1767 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1768 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1769 __ st(Gtmp1, popframe_condition_addr);
1771 // Return from the current method
1772 // The caller's SP was adjusted upon method entry to accommodate
1773 // the callee's non-argument locals. Undo that adjustment.
1774 __ ret();
1775 __ delayed()->restore(I5_savedSP, G0, SP);
1777 __ bind(caller_not_deoptimized);
1778 }
1780 // Clear the popframe condition flag
1781 __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1783 // Get out of the current method (how this is done depends on the particular compiler calling
1784 // convention that the interpreter currently follows)
1785 // The caller's SP was adjusted upon method entry to accommodate
1786 // the callee's non-argument locals. Undo that adjustment.
1787 __ restore(I5_savedSP, G0, SP);
1788 // The method data pointer was incremented already during
1789 // call profiling. We have to restore the mdp for the current bcp.
1790 if (ProfileInterpreter) {
1791 __ set_method_data_pointer_for_bcp();
1792 }
1793 // Resume bytecode interpretation at the current bcp
1794 __ dispatch_next(vtos);
1795 // end of JVMTI PopFrame support
1797 Interpreter::_remove_activation_entry = __ pc();
1799 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1800 __ pop_ptr(Oexception); // get exception
1802 // Intel has the following comment:
1803 //// remove the activation (without doing throws on illegalMonitorExceptions)
1804 // They remove the activation without checking for bad monitor state.
1805 // %%% We should make sure this is the right semantics before implementing.
1807 // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
1808 __ set_vm_result(Oexception);
1809 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1811 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1813 __ get_vm_result(Oexception);
1814 __ verify_oop(Oexception);
1816 const int return_reg_adjustment = frame::pc_return_offset;
1817 Address issuing_pc_addr(I7, return_reg_adjustment);
1819 // We are done with this activation frame; find out where to go next.
1820 // The continuation point will be an exception handler, which expects
1821 // the following registers set up:
1822 //
1823 // Oexception: exception
1824 // Oissuing_pc: the local call that threw exception
1825 // Other On: garbage
1826 // In/Ln: the contents of the caller's register window
1827 //
1828 // We do the required restore at the last possible moment, because we
1829 // need to preserve some state across a runtime call.
1830 // (Remember that the caller activation is unknown--it might not be
1831 // interpreted, so things like Lscratch are useless in the caller.)
1833 // Although the Intel version uses call_C, we can use the more
1834 // compact call_VM. (The only real difference on SPARC is a
1835 // harmlessly ignored [re]set_last_Java_frame, compared with
1836 // the Intel code which lacks this.)
1837 __ mov(Oexception, Oexception->after_save());  // get exception in I0 so it will be on O0 after restore
1838 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
1839 __ super_call_VM_leaf(L7_thread_cache,
1840 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1841 G2_thread, Oissuing_pc->after_save());
1843 // The caller's SP was adjusted upon method entry to accommodate
1844 // the callee's non-argument locals. Undo that adjustment.
1845 __ JMP(O0, 0); // return exception handler in caller
1846 __ delayed()->restore(I5_savedSP, G0, SP);
1848 // (same old exception object is already in Oexception; see above)
1849 // Note that an "issuing PC" is actually the next PC after the call
1850 }
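// A note on the register-window handoff above: on SPARC, ->after_save()
// names the input register that becomes the corresponding out register once
// the delayed 'restore' pops the window, so staging the exception in
// Oexception->after_save() (I0) and the issuing pc in
// Oissuing_pc->after_save() (I1) leaves them in the caller's O0/O1 for the
// exception handler.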
1853 //
1854 // JVMTI ForceEarlyReturn support
1855 //
1857 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1858 address entry = __ pc();
1860 __ empty_expression_stack();
1861 __ load_earlyret_value(state);
1863 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1864 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1866 // Clear the earlyret state
1867 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1869 __ remove_activation(state,
1870 /* throw_monitor_exception */ false,
1871 /* install_monitor_exception */ false);
1873 // The caller's SP was adjusted upon method entry to accommodate
1874 // the callee's non-argument locals. Undo that adjustment.
1875 __ ret(); // return to caller
1876 __ delayed()->restore(I5_savedSP, G0, SP);
1878 return entry;
1879 } // end of JVMTI ForceEarlyReturn support
1882 //------------------------------------------------------------------------------------------------------------------------
1883 // Helper for vtos entry point generation
1885 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1886 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1887 Label L;
1888 aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
1889 fep = __ pc(); __ push_f(); __ ba(false, L); __ delayed()->nop();
1890 dep = __ pc(); __ push_d(); __ ba(false, L); __ delayed()->nop();
1891 lep = __ pc(); __ push_l(); __ ba(false, L); __ delayed()->nop();
1892 iep = __ pc(); __ push_i();
1893 bep = cep = sep = iep; // there aren't any
1894 vep = __ pc(); __ bind(L); // fall through
1895 generate_and_dispatch(t);
1896 }
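// An illustrative reading of the vtos funnel above: a bytecode that starts
// in, say, ftos first pushes the cached float onto the expression stack
// (fep) and branches to L, where the vtos entry (vep) falls through, so all
// tos states reach generate_and_dispatch with the value safely on the
// stack. byte, char and short share the int entry since they are cached the
// same way as int.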
1898 // --------------------------------------------------------------------------------
1901 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1902 : TemplateInterpreterGenerator(code) {
1903 generate_all(); // down here so it can be "virtual"
1904 }
1906 // --------------------------------------------------------------------------------
1908 // Non-product code
1909 #ifndef PRODUCT
1910 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1911 address entry = __ pc();
1913 __ push(state);
1914 __ mov(O7, Lscratch); // protect return address within interpreter
1916 // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
1917 __ mov( Otos_l2, G3_scratch );
1918 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1919 __ mov(Lscratch, O7); // restore return address
1920 __ pop(state);
1921 __ retl();
1922 __ delayed()->nop();
1924 return entry;
1925 }
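// A hedged usage note: this stub exists only in non-product VMs; as used by
// trace_bytecode() below, each bytecode calls it (presumably when the
// TraceBytecodes develop flag is on) to print the bytecode and the
// top-of-stack value via SharedRuntime::trace_bytecode.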
1928 // helpers for generate_and_dispatch
1930 void TemplateInterpreterGenerator::count_bytecode() {
1931 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1932 }
1935 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1936 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1937 }
1940 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1941 AddressLiteral index (&BytecodePairHistogram::_index);
1942 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1944 // get index, shift out old bytecode, bring in new bytecode, and store it
1945 // _index = (_index >> log2_number_of_codes) |
1946 // (bytecode << log2_number_of_codes);
1948 __ load_contents(index, G4_scratch);
1949 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1950 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
1951 __ or3( G3_scratch, G4_scratch, G4_scratch );
1952 __ store_contents(G4_scratch, index, G3_scratch);
1954 // bump bucket contents
1955 // _counters[_index] ++;
1957 __ set(counters, G3_scratch); // loads into G3_scratch
1958 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
1959 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
1960 __ ld (G3_scratch, 0, G4_scratch);
1961 __ inc (G4_scratch);
1962 __ st (G4_scratch, 0, G3_scratch);
1963 }
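// A hedged worked example of the index update above (assuming
// log2_number_of_codes == 8): with _index == 0x1234 and new bytecode 0xAB,
// _index becomes (0x1234 >> 8) | (0xAB << 8) == 0x12 | 0xAB00 == 0xAB12,
// so the new bytecode sits in the high bits and the previous one in the low
// bits, selecting the counter for that (previous, current) pair.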
1966 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1967 // Call a little run-time stub to avoid blow-up for each bytecode.
1968 // The run-time stub saves the right registers, depending on
1969 // the tosca in-state for the given template.
1970 address entry = Interpreter::trace_code(t->tos_in());
1971 guarantee(entry != NULL, "entry must have been generated");
1972 __ call(entry, relocInfo::none);
1973 __ delayed()->nop();
1974 }
1977 void TemplateInterpreterGenerator::stop_interpreter_at() {
1978 AddressLiteral counter(&BytecodeCounter::_counter_value);
1979 __ load_contents(counter, G3_scratch);
1980 AddressLiteral stop_at(&StopInterpreterAt);
1981 __ load_ptr_contents(stop_at, G4_scratch);
1982 __ cmp(G3_scratch, G4_scratch);
1983 __ breakpoint_trap(Assembler::equal);
1984 }
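// A hedged usage note: together with count_bytecode() above, this backs a
// develop flag (StopInterpreterAt) in non-product builds; once the global
// bytecode counter reaches the requested value, the conditional breakpoint
// trap fires, making it possible to stop a native debugger at a precise
// point in a bytecode replay.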
1985 #endif // not PRODUCT
1986 #endif // !CC_INTERP