Thu, 12 May 2011 10:29:02 -0700
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
Reviewed-by: kvn, coleenp

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}

// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             G5_method_type,    // required
             G3_method_handle); // actual
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    __ delayed()->nop();
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                            // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                             // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba(false, L_got_cache);
    __ delayed()->nop();
  }

  return entry;
}
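
// Illustrative only: a C sketch of the argument pop performed at the return
// entry above. The helper name and free-standing form are hypothetical, not
// HotSpot API; the flags-word layout (parameter size in the low byte) mirrors
// the and3/sll/add sequence in generate_return_entry_for.
static intptr_t* sketch_pop_arguments(intptr_t* esp, unsigned cache_flags, int log_element_size) {
  unsigned words = cache_flags & 0xFF;                           // argument size in words
  return (intptr_t*)((char*)esp + (words << log_element_size));  // bump Lesp past the args
}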

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code");        break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code");        break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)               // marker for disassembly
  return entry;
}
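
// Illustrative only: why the T_BOOLEAN case above needs no branch. subcc
// computes 0 - O0, which sets the carry (borrow) flag exactly when O0 != 0,
// and addc then materializes that flag as 0 or 1. A hypothetical C equivalent:
static int sketch_normalize_boolean(unsigned native_result) {
  return native_result != 0 ? 1 : 0;   // !0 => true; 0 => false
}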

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
      __ delayed()->nop();
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba(false, done);
      __ delayed()->nop();
    }

    // Increment counter in methodOop
    __ bind(no_mdo);
    Address invocation_counter(Lmethod,
                               in_bytes(methodOopDesc::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G3_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ increment_invocation_counter(O0, G3_scratch);
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
      Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G3_scratch);
      __ inc(G3_scratch);
      __ st(G3_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp(O0, G3_scratch);
      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
      __ delayed()->nop();

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
    __ delayed()->nop();
  }

}
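
// Illustrative only: the update that increment_mask_and_jump above performs,
// written as a hypothetical C helper. The counter value lives in the upper
// bits of the word (shifted by InvocationCounter::count_shift), so adding
// count_increment and masking with the notification mask yields zero exactly
// when the count crosses the Tier0 notification frequency.
static bool sketch_counter_tick(int* counter_word, int increment, int mask) {
  *counter_word += increment;            // bump the invocation count
  return (*counter_word & mask) == 0;    // true => take the overflow branch
}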

// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}
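
// Illustrative only: the lock-object selection implemented by the branchy
// code above, as a hypothetical helper. Static methods synchronize on the
// class mirror (a java.lang.Class instance, not the klassOop); instance
// methods synchronize on the receiver in local 0.
static oop sketch_synchronization_object(bool is_static, oop receiver, oop mirror) {
  return is_static ? mirror : receiver;
}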

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}
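
// Illustrative only: the inequality the register shuffling above evaluates,
// as a hypothetical C helper. The frame fits if SP is still above the
// red/yellow guard zone with room for the whole new frame.
static bool sketch_frame_fits(char* sp, char* stack_base, size_t stack_size,
                              size_t guard_zone_size, size_t frame_size) {
  char* limit = stack_base - stack_size + guard_zone_size + frame_size;
  return sp > limit;   // branch taken to after_frame_check when this holds
}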

//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +       // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was set up correctly.

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}
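
// Condensed arithmetic of the non-native path in generate_fixed_frame above
// (all quantities in words until the final shifts; summary only, not code):
//   locals = round_to(size_of_locals - size_of_parameters, WordsPerLong)
//   frame  = round_to(max_stack + extra_space, WordsPerLong)
// The stack overflow check uses frame + locals; the caller's SP is then
// bumped down by locals, and the save instruction allocates frame alone.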

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry)

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go to the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                  // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}
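
// Illustrative only: the cp-cache index extraction used by the fast accessor
// above, as a hypothetical C helper. The first bytecode word reads, big
// endian, [aload_0][getfield][index_hi][index_lo]; shifting left 16 drops the
// two opcodes, and shifting right (16 - log2(entry_size)) leaves
// index * sizeof(ConstantPoolCacheEntry), ready to add to the cache base.
static unsigned sketch_cp_cache_offset(unsigned first_bytecode_word, unsigned log_entry_size_in_bytes) {
  unsigned w = first_bytecode_word << 16;      // keep only the 16-bit index
  return w >> (16 - log_entry_size_in_bytes);  // index scaled to a byte offset
}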

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go to the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // SERIALGC

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame, and it suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in the JNI
  // argument registers and outgoing param area.
  // Copy a few locals across so the new frame has the variables
  // we need; these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}
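
// Summary of the thread-state protocol the native stub above follows (the
// states are the JavaThreadState values used in the code; summary only):
//   _thread_in_Java -> _thread_in_native          before calling out
//   _thread_in_native -> _thread_in_native_trans  after the native call
//     (publish the state, then read the SafepointSynchronize state; if a
//      safepoint or suspend is pending, block in
//      check_special_condition_for_native_trans with the result saved)
//   _thread_in_native_trans -> _thread_in_Java    before touching oops again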


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod,   methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
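
  // Illustrative only: a C equivalent of the clearing loop above, assuming
  // Llocals points at local 0 and locals sit at decreasing addresses:
  //   for (int i = num_params; i < num_locals; i++) locals[-i] = 0;
  // Non-argument locals must be null so GC can parse the partially built frame.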

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the methodOop of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
1580 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
1582 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
1583 // expression stack, the callee will have callee_extra_locals (so we can account for
1584 // frame extension) and monitor_size for monitors. Basically we need to calculate
1585 // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
1586 //
1587 //
1588 // The big complication here is that we must ensure that the stack stays properly
1589 // aligned. This would be even uglier if the monitor size were not a multiple of
1590 // the required stack alignment. We are given that the sp (fp) is already aligned by
1591 // the caller, so we must ensure that it is properly aligned for our callee.
1592 //
1593 const int rounded_vm_local_words =
1594 round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1595 // callee_extra_locals and max_stack are counts, not sizes in the frame.
1596 const int locals_size =
1597 round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
1598 const int max_stack_words = max_stack * Interpreter::stackElementWords;
1599 return (round_to((max_stack_words
1600 //6815692//+ methodOopDesc::extra_stack_words()
1601 + rounded_vm_local_words
1602 + frame::memory_parameter_word_sp_offset), WordsPerLong)
1603 // already rounded
1604 + locals_size + monitor_size);
1605 }
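// Editorial note: a worked example with assumed, illustrative constants.
// Suppose WordsPerLong == 2, rounded_vm_local_words == 14,
// frame::memory_parameter_word_sp_offset == 23, and
// Interpreter::stackElementWords == 1. Then for callee_extra_locals == 3
// (locals_size rounds up to 4 words), max_stack == 5, and monitor_size == 0:
//   round_to(5 + 14 + 23, 2) + 4 + 0 == 42 + 4 == 46 words.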
1607 // How much stack a method's top interpreter activation needs, in words.
1608 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1610 // See call_stub code
1611 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
1612 WordsPerLong); // 7 + register save area
1614 // Save space for one monitor to get into the interpreted method in case
1615 // the method is synchronized
1616 int monitor_size = method->is_synchronized() ?
1617 1*frame::interpreter_frame_monitor_size() : 0;
1618 return size_activation_helper(method->max_locals(), method->max_stack(),
1619 monitor_size) + call_stub_size;
1620 }
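// Editorial note: continuing the assumed constants above
// (memory_parameter_word_sp_offset == 23, WordsPerLong == 2), call_stub_size
// would be round_to(7 + 23, 2) == 30 words, added on top of the activation size.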
1622 int AbstractInterpreter::layout_activation(methodOop method,
1623 int tempcount,
1624 int popframe_extra_args,
1625 int moncount,
1626 int caller_actual_parameters,
1627 int callee_param_count,
1628 int callee_local_count,
1629 frame* caller,
1630 frame* interpreter_frame,
1631 bool is_top_frame) {
1632 // Note: This calculation must exactly parallel the frame setup
1633 // in InterpreterGenerator::generate_fixed_frame.
1634 // If f != NULL (f denotes the interpreter_frame argument), set up the following variables:
1635 // - Lmethod
1636 // - Llocals
1637 // - Lmonitors (to the indicated number of monitors)
1638 // - Lesp (to the indicated number of temps)
1639 // The frame f (if not NULL) on entry is a description of the caller of the frame
1640 // we are about to layout. We are guaranteed that we will be able to fill in a
1641 // new interpreter frame as its callee (i.e. the stack space is allocated and
1642 // the amount was determined by an earlier call to this method with f == NULL).
1643 // On return, f (if not NULL) will describe the interpreter frame we just laid out.
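// Editorial sketch (illustrative, not part of the build): how a caller such as
// the deoptimization code uses the two-pass protocol described above. The
// names 'words' and 'skeleton_frame' are hypothetical.
#if 0
// Pass 1: interpreter_frame == NULL; only the required size (in words) is computed.
int words = AbstractInterpreter::layout_activation(
    method, tempcount, popframe_extra_args, moncount,
    caller_actual_parameters, callee_param_count, callee_local_count,
    caller, NULL /* size only */, is_top_frame);
// ... the caller allocates a skeleton interpreter frame of that size ...
// Pass 2: same arguments plus the skeleton frame; Lmethod, Llocals,
// Lmonitors, Lesp, etc. are filled in.
AbstractInterpreter::layout_activation(
    method, tempcount, popframe_extra_args, moncount,
    caller_actual_parameters, callee_param_count, callee_local_count,
    caller, skeleton_frame, is_top_frame);
#endif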
1645 int monitor_size = moncount * frame::interpreter_frame_monitor_size();
1646 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1648 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
1649 //
1650 // Note: if you look closely, this appears to be doing something quite different
1651 // from generate_fixed_frame. What is happening is this: on SPARC we have to do
1652 // this dance with interpreter_sp_adjustment because the window save area would
1653 // appear just below the bottom (tos) of the caller's java expression stack. Because
1654 // the interpreter wants the locals to be completely contiguous, generate_fixed_frame
1655 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
1656 // Now, in generate_fixed_frame the extension of the caller's sp happens in the callee.
1657 // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
1658 // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
1659 // because the oldest frame would have adjusted its caller's frame, and yet that frame
1660 // already exists and isn't part of this array of frames we are unpacking. So at first
1661 // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
1662 // after it calculates all of the frames' on_stack_size()'s, will figure out the
1663 // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
1664 // add up. It would seem simpler to account for the adjustment here (and remove the
1665 // callee... parameters). However, this would mean that this routine would have to take
1666 // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
1667 // and run the calling loop in the reverse order. It would also appear to require making
1668 // this code aware of the interactions when that initial caller frame is an osr or
1669 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
1670 // there is no sense in messing with working code.
1671 //
1673 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1674 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1676 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1677 monitor_size);
1679 if (interpreter_frame != NULL) {
1680 // The skeleton frame must already look like an interpreter frame
1681 // even if not fully filled out.
1682 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1684 intptr_t* fp = interpreter_frame->fp();
1686 JavaThread* thread = JavaThread::current();
1687 RegisterMap map(thread, false);
1688 // More verification that skeleton frame is properly walkable
1689 assert(fp == caller->sp(), "fp must match");
1691 intptr_t* montop = fp - rounded_vm_local_words;
1693 // preallocate monitors (cf. __ add_monitor_to_stack)
1694 intptr_t* monitors = montop - monitor_size;
1696 // preallocate stack space
1697 intptr_t* esp = monitors - 1 -
1698 (tempcount * Interpreter::stackElementWords) -
1699 popframe_extra_args;
1701 int local_words = method->max_locals() * Interpreter::stackElementWords;
1702 NEEDS_CLEANUP;
1703 intptr_t* locals;
1704 if (caller->is_interpreted_frame()) {
1705 // Can force the locals area to end up properly overlapping the top of the expression stack.
1706 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1707 // Note that this computation means we replace size_of_parameters() values from the caller
1708 // interpreter frame's expression stack with our argument locals
1709 int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
1710 locals = Lesp_ptr + parm_words;
1711 int delta = local_words - parm_words;
1712 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
1713 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
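// Editorial note: a worked example with assumed numbers, taking
// Interpreter::stackElementWords == 1 and WordsPerLong == 2. With
// caller_actual_parameters == 5 (parm_words == 5) and max_locals == 9
// (local_words == 9): delta == 4 and computed_sp_adjustment == 4, i.e. the
// caller's sp is extended by four words so the nonargument locals fit, while
// the argument locals overlap the caller's expression stack at Lesp_ptr.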
1714 } else {
1715 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1716 // Don't have Lesp available; lay out locals block in the caller
1717 // adjacent to the register window save area.
1718 //
1719 // Compiled frames do not allocate a varargs area which is why this if
1720 // statement is needed.
1721 //
1722 if (caller->is_compiled_frame()) {
1723 locals = fp + frame::register_save_words + local_words - 1;
1724 } else {
1725 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1726 }
1727 if (!caller->is_entry_frame()) {
1728 // The caller wants its own SP back
1729 int caller_frame_size = caller->cb()->frame_size();
1730 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1731 }
1732 }
1733 if (TraceDeoptimization) {
1734 if (caller->is_entry_frame()) {
1735 // make sure I5_savedSP and the entry frame's notion of saved SP
1736 // agree. This assertion duplicates a check in entry frame code
1737 // but catches the failure earlier.
1738 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
1739 "would change callers SP");
1740 }
1741 if (caller->is_entry_frame()) {
1742 tty->print("entry ");
1743 }
1744 if (caller->is_compiled_frame()) {
1745 tty->print("compiled ");
1746 if (caller->is_deoptimized_frame()) {
1747 tty->print("(deopt) ");
1748 }
1749 }
1750 if (caller->is_interpreted_frame()) {
1751 tty->print("interpreted ");
1752 }
1753 tty->print_cr("caller fp=" PTR_FORMAT " sp=" PTR_FORMAT, caller->fp(), caller->sp());
1754 tty->print_cr("save area = " PTR_FORMAT ", " PTR_FORMAT, caller->sp(), caller->sp() + 16);
1755 tty->print_cr("save area = " PTR_FORMAT ", " PTR_FORMAT, caller->fp(), caller->fp() + 16);
1756 tty->print_cr("interpreter fp=" PTR_FORMAT " sp=" PTR_FORMAT, interpreter_frame->fp(), interpreter_frame->sp());
1757 tty->print_cr("save area = " PTR_FORMAT ", " PTR_FORMAT, interpreter_frame->sp(), interpreter_frame->sp() + 16);
1758 tty->print_cr("save area = " PTR_FORMAT ", " PTR_FORMAT, interpreter_frame->fp(), interpreter_frame->fp() + 16);
1759 tty->print_cr("Llocals = " PTR_FORMAT, locals);
1760 tty->print_cr("Lesp = " PTR_FORMAT, esp);
1761 tty->print_cr("Lmonitors = " PTR_FORMAT, monitors);
1762 }
1764 if (method->max_locals() > 0) {
1765 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1766 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1767 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1768 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1769 }
1770 #ifdef _LP64
1771 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1772 #endif
1774 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
1775 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
1776 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
1777 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
1778 // Llast_SP will be the same as SP, as there is no adapter space
1779 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1780 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1781 #ifdef FAST_DISPATCH
1782 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1783 #endif
1786 #ifdef ASSERT
1787 BasicObjectLock* mp = (BasicObjectLock*)monitors;
1789 assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1790 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
1791 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
1792 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1793 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1795 // check bounds
1796 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1797 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1798 assert(lo < monitors && montop <= hi, "monitors in bounds");
1799 assert(lo <= esp && esp < monitors, "esp in bounds");
1800 #endif // ASSERT
1801 }
1803 return raw_frame_size;
1804 }
1806 //----------------------------------------------------------------------------------------------------
1807 // Exceptions
1808 void TemplateInterpreterGenerator::generate_throw_exception() {
1810 // Entry point in previous activation (i.e., if the caller was interpreted)
1811 Interpreter::_rethrow_exception_entry = __ pc();
1812 // O0: exception
1814 // entry point for exceptions thrown within interpreter code
1815 Interpreter::_throw_exception_entry = __ pc();
1816 __ verify_thread();
1817 // expression stack is undefined here
1818 // O0: exception, i.e. Oexception
1819 // Lbcp: exception bcx
1820 __ verify_oop(Oexception);
1823 // expression stack must be empty before entering the VM in case of an exception
1824 __ empty_expression_stack();
1825 // find exception handler address and preserve exception oop
1826 // call C routine to find handler and jump to it
1827 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1828 __ push_ptr(O1); // push exception for exception handler bytecodes
1830 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1831 __ delayed()->nop();
1834 // if the exception is not handled in the current frame
1835 // the frame is removed and the exception is rethrown
1836 // (i.e. exception continuation is _rethrow_exception)
1837 //
1838 // Note: At this point Lbcp still refers to the instruction which caused
1839 // the exception and the expression stack is empty. Thus, for any VM calls
1840 // at this point, GC will find a legal oop map (with empty expression stack).
1842 // in current activation
1843 // tos: exception
1844 // Lbcp: exception bcp
1846 //
1847 // JVMTI PopFrame support
1848 //
1850 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1851 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1852 // Set the popframe_processing bit in popframe_condition indicating that we are
1853 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1854 // popframe handling cycles.
1856 __ ld(popframe_condition_addr, G3_scratch);
1857 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1858 __ stw(G3_scratch, popframe_condition_addr);
1860 // Empty the expression stack, as in normal exception handling
1861 __ empty_expression_stack();
1862 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1864 {
1865 // Check to see whether we are returning to a deoptimized frame.
1866 // (The PopFrame call ensures that the caller of the popped frame is
1867 // either interpreted or compiled and deoptimizes it if compiled.)
1868 // In this case, we can't call dispatch_next() after the frame is
1869 // popped, but instead must save the incoming arguments and restore
1870 // them after deoptimization has occurred.
1871 //
1872 // Note that we don't compare the return PC against the
1873 // deoptimization blob's unpack entry because of the presence of
1874 // adapter frames in C2.
1875 Label caller_not_deoptimized;
1876 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1877 __ tst(O0);
1878 __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
1879 __ delayed()->nop();
1881 const Register Gtmp1 = G3_scratch;
1882 const Register Gtmp2 = G1_scratch;
1884 // Compute size of arguments for saving when returning to deoptimized caller
1885 __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
1886 __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1887 __ sub(Llocals, Gtmp1, Gtmp2);
1888 __ add(Gtmp2, wordSize, Gtmp2);
1889 // Save these arguments
1890 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1891 // Inform deoptimization that it is responsible for restoring these arguments
1892 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1893 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1894 __ st(Gtmp1, popframe_condition_addr);
1896 // Return from the current method
1897 // The caller's SP was adjusted upon method entry to accommodate
1898 // the callee's non-argument locals. Undo that adjustment.
1899 __ ret();
1900 __ delayed()->restore(I5_savedSP, G0, SP);
1902 __ bind(caller_not_deoptimized);
1903 }
1905 // Clear the popframe condition flag
1906 __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1908 // Get out of the current method (how this is done depends on the particular compiler calling
1909 // convention that the interpreter currently follows)
1910 // The caller's SP was adjusted upon method entry to accommodate
1911 // the callee's non-argument locals. Undo that adjustment.
1912 __ restore(I5_savedSP, G0, SP);
1913 // The method data pointer was incremented already during
1914 // call profiling. We have to restore the mdp for the current bcp.
1915 if (ProfileInterpreter) {
1916 __ set_method_data_pointer_for_bcp();
1917 }
1918 // Resume bytecode interpretation at the current bcp
1919 __ dispatch_next(vtos);
1920 // end of JVMTI PopFrame support
1922 Interpreter::_remove_activation_entry = __ pc();
1924 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1925 __ pop_ptr(Oexception); // get exception
1927 // Intel has the following comment:
1928 //// remove the activation (without doing throws on illegalMonitorExceptions)
1929 // They remove the activation without checking for bad monitor state.
1930 // %%% We should make sure this is the right semantics before implementing.
1932 // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
1933 __ set_vm_result(Oexception);
1934 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1936 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1938 __ get_vm_result(Oexception);
1939 __ verify_oop(Oexception);
1941 const int return_reg_adjustment = frame::pc_return_offset;
1942 Address issuing_pc_addr(I7, return_reg_adjustment);
1944 // We are done with this activation frame; find out where to go next.
1945 // The continuation point will be an exception handler, which expects
1946 // the following registers set up:
1947 //
1948 // Oexception: exception
1949 // Oissuing_pc: the local call that threw exception
1950 // Other On: garbage
1951 // In/Ln: the contents of the caller's register window
1952 //
1953 // We do the required restore at the last possible moment, because we
1954 // need to preserve some state across a runtime call.
1955 // (Remember that the caller activation is unknown--it might not be
1956 // interpreted, so things like Lscratch are useless in the caller.)
1958 // Although the Intel version uses call_C, we can use the more
1959 // compact call_VM. (The only real difference on SPARC is a
1960 // harmlessly ignored [re]set_last_Java_frame, compared with
1961 // the Intel code which lacks this.)
1962 __ mov(Oexception, Oexception->after_save()); // get exception in I0 so it will be on O0 after restore
1963 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
1964 __ super_call_VM_leaf(L7_thread_cache,
1965 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1966 G2_thread, Oissuing_pc->after_save());
1968 // The caller's SP was adjusted upon method entry to accommodate
1969 // the callee's non-argument locals. Undo that adjustment.
1970 __ JMP(O0, 0); // return exception handler in caller
1971 __ delayed()->restore(I5_savedSP, G0, SP);
1973 // (same old exception object is already in Oexception; see above)
1974 // Note that an "issuing PC" is actually the next PC after the call
1975 }
1978 //
1979 // JVMTI ForceEarlyReturn support
1980 //
1982 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1983 address entry = __ pc();
1985 __ empty_expression_stack();
1986 __ load_earlyret_value(state);
1988 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1989 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1991 // Clear the earlyret state
1992 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1994 __ remove_activation(state,
1995 /* throw_monitor_exception */ false,
1996 /* install_monitor_exception */ false);
1998 // The caller's SP was adjusted upon method entry to accommodate
1999 // the callee's non-argument locals. Undo that adjustment.
2000 __ ret(); // return to caller
2001 __ delayed()->restore(I5_savedSP, G0, SP);
2003 return entry;
2004 } // end of JVMTI ForceEarlyReturn support
2007 //------------------------------------------------------------------------------------------------------------------------
2008 // Helper for vtos entry point generation
2010 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
2011 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
2012 Label L;
2013 aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
2014 fep = __ pc(); __ push_f(); __ ba(false, L); __ delayed()->nop();
2015 dep = __ pc(); __ push_d(); __ ba(false, L); __ delayed()->nop();
2016 lep = __ pc(); __ push_l(); __ ba(false, L); __ delayed()->nop();
2017 iep = __ pc(); __ push_i();
2018 bep = cep = sep = iep; // there aren't any
2019 vep = __ pc(); __ bind(L); // fall through
2020 generate_and_dispatch(t);
2021 }
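// Editorial note: aep/fep/dep/lep each push the current tos value and branch
// to the shared label L; iep pushes an int and falls through into vep, which
// binds L. byte/char/short reuse the int entry (bep = cep = sep = iep) since
// they are int-sized on the expression stack.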
2023 // --------------------------------------------------------------------------------
2026 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2027 : TemplateInterpreterGenerator(code) {
2028 generate_all(); // down here so it can be "virtual"
2029 }
2031 // --------------------------------------------------------------------------------
2033 // Non-product code
2034 #ifndef PRODUCT
2035 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2036 address entry = __ pc();
2038 __ push(state);
2039 __ mov(O7, Lscratch); // protect return address within interpreter
2041 // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
2042 __ mov( Otos_l2, G3_scratch );
2043 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
2044 __ mov(Lscratch, O7); // restore return address
2045 __ pop(state);
2046 __ retl();
2047 __ delayed()->nop();
2049 return entry;
2050 }
2053 // helpers for generate_and_dispatch
2055 void TemplateInterpreterGenerator::count_bytecode() {
2056 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
2057 }
2060 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2061 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
2062 }
2065 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2066 AddressLiteral index (&BytecodePairHistogram::_index);
2067 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
2069 // get index, shift out old bytecode, bring in new bytecode, and store it
2070 // _index = (_index >> log2_number_of_codes) |
2071 // (bytecode << log2_number_of_codes);
2073 __ load_contents(index, G4_scratch);
2074 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
2075 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
2076 __ or3( G3_scratch, G4_scratch, G4_scratch );
2077 __ store_contents(G4_scratch, index, G3_scratch);
2079 // bump bucket contents
2080 // _counters[_index] ++;
2082 __ set(counters, G3_scratch); // loads into G3_scratch
2083 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
2084 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
2085 __ ld (G3_scratch, 0, G4_scratch);
2086 __ inc (G4_scratch);
2087 __ st (G4_scratch, 0, G3_scratch);
2088 }
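// Editorial sketch (illustrative, not part of the build): the same update the
// generated code above performs, written in plain C++ and mirroring the
// formula in the comment ("_index = (_index >> log2) | (bytecode << log2)").
#if 0
static void record_bytecode_pair(int bytecode) {
  int i = BytecodePairHistogram::_index;
  // shift out the old bytecode, bring in the new one
  i = (i >> BytecodePairHistogram::log2_number_of_codes)
    | (bytecode << BytecodePairHistogram::log2_number_of_codes);
  BytecodePairHistogram::_index = i;
  // bump the bucket for this (previous, current) bytecode pair
  BytecodePairHistogram::_counters[i]++;
}
#endif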
2091 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2092 // Call a little run-time stub to avoid blow-up for each bytecode.
2093 // The run-time stub saves the right registers, depending on
2094 // the tosca in-state for the given template.
2095 address entry = Interpreter::trace_code(t->tos_in());
2096 guarantee(entry != NULL, "entry must have been generated");
2097 __ call(entry, relocInfo::none);
2098 __ delayed()->nop();
2099 }
2102 void TemplateInterpreterGenerator::stop_interpreter_at() {
2103 AddressLiteral counter(&BytecodeCounter::_counter_value);
2104 __ load_contents(counter, G3_scratch);
2105 AddressLiteral stop_at(&StopInterpreterAt);
2106 __ load_ptr_contents(stop_at, G4_scratch);
2107 __ cmp(G3_scratch, G4_scratch);
2108 __ breakpoint_trap(Assembler::equal);
2109 }
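// Editorial note: used with the develop flag -XX:StopInterpreterAt=<n>; once
// BytecodeCounter::_counter_value reaches <n>, the compare generated above
// fires the breakpoint trap, stopping in the debugger at that bytecode.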
2110 #endif // not PRODUCT
2111 #endif // !CC_INTERP