|
1 /* |
|
2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "asm/macroAssembler.hpp" |
|
27 #include "interpreter/bytecodeHistogram.hpp" |
|
28 #include "interpreter/interpreter.hpp" |
|
29 #include "interpreter/interpreterGenerator.hpp" |
|
30 #include "interpreter/interpreterRuntime.hpp" |
|
31 #include "interpreter/templateTable.hpp" |
|
32 #include "oops/arrayOop.hpp" |
|
33 #include "oops/methodData.hpp" |
|
34 #include "oops/method.hpp" |
|
35 #include "oops/oop.inline.hpp" |
|
36 #include "prims/jvmtiExport.hpp" |
|
37 #include "prims/jvmtiThreadState.hpp" |
|
38 #include "runtime/arguments.hpp" |
|
39 #include "runtime/deoptimization.hpp" |
|
40 #include "runtime/frame.inline.hpp" |
|
41 #include "runtime/sharedRuntime.hpp" |
|
42 #include "runtime/stubRoutines.hpp" |
|
43 #include "runtime/synchronizer.hpp" |
|
44 #include "runtime/timer.hpp" |
|
45 #include "runtime/vframeArray.hpp" |
|
46 #include "utilities/debug.hpp" |
|
47 #include "utilities/macros.hpp" |
|
48 |
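// Shorthand: "__" expands to "_masm->" so the generator code below reads like assembly. |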
|
49 #define __ _masm-> |
|
50 |
|
51 #ifndef CC_INTERP |
|
52 |
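// Byte offsets (from rbp) of interpreter-frame slots; the frame:: constants are word indices, hence the wordSize scaling. |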
|
53 const int method_offset = frame::interpreter_frame_method_offset * wordSize; |
|
54 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; |
|
55 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; |
|
56 |
|
57 //----------------------------------------------------------------------------- |
|
58 |
|
59 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { |
|
60 address entry = __ pc(); |
|
61 |
|
62 #ifdef ASSERT |
|
63 { |
|
64 Label L; |
|
65 __ lea(rax, Address(rbp, |
|
66 frame::interpreter_frame_monitor_block_top_offset * |
|
67 wordSize)); |
|
68 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack |
|
69 // grows negative) |
|
70 __ jcc(Assembler::aboveEqual, L); // check if frame is complete |
|
71 __ stop ("interpreter frame not set up"); |
|
72 __ bind(L); |
|
73 } |
|
74 #endif // ASSERT |
|
75 // Restore bcp under the assumption that the current frame is still |
|
76 // interpreted |
|
77 __ restore_bcp(); |
|
78 |
|
79 // expression stack must be empty before entering the VM if an |
|
80 // exception happened |
|
81 __ empty_expression_stack(); |
|
82 // throw exception |
|
83 __ call_VM(noreg, |
|
84 CAST_FROM_FN_PTR(address, |
|
85 InterpreterRuntime::throw_StackOverflowError)); |
|
86 return entry; |
|
87 } |
|
88 |
|
89 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( |
|
90 const char* name) { |
|
91 address entry = __ pc(); |
|
92 // expression stack must be empty before entering the VM if an |
|
93 // exception happened |
|
94 __ empty_expression_stack(); |
|
95 // setup parameters |
|
96 // ??? convention: expect aberrant index in register rbx |
|
97 __ lea(c_rarg1, ExternalAddress((address)name)); |
|
98 __ call_VM(noreg, |
|
99 CAST_FROM_FN_PTR(address, |
|
100 InterpreterRuntime:: |
|
101 throw_ArrayIndexOutOfBoundsException), |
|
102 c_rarg1, rbx); |
|
103 return entry; |
|
104 } |
|
105 |
|
106 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { |
|
107 address entry = __ pc(); |
|
108 |
|
109 // object is at TOS |
|
110 __ pop(c_rarg1); |
|
111 |
|
112 // expression stack must be empty before entering the VM if an |
|
113 // exception happened |
|
114 __ empty_expression_stack(); |
|
115 |
|
116 __ call_VM(noreg, |
|
117 CAST_FROM_FN_PTR(address, |
|
118 InterpreterRuntime:: |
|
119 throw_ClassCastException), |
|
120 c_rarg1); |
|
121 return entry; |
|
122 } |
|
123 |
|
124 address TemplateInterpreterGenerator::generate_exception_handler_common( |
|
125 const char* name, const char* message, bool pass_oop) { |
|
126 assert(!pass_oop || message == NULL, "either oop or message but not both"); |
|
127 address entry = __ pc(); |
|
128 if (pass_oop) { |
|
129 // object is at TOS |
|
130 __ pop(c_rarg2); |
|
131 } |
|
132 // expression stack must be empty before entering the VM if an |
|
133 // exception happened |
|
134 __ empty_expression_stack(); |
|
135 // setup parameters |
|
136 __ lea(c_rarg1, ExternalAddress((address)name)); |
|
137 if (pass_oop) { |
|
138 __ call_VM(rax, CAST_FROM_FN_PTR(address, |
|
139 InterpreterRuntime:: |
|
140 create_klass_exception), |
|
141 c_rarg1, c_rarg2); |
|
142 } else { |
|
143 // kind of lame ExternalAddress can't take NULL because |
|
144 // external_word_Relocation will assert. |
|
145 if (message != NULL) { |
|
146 __ lea(c_rarg2, ExternalAddress((address)message)); |
|
147 } else { |
|
148 __ movptr(c_rarg2, NULL_WORD); |
|
149 } |
|
150 __ call_VM(rax, |
|
151 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), |
|
152 c_rarg1, c_rarg2); |
|
153 } |
|
154 // throw exception |
|
155 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); |
|
156 return entry; |
|
157 } |
|
158 |
|
159 |
|
160 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { |
|
161 address entry = __ pc(); |
|
162 // NULL last_sp until next java call |
|
163 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); |
|
164 __ dispatch_next(state); |
|
165 return entry; |
|
166 } |
|
167 |
|
168 |
|
169 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { |
|
170 address entry = __ pc(); |
|
171 |
|
172 // Restore stack bottom in case i2c adjusted stack |
|
173 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); |
|
174 // and NULL it as marker that esp is now tos until next java call |
|
175 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); |
|
176 |
|
177 __ restore_bcp(); |
|
178 __ restore_locals(); |
|
179 |
|
180 if (state == atos) { |
|
181 Register mdp = rbx; |
|
182 Register tmp = rcx; |
|
183 __ profile_return_type(mdp, rax, tmp); |
|
184 } |
|
185 |
|
186 const Register cache = rbx; |
|
187 const Register index = rcx; |
|
188 __ get_cache_and_index_at_bcp(cache, index, 1, index_size); |
|
189 |
|
190 const Register flags = cache; |
|
191 __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); |
|
192 __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); |
|
193 __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); |
|
194 __ dispatch_next(state, step); |
|
195 |
|
196 return entry; |
|
197 } |
|
198 |
|
199 |
|
200 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, |
|
201 int step) { |
|
202 address entry = __ pc(); |
|
203 // NULL last_sp until next java call |
|
204 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); |
|
205 __ restore_bcp(); |
|
206 __ restore_locals(); |
|
207 // handle exceptions |
|
208 { |
|
209 Label L; |
|
210 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); |
|
211 __ jcc(Assembler::zero, L); |
|
212 __ call_VM(noreg, |
|
213 CAST_FROM_FN_PTR(address, |
|
214 InterpreterRuntime::throw_pending_exception)); |
|
215 __ should_not_reach_here(); |
|
216 __ bind(L); |
|
217 } |
|
218 __ dispatch_next(state, step); |
|
219 return entry; |
|
220 } |
|
221 |
|
222 int AbstractInterpreter::BasicType_as_index(BasicType type) { |
|
223 int i = 0; |
|
224 switch (type) { |
|
225 case T_BOOLEAN: i = 0; break; |
|
226 case T_CHAR : i = 1; break; |
|
227 case T_BYTE : i = 2; break; |
|
228 case T_SHORT : i = 3; break; |
|
229 case T_INT : i = 4; break; |
|
230 case T_LONG : i = 5; break; |
|
231 case T_VOID : i = 6; break; |
|
232 case T_FLOAT : i = 7; break; |
|
233 case T_DOUBLE : i = 8; break; |
|
234 case T_OBJECT : i = 9; break; |
|
235 case T_ARRAY : i = 9; break; |
|
236 default : ShouldNotReachHere(); |
|
237 } |
|
238 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, |
|
239 "index out of bounds"); |
|
240 return i; |
|
241 } |
|
242 |
|
243 |
|
244 address TemplateInterpreterGenerator::generate_result_handler_for( |
|
245 BasicType type) { |
|
246 address entry = __ pc(); |
|
247 switch (type) { |
|
248 case T_BOOLEAN: __ c2bool(rax); break; |
|
249 case T_CHAR : __ movzwl(rax, rax); break; |
|
250 case T_BYTE : __ sign_extend_byte(rax); break; |
|
251 case T_SHORT : __ sign_extend_short(rax); break; |
|
252 case T_INT : /* nothing to do */ break; |
|
253 case T_LONG : /* nothing to do */ break; |
|
254 case T_VOID : /* nothing to do */ break; |
|
255 case T_FLOAT : /* nothing to do */ break; |
|
256 case T_DOUBLE : /* nothing to do */ break; |
|
257 case T_OBJECT : |
|
258 // retrieve result from frame |
|
259 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); |
|
260 // and verify it |
|
261 __ verify_oop(rax); |
|
262 break; |
|
263 default : ShouldNotReachHere(); |
|
264 } |
|
265 __ ret(0); // return from result handler |
|
266 return entry; |
|
267 } |
|
268 |
|
269 address TemplateInterpreterGenerator::generate_safept_entry_for( |
|
270 TosState state, |
|
271 address runtime_entry) { |
|
272 address entry = __ pc(); |
|
273 __ push(state); |
|
274 __ call_VM(noreg, runtime_entry); |
|
275 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); |
|
276 return entry; |
|
277 } |
|
278 |
|
279 |
|
280 |
|
281 // Helpers for commoning out cases in the various type of method entries. |
|
282 // |
|
283 |
|
284 |
|
285 // increment invocation count & check for overflow |
|
286 // |
|
287 // Note: checking for negative value instead of overflow |
|
288 // so we have a 'sticky' overflow test |
|
289 // |
|
290 // rbx: method |
|
291 // ecx: invocation counter |
|
292 // |
|
293 void InterpreterGenerator::generate_counter_incr( |
|
294 Label* overflow, |
|
295 Label* profile_method, |
|
296 Label* profile_method_continue) { |
|
297 Label done; |
|
298 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. |
|
299 if (TieredCompilation) { |
|
300 int increment = InvocationCounter::count_increment; |
|
301 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; |
|
302 Label no_mdo; |
|
303 if (ProfileInterpreter) { |
|
304 // Are we profiling? |
|
305 __ movptr(rax, Address(rbx, Method::method_data_offset())); |
|
306 __ testptr(rax, rax); |
|
307 __ jccb(Assembler::zero, no_mdo); |
|
308 // Increment counter in the MDO |
|
309 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + |
|
310 in_bytes(InvocationCounter::counter_offset())); |
|
311 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); |
|
312 __ jmp(done); |
|
313 } |
|
314 __ bind(no_mdo); |
|
315 // Increment counter in MethodCounters |
|
316 const Address invocation_counter(rax, |
|
317 MethodCounters::invocation_counter_offset() + |
|
318 InvocationCounter::counter_offset()); |
|
319 __ get_method_counters(rbx, rax, done); |
|
320 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, |
|
321 false, Assembler::zero, overflow); |
|
322 __ bind(done); |
|
323 } else { |
|
324 const Address backedge_counter(rax, |
|
325 MethodCounters::backedge_counter_offset() + |
|
326 InvocationCounter::counter_offset()); |
|
327 const Address invocation_counter(rax, |
|
328 MethodCounters::invocation_counter_offset() + |
|
329 InvocationCounter::counter_offset()); |
|
330 |
|
331 __ get_method_counters(rbx, rax, done); |
|
332 |
|
333 if (ProfileInterpreter) { |
|
334 __ incrementl(Address(rax, |
|
335 MethodCounters::interpreter_invocation_counter_offset())); |
|
336 } |
|
337 // Update standard invocation counters |
|
338 __ movl(rcx, invocation_counter); |
|
339 __ incrementl(rcx, InvocationCounter::count_increment); |
|
340 __ movl(invocation_counter, rcx); // save invocation count |
|
341 |
|
342 __ movl(rax, backedge_counter); // load backedge counter |
|
343 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits |
|
344 |
|
345 __ addl(rcx, rax); // add both counters |
|
346 |
|
347 // profile_method is non-null only for interpreted method so |
|
348 // profile_method != NULL == !native_call |
|
349 |
|
350 if (ProfileInterpreter && profile_method != NULL) { |
|
351 // Test to see if we should create a method data oop |
|
352 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); |
|
353 __ jcc(Assembler::less, *profile_method_continue); |
|
354 |
|
355 // if no method data exists, go to profile_method |
|
356 __ test_method_data_pointer(rax, *profile_method); |
|
357 } |
|
358 |
|
359 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); |
|
360 __ jcc(Assembler::aboveEqual, *overflow); |
|
361 __ bind(done); |
|
362 } |
|
363 } |
|
364 |
|
365 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { |
|
366 |
|
367 // Asm interpreter on entry |
|
368 // r14 - locals |
|
369 // r13 - bcp |
|
370 // rbx - method |
|
371 // edx - cpool --- DOES NOT APPEAR TO BE TRUE |
|
372 // rbp - interpreter frame |
|
373 |
|
374 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] |
|
375 // Everything as it was on entry |
|
376 // rdx is not restored. Doesn't appear to really be set. |
|
377 |
|
378 // InterpreterRuntime::frequency_counter_overflow takes two |
|
379 // arguments, the first (thread) is passed by call_VM, the second |
|
380 // indicates if the counter overflow occurs at a backwards branch |
|
381 // (NULL bcp). We pass zero for it. The call returns the address |
|
382 // of the verified entry point for the method or NULL if the |
|
383 // compilation did not complete (either went background or bailed |
|
384 // out). |
|
385 __ movl(c_rarg1, 0); |
|
386 __ call_VM(noreg, |
|
387 CAST_FROM_FN_PTR(address, |
|
388 InterpreterRuntime::frequency_counter_overflow), |
|
389 c_rarg1); |
|
390 |
|
391 __ movptr(rbx, Address(rbp, method_offset)); // restore Method* |
|
392 // Preserve invariant that r13/r14 contain bcp/locals of sender frame |
|
393 // and jump to the interpreted entry. |
|
394 __ jmp(*do_continue, relocInfo::none); |
|
395 } |
|
396 |
|
397 // See if we've got enough room on the stack for locals plus overhead. |
|
398 // The expression stack grows down incrementally, so the normal guard |
|
399 // page mechanism will work for that. |
|
400 // |
|
401 // NOTE: The additional locals are also always pushed (this wasn't |
|
402 // obvious in generate_method_entry), so the guard should work for them |
|
403 // too. |
|
404 // |
|
405 // Args: |
|
406 // rdx: number of additional locals this frame needs (what we must check) |
|
407 // rbx: Method* |
|
408 // |
|
409 // Kills: |
|
410 // rax |
|
411 void InterpreterGenerator::generate_stack_overflow_check(void) { |
|
412 |
|
413 // monitor entry size: see picture of stack set |
|
414 // (generate_method_entry) and frame_amd64.hpp |
|
415 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; |
|
416 |
|
417 // total overhead size: entry_size + (saved rbp through expr stack |
|
418 // bottom). Be sure to change this if you add/subtract anything |
|
419 // to/from the overhead area |
|
420 const int overhead_size = |
|
421 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size; |
|
422 |
|
423 const int page_size = os::vm_page_size(); |
|
424 |
|
425 Label after_frame_check; |
|
426 |
|
427 // see if the frame is greater than one page in size. If so, |
|
428 // then we need to verify there is enough stack space remaining |
|
429 // for the additional locals. |
|
430 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize); |
|
431 __ jcc(Assembler::belowEqual, after_frame_check); |
|
432 |
|
433 // compute rsp as if this were going to be the last frame on |
|
434 // the stack before the red zone |
|
435 |
|
436 const Address stack_base(r15_thread, Thread::stack_base_offset()); |
|
437 const Address stack_size(r15_thread, Thread::stack_size_offset()); |
|
438 |
|
439 // locals + overhead, in bytes |
|
440 __ mov(rax, rdx); |
|
441 __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter. |
|
442 __ addptr(rax, overhead_size); |
|
443 |
|
444 #ifdef ASSERT |
|
445 Label stack_base_okay, stack_size_okay; |
|
446 // verify that thread stack base is non-zero |
|
447 __ cmpptr(stack_base, (int32_t)NULL_WORD); |
|
448 __ jcc(Assembler::notEqual, stack_base_okay); |
|
449 __ stop("stack base is zero"); |
|
450 __ bind(stack_base_okay); |
|
451 // verify that thread stack size is non-zero |
|
452 __ cmpptr(stack_size, 0); |
|
453 __ jcc(Assembler::notEqual, stack_size_okay); |
|
454 __ stop("stack size is zero"); |
|
455 __ bind(stack_size_okay); |
|
456 #endif |
|
457 |
|
458 // Add stack base to locals and subtract stack size |
|
459 __ addptr(rax, stack_base); |
|
460 __ subptr(rax, stack_size); |
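// rax = stack bottom (base - size) plus the space this frame needs; the guard-zone pages are added next. |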
|
461 |
|
462 // Use the maximum number of pages we might bang. |
|
463 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : |
|
464 (StackRedPages+StackYellowPages); |
|
465 |
|
466 // add in the red and yellow zone sizes |
|
467 __ addptr(rax, max_pages * page_size); |
|
468 |
|
469 // check against the current stack bottom |
|
470 __ cmpptr(rsp, rax); |
|
471 __ jcc(Assembler::above, after_frame_check); |
|
472 |
|
473 // Restore sender's sp as SP. This is necessary if the sender's |
|
474 // frame is an extended compiled frame (see gen_c2i_adapter()) |
|
475 // and safer anyway in case of JSR292 adaptations. |
|
476 |
|
477 __ pop(rax); // return address must be moved if SP is changed |
|
478 __ mov(rsp, r13); |
|
479 __ push(rax); |
|
480 |
|
481 // Note: the restored frame is not necessarily interpreted. |
|
482 // Use the shared runtime version of the StackOverflowError. |
|
483 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); |
|
484 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); |
|
485 |
|
486 // all done with frame size check |
|
487 __ bind(after_frame_check); |
|
488 } |
|
489 |
|
490 // Allocate monitor and lock method (asm interpreter) |
|
491 // |
|
492 // Args: |
|
493 // rbx: Method* |
|
494 // r14: locals |
|
495 // |
|
496 // Kills: |
|
497 // rax |
|
498 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) |
|
499 // rscratch1, rscratch2 (scratch regs) |
|
500 void InterpreterGenerator::lock_method(void) { |
|
501 // synchronize method |
|
502 const Address access_flags(rbx, Method::access_flags_offset()); |
|
503 const Address monitor_block_top( |
|
504 rbp, |
|
505 frame::interpreter_frame_monitor_block_top_offset * wordSize); |
|
506 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; |
|
507 |
|
508 #ifdef ASSERT |
|
509 { |
|
510 Label L; |
|
511 __ movl(rax, access_flags); |
|
512 __ testl(rax, JVM_ACC_SYNCHRONIZED); |
|
513 __ jcc(Assembler::notZero, L); |
|
514 __ stop("method doesn't need synchronization"); |
|
515 __ bind(L); |
|
516 } |
|
517 #endif // ASSERT |
|
518 |
|
519 // get synchronization object |
|
520 { |
|
521 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); |
|
522 Label done; |
|
523 __ movl(rax, access_flags); |
|
524 __ testl(rax, JVM_ACC_STATIC); |
|
525 // get receiver (assume this is frequent case) |
|
526 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); |
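// (movptr does not modify the flags, so the jcc below still tests the JVM_ACC_STATIC result from above) |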
|
527 __ jcc(Assembler::zero, done); |
|
528 __ movptr(rax, Address(rbx, Method::const_offset())); |
|
529 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); |
|
530 __ movptr(rax, Address(rax, |
|
531 ConstantPool::pool_holder_offset_in_bytes())); |
|
532 __ movptr(rax, Address(rax, mirror_offset)); |
|
533 |
|
534 #ifdef ASSERT |
|
535 { |
|
536 Label L; |
|
537 __ testptr(rax, rax); |
|
538 __ jcc(Assembler::notZero, L); |
|
539 __ stop("synchronization object is NULL"); |
|
540 __ bind(L); |
|
541 } |
|
542 #endif // ASSERT |
|
543 |
|
544 __ bind(done); |
|
545 } |
|
546 |
|
547 // add space for monitor & lock |
|
548 __ subptr(rsp, entry_size); // add space for a monitor entry |
|
549 __ movptr(monitor_block_top, rsp); // set new monitor block top |
|
550 // store object |
|
551 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); |
|
552 __ movptr(c_rarg1, rsp); // object address |
|
553 __ lock_object(c_rarg1); |
|
554 } |
|
555 |
|
556 // Generate a fixed interpreter frame. This is identical setup for |
|
557 // interpreted methods and for native methods hence the shared code. |
|
558 // |
|
559 // Args: |
|
560 // rax: return address |
|
561 // rbx: Method* |
|
562 // r14: pointer to locals |
|
563 // r13: sender sp |
|
564 // rdx: cp cache |
|
565 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { |
|
566 // initialize fixed part of activation frame |
|
567 __ push(rax); // save return address |
|
568 __ enter(); // save old & set new rbp |
|
569 __ push(r13); // set sender sp |
|
570 __ push((int)NULL_WORD); // leave last_sp as null |
|
571 __ movptr(r13, Address(rbx, Method::const_offset())); // get ConstMethod* |
|
572 __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase |
|
573 __ push(rbx); // save Method* |
|
574 if (ProfileInterpreter) { |
|
575 Label method_data_continue; |
|
576 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset()))); |
|
577 __ testptr(rdx, rdx); |
|
578 __ jcc(Assembler::zero, method_data_continue); |
|
579 __ addptr(rdx, in_bytes(MethodData::data_offset())); |
|
580 __ bind(method_data_continue); |
|
581 __ push(rdx); // set the mdp (method data pointer) |
|
582 } else { |
|
583 __ push(0); |
|
584 } |
|
585 |
|
586 __ movptr(rdx, Address(rbx, Method::const_offset())); |
|
587 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); |
|
588 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); |
|
589 __ push(rdx); // set constant pool cache |
|
590 __ push(r14); // set locals pointer |
|
591 if (native_call) { |
|
592 __ push(0); // no bcp |
|
593 } else { |
|
594 __ push(r13); // set bcp |
|
595 } |
|
596 __ push(0); // reserve word for pointer to expression stack bottom |
|
597 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom |
|
598 } |
|
599 |
|
600 // End of helpers |
|
601 |
|
602 // Various method entries |
|
603 //------------------------------------------------------------------------------------------------------------------------ |
|
604 // |
|
605 // |
|
606 |
|
607 // Call an accessor method (assuming it is resolved, otherwise drop |
|
608 // into vanilla (slow path) entry) |
|
609 address InterpreterGenerator::generate_accessor_entry(void) { |
|
610 // rbx: Method* |
|
611 |
|
612 // r13: senderSP must be preserved for slow path, set SP to it on fast path |
|
613 |
|
614 address entry_point = __ pc(); |
|
615 Label xreturn_path; |
|
616 |
|
617 // do fastpath for resolved accessor methods |
|
618 if (UseFastAccessorMethods) { |
|
619 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites |
|
620 // thereof; parameter size = 1 |
|
621 // Note: We can only use this code if the getfield has been resolved |
|
622 // and if we don't have a null-pointer exception => check for |
|
623 // these conditions first and use slow path if necessary. |
|
624 Label slow_path; |
|
625 // If we need a safepoint check, generate full interpreter entry. |
|
626 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), |
|
627 SafepointSynchronize::_not_synchronized); |
|
628 |
|
629 __ jcc(Assembler::notEqual, slow_path); |
|
630 // rbx: method |
|
631 __ movptr(rax, Address(rsp, wordSize)); |
|
632 |
|
633 // check if local 0 != NULL and read field |
|
634 __ testptr(rax, rax); |
|
635 __ jcc(Assembler::zero, slow_path); |
|
636 |
|
637 // read first instruction word and extract bytecode @ 1 and index @ 2 |
|
638 __ movptr(rdx, Address(rbx, Method::const_offset())); |
|
639 __ movptr(rdi, Address(rdx, ConstMethod::constants_offset())); |
|
640 __ movl(rdx, Address(rdx, ConstMethod::codes_offset())); |
|
641 // Shift codes right to get the index on the right. |
|
642 // The bytecode fetched looks like <index><0xb4><0x2a> |
|
643 __ shrl(rdx, 2 * BitsPerByte); |
|
644 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); |
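// rdx is now index * entry-size-in-words; combined with the times_8 scaling below this gives the entry's byte offset. |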
|
645 __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes())); |
|
646 |
|
647 // rax: local 0 |
|
648 // rbx: method |
|
649 // rdx: constant pool cache index |
|
650 // rdi: constant pool cache |
|
651 |
|
652 // check if getfield has been resolved and read constant pool cache entry |
|
653 // check the validity of the cache entry by testing whether _indices field |
|
654 // contains Bytecode::_getfield in b1 byte. |
|
655 assert(in_words(ConstantPoolCacheEntry::size()) == 4, |
|
656 "adjust shift below"); |
|
657 __ movl(rcx, |
|
658 Address(rdi, |
|
659 rdx, |
|
660 Address::times_8, |
|
661 ConstantPoolCache::base_offset() + |
|
662 ConstantPoolCacheEntry::indices_offset())); |
|
663 __ shrl(rcx, 2 * BitsPerByte); |
|
664 __ andl(rcx, 0xFF); |
|
665 __ cmpl(rcx, Bytecodes::_getfield); |
|
666 __ jcc(Assembler::notEqual, slow_path); |
|
667 |
|
668 // Note: constant pool entry is not valid before bytecode is resolved |
|
669 __ movptr(rcx, |
|
670 Address(rdi, |
|
671 rdx, |
|
672 Address::times_8, |
|
673 ConstantPoolCache::base_offset() + |
|
674 ConstantPoolCacheEntry::f2_offset())); |
|
675 // edx: flags |
|
676 __ movl(rdx, |
|
677 Address(rdi, |
|
678 rdx, |
|
679 Address::times_8, |
|
680 ConstantPoolCache::base_offset() + |
|
681 ConstantPoolCacheEntry::flags_offset())); |
|
682 |
|
683 Label notObj, notInt, notByte, notShort; |
|
684 const Address field_address(rax, rcx, Address::times_1); |
|
685 |
|
686 // Need to differentiate between igetfield, agetfield, bgetfield etc. |
|
687 // because they are different sizes. |
|
688 // Use the type from the constant pool cache |
|
689 __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); |
|
690 // Make sure we don't need to mask edx after the above shift |
|
691 ConstantPoolCacheEntry::verify_tos_state_shift(); |
|
692 |
|
693 __ cmpl(rdx, atos); |
|
694 __ jcc(Assembler::notEqual, notObj); |
|
695 // atos |
|
696 __ load_heap_oop(rax, field_address); |
|
697 __ jmp(xreturn_path); |
|
698 |
|
699 __ bind(notObj); |
|
700 __ cmpl(rdx, itos); |
|
701 __ jcc(Assembler::notEqual, notInt); |
|
702 // itos |
|
703 __ movl(rax, field_address); |
|
704 __ jmp(xreturn_path); |
|
705 |
|
706 __ bind(notInt); |
|
707 __ cmpl(rdx, btos); |
|
708 __ jcc(Assembler::notEqual, notByte); |
|
709 // btos |
|
710 __ load_signed_byte(rax, field_address); |
|
711 __ jmp(xreturn_path); |
|
712 |
|
713 __ bind(notByte); |
|
714 __ cmpl(rdx, stos); |
|
715 __ jcc(Assembler::notEqual, notShort); |
|
716 // stos |
|
717 __ load_signed_short(rax, field_address); |
|
718 __ jmp(xreturn_path); |
|
719 |
|
720 __ bind(notShort); |
|
721 #ifdef ASSERT |
|
722 Label okay; |
|
723 __ cmpl(rdx, ctos); |
|
724 __ jcc(Assembler::equal, okay); |
|
725 __ stop("what type is this?"); |
|
726 __ bind(okay); |
|
727 #endif |
|
728 // ctos |
|
729 __ load_unsigned_short(rax, field_address); |
|
730 |
|
731 __ bind(xreturn_path); |
|
732 |
|
733 // _ireturn/_areturn |
|
734 __ pop(rdi); |
|
735 __ mov(rsp, r13); |
|
736 __ jmp(rdi); |
|
737 __ ret(0); |
|
738 |
|
739 // generate a vanilla interpreter entry as the slow path |
|
740 __ bind(slow_path); |
|
741 (void) generate_normal_entry(false); |
|
742 } else { |
|
743 (void) generate_normal_entry(false); |
|
744 } |
|
745 |
|
746 return entry_point; |
|
747 } |
|
748 |
|
749 // Method entry for java.lang.ref.Reference.get. |
|
750 address InterpreterGenerator::generate_Reference_get_entry(void) { |
|
751 #if INCLUDE_ALL_GCS |
|
752 // Code: _aload_0, _getfield, _areturn |
|
753 // parameter size = 1 |
|
754 // |
|
755 // The code that gets generated by this routine is split into 2 parts: |
|
756 // 1. The "intrinsified" code for G1 (or any SATB based GC), |
|
757 // 2. The slow path - which is an expansion of the regular method entry. |
|
758 // |
|
759 // Notes:- |
|
760 // * In the G1 code we do not check whether we need to block for |
|
761 // a safepoint. If G1 is enabled then we must execute the specialized |
|
762 // code for Reference.get (except when the Reference object is null) |
|
763 // so that we can log the value in the referent field with an SATB |
|
764 // update buffer. |
|
765 // If the code for the getfield template is modified so that the |
|
766 // G1 pre-barrier code is executed when the current method is |
|
767 // Reference.get() then going through the normal method entry |
|
768 // will be fine. |
|
769 // * The G1 code can, however, check the receiver object (the instance |
|
770 // of java.lang.Reference) and jump to the slow path if null. If the |
|
771 // Reference object is null then we obviously cannot fetch the referent |
|
772 // and so we don't need to call the G1 pre-barrier. Thus we can use the |
|
773 // regular method entry code to generate the NPE. |
|
774 // |
|
775 // This code is based on generate_accessor_entry. |
|
776 // |
|
777 // rbx: Method* |
|
778 |
|
779 // r13: senderSP must be preserved for slow path, set SP to it on fast path |
|
780 |
|
781 address entry = __ pc(); |
|
782 |
|
783 const int referent_offset = java_lang_ref_Reference::referent_offset; |
|
784 guarantee(referent_offset > 0, "referent offset not initialized"); |
|
785 |
|
786 if (UseG1GC) { |
|
787 Label slow_path; |
|
788 // rbx: method |
|
789 |
|
790 // Check if local 0 != NULL |
|
791 // If the receiver is null then it is OK to jump to the slow path. |
|
792 __ movptr(rax, Address(rsp, wordSize)); |
|
793 |
|
794 __ testptr(rax, rax); |
|
795 __ jcc(Assembler::zero, slow_path); |
|
796 |
|
797 // rax: local 0 |
|
798 // rbx: method (but can be used as scratch now) |
|
799 // rdx: scratch |
|
800 // rdi: scratch |
|
801 |
|
802 // Generate the G1 pre-barrier code to log the value of |
|
803 // the referent field in an SATB buffer. |
|
804 |
|
805 // Load the value of the referent field. |
|
806 const Address field_address(rax, referent_offset); |
|
807 __ load_heap_oop(rax, field_address); |
|
808 |
|
809 // Generate the G1 pre-barrier code to log the value of |
|
810 // the referent field in an SATB buffer. |
|
811 __ g1_write_barrier_pre(noreg /* obj */, |
|
812 rax /* pre_val */, |
|
813 r15_thread /* thread */, |
|
814 rbx /* tmp */, |
|
815 true /* tosca_live */, |
|
816 true /* expand_call */); |
|
817 |
|
818 // _areturn |
|
819 __ pop(rdi); // get return address |
|
820 __ mov(rsp, r13); // set sp to sender sp |
|
821 __ jmp(rdi); |
|
822 __ ret(0); |
|
823 |
|
824 // generate a vanilla interpreter entry as the slow path |
|
825 __ bind(slow_path); |
|
826 (void) generate_normal_entry(false); |
|
827 |
|
828 return entry; |
|
829 } |
|
830 #endif // INCLUDE_ALL_GCS |
|
831 |
|
832 // If G1 is not enabled then attempt to go through the accessor entry point |
|
833 // Reference.get is an accessor |
|
834 return generate_accessor_entry(); |
|
835 } |
|
836 |
|
837 /** |
|
838 * Method entry for static native methods: |
|
839 * int java.util.zip.CRC32.update(int crc, int b) |
|
840 */ |
|
841 address InterpreterGenerator::generate_CRC32_update_entry() { |
|
842 if (UseCRC32Intrinsics) { |
|
843 address entry = __ pc(); |
|
844 |
|
845 // rbx: Method* |
|
846 // r13: senderSP must be preserved for slow path, set SP to it on fast path |
|
847 // c_rarg0: scratch (rdi on non-Win64, rcx on Win64) |
|
848 // c_rarg1: scratch (rsi on non-Win64, rdx on Win64) |
|
849 |
|
850 Label slow_path; |
|
851 // If we need a safepoint check, generate full interpreter entry. |
|
852 ExternalAddress state(SafepointSynchronize::address_of_state()); |
|
853 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), |
|
854 SafepointSynchronize::_not_synchronized); |
|
855 __ jcc(Assembler::notEqual, slow_path); |
|
856 |
|
857 // We don't generate local frame and don't align stack because |
|
858 // we call stub code and there is no safepoint on this path. |
|
859 |
|
860 // Load parameters |
|
861 const Register crc = rax; // crc |
|
862 const Register val = c_rarg0; // source java byte value |
|
863 const Register tbl = c_rarg1; // scratch |
|
864 |
|
865 // Arguments are reversed on java expression stack |
|
866 __ movl(val, Address(rsp, wordSize)); // byte value |
|
867 __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC |
|
868 |
|
869 __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); |
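// The table-driven kernel works on the bit-complemented crc, so invert on entry and again on exit. |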
|
870 __ notl(crc); // ~crc |
|
871 __ update_byte_crc32(crc, val, tbl); |
|
872 __ notl(crc); // ~crc |
|
873 // result in rax |
|
874 |
|
875 // _areturn |
|
876 __ pop(rdi); // get return address |
|
877 __ mov(rsp, r13); // set sp to sender sp |
|
878 __ jmp(rdi); |
|
879 |
|
880 // generate a vanilla native entry as the slow path |
|
881 __ bind(slow_path); |
|
882 |
|
883 (void) generate_native_entry(false); |
|
884 |
|
885 return entry; |
|
886 } |
|
887 return generate_native_entry(false); |
|
888 } |
|
889 |
|
890 /** |
|
891 * Method entry for static native methods: |
|
892 * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) |
|
893 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) |
|
894 */ |
|
895 address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { |
|
896 if (UseCRC32Intrinsics) { |
|
897 address entry = __ pc(); |
|
898 |
|
899 // rbx: Method* |
|
900 // r13: senderSP must be preserved for slow path, set SP to it on fast path |
|
901 |
|
902 Label slow_path; |
|
903 // If we need a safepoint check, generate full interpreter entry. |
|
904 ExternalAddress state(SafepointSynchronize::address_of_state()); |
|
905 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), |
|
906 SafepointSynchronize::_not_synchronized); |
|
907 __ jcc(Assembler::notEqual, slow_path); |
|
908 |
|
909 // We don't generate local frame and don't align stack because |
|
910 // we call stub code and there is no safepoint on this path. |
|
911 |
|
912 // Load parameters |
|
913 const Register crc = c_rarg0; // crc |
|
914 const Register buf = c_rarg1; // source java byte array address |
|
915 const Register len = c_rarg2; // length |
|
916 const Register off = len; // offset (never overlaps with 'len') |
|
917 |
|
918 // Arguments are reversed on java expression stack |
|
919 // Calculate address of start element |
|
920 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { |
|
921 __ movptr(buf, Address(rsp, 3*wordSize)); // long buf |
|
922 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset |
|
923 __ addq(buf, off); // + offset |
|
924 __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC |
|
925 } else { |
|
926 __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array |
|
927 __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size |
|
928 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset |
|
929 __ addq(buf, off); // + offset |
|
930 __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC |
|
931 } |
|
932 // Can now load 'len' since we're finished with 'off' |
|
933 __ movl(len, Address(rsp, wordSize)); // Length |
|
934 |
|
935 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); |
|
936 // result in rax |
|
937 |
|
938 // _areturn |
|
939 __ pop(rdi); // get return address |
|
940 __ mov(rsp, r13); // set sp to sender sp |
|
941 __ jmp(rdi); |
|
942 |
|
943 // generate a vanilla native entry as the slow path |
|
944 __ bind(slow_path); |
|
945 |
|
946 (void) generate_native_entry(false); |
|
947 |
|
948 return entry; |
|
949 } |
|
950 return generate_native_entry(false); |
|
951 } |
|
952 |
|
953 // Interpreter stub for calling a native method. (asm interpreter) |
|
954 // This sets up a somewhat different looking stack for calling the |
|
955 // native method than the typical interpreter frame setup. |
|
956 address InterpreterGenerator::generate_native_entry(bool synchronized) { |
|
957 // determine code generation flags |
|
958 bool inc_counter = UseCompiler || CountCompiledCalls; |
|
959 |
|
960 // rbx: Method* |
|
961 // r13: sender sp |
|
962 |
|
963 address entry_point = __ pc(); |
|
964 |
|
965 const Address constMethod (rbx, Method::const_offset()); |
|
966 const Address access_flags (rbx, Method::access_flags_offset()); |
|
967 const Address size_of_parameters(rcx, ConstMethod:: |
|
968 size_of_parameters_offset()); |
|
969 |
|
970 |
|
971 // get parameter size (always needed) |
|
972 __ movptr(rcx, constMethod); |
|
973 __ load_unsigned_short(rcx, size_of_parameters); |
|
974 |
|
975 // native calls don't need the stack size check since they have no |
|
976 // expression stack and the arguments are already on the stack and |
|
977 // we only add a handful of words to the stack |
|
978 |
|
979 // rbx: Method* |
|
980 // rcx: size of parameters |
|
981 // r13: sender sp |
|
982 __ pop(rax); // get return address |
|
983 |
|
984 // for natives the size of locals is zero |
|
985 |
|
986 // compute beginning of parameters (r14) |
|
987 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); |
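// r14 now points at parameter/local 0; higher-numbered locals live at lower addresses. |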
|
988 |
|
989 // add 2 zero-initialized slots for native calls |
|
990 // initialize result_handler slot |
|
991 __ push((int) NULL_WORD); |
|
992 // slot for oop temp |
|
993 // (static native method holder mirror/jni oop result) |
|
994 __ push((int) NULL_WORD); |
|
995 |
|
996 // initialize fixed part of activation frame |
|
997 generate_fixed_frame(true); |
|
998 |
|
999 // make sure method is native & not abstract |
|
1000 #ifdef ASSERT |
|
1001 __ movl(rax, access_flags); |
|
1002 { |
|
1003 Label L; |
|
1004 __ testl(rax, JVM_ACC_NATIVE); |
|
1005 __ jcc(Assembler::notZero, L); |
|
1006 __ stop("tried to execute non-native method as native"); |
|
1007 __ bind(L); |
|
1008 } |
|
1009 { |
|
1010 Label L; |
|
1011 __ testl(rax, JVM_ACC_ABSTRACT); |
|
1012 __ jcc(Assembler::zero, L); |
|
1013 __ stop("tried to execute abstract method in interpreter"); |
|
1014 __ bind(L); |
|
1015 } |
|
1016 #endif |
|
1017 |
|
1018 // Since at this point in the method invocation the exception handler |
|
1019 // would try to exit the monitor of a synchronized method which hasn't |
|
1020 // been entered yet, we set the thread local variable |
|
1021 // _do_not_unlock_if_synchronized to true. The remove_activation will |
|
1022 // check this flag. |
|
1023 |
|
1024 const Address do_not_unlock_if_synchronized(r15_thread, |
|
1025 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); |
|
1026 __ movbool(do_not_unlock_if_synchronized, true); |
|
1027 |
|
1028 // increment invocation count & check for overflow |
|
1029 Label invocation_counter_overflow; |
|
1030 if (inc_counter) { |
|
1031 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); |
|
1032 } |
|
1033 |
|
1034 Label continue_after_compile; |
|
1035 __ bind(continue_after_compile); |
|
1036 |
|
1037 bang_stack_shadow_pages(true); |
|
1038 |
|
1039 // reset the _do_not_unlock_if_synchronized flag |
|
1040 __ movbool(do_not_unlock_if_synchronized, false); |
|
1041 |
|
1042 // check for synchronized methods |
|
1043 // Must happen AFTER invocation_counter check and stack overflow check, |
|
1044 // so method is not locked if overflows. |
|
1045 if (synchronized) { |
|
1046 lock_method(); |
|
1047 } else { |
|
1048 // no synchronization necessary |
|
1049 #ifdef ASSERT |
|
1050 { |
|
1051 Label L; |
|
1052 __ movl(rax, access_flags); |
|
1053 __ testl(rax, JVM_ACC_SYNCHRONIZED); |
|
1054 __ jcc(Assembler::zero, L); |
|
1055 __ stop("method needs synchronization"); |
|
1056 __ bind(L); |
|
1057 } |
|
1058 #endif |
|
1059 } |
|
1060 |
|
1061 // start execution |
|
1062 #ifdef ASSERT |
|
1063 { |
|
1064 Label L; |
|
1065 const Address monitor_block_top(rbp, |
|
1066 frame::interpreter_frame_monitor_block_top_offset * wordSize); |
|
1067 __ movptr(rax, monitor_block_top); |
|
1068 __ cmpptr(rax, rsp); |
|
1069 __ jcc(Assembler::equal, L); |
|
1070 __ stop("broken stack frame setup in interpreter"); |
|
1071 __ bind(L); |
|
1072 } |
|
1073 #endif |
|
1074 |
|
1075 // jvmti support |
|
1076 __ notify_method_entry(); |
|
1077 |
|
1078 // work registers |
|
1079 const Register method = rbx; |
|
1080 const Register t = r11; |
|
1081 |
|
1082 // allocate space for parameters |
|
1083 __ get_method(method); |
|
1084 __ movptr(t, Address(method, Method::const_offset())); |
|
1085 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); |
|
1086 __ shll(t, Interpreter::logStackElementSize); |
|
1087 |
|
1088 __ subptr(rsp, t); |
|
1089 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
|
1090 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) |
|
1091 |
|
1092 // get signature handler |
|
1093 { |
|
1094 Label L; |
|
1095 __ movptr(t, Address(method, Method::signature_handler_offset())); |
|
1096 __ testptr(t, t); |
|
1097 __ jcc(Assembler::notZero, L); |
|
1098 __ call_VM(noreg, |
|
1099 CAST_FROM_FN_PTR(address, |
|
1100 InterpreterRuntime::prepare_native_call), |
|
1101 method); |
|
1102 __ get_method(method); |
|
1103 __ movptr(t, Address(method, Method::signature_handler_offset())); |
|
1104 __ bind(L); |
|
1105 } |
|
1106 |
|
1107 // call signature handler |
|
1108 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14, |
|
1109 "adjust this code"); |
|
1110 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, |
|
1111 "adjust this code"); |
|
1112 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1, |
|
1113 "adjust this code"); |
|
1114 |
|
1115 // The generated handlers do not touch RBX (the method oop). |
|
1116 // However, large signatures cannot be cached and are generated |
|
1117 // each time here. The slow-path generator can do a GC on return, |
|
1118 // so we must reload it after the call. |
|
1119 __ call(t); |
|
1120 __ get_method(method); // slow path can do a GC, reload RBX |
|
1121 |
|
1122 |
|
1123 // result handler is in rax |
|
1124 // set result handler |
|
1125 __ movptr(Address(rbp, |
|
1126 (frame::interpreter_frame_result_handler_offset) * wordSize), |
|
1127 rax); |
|
1128 |
|
1129 // pass mirror handle if static call |
|
1130 { |
|
1131 Label L; |
|
1132 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); |
|
1133 __ movl(t, Address(method, Method::access_flags_offset())); |
|
1134 __ testl(t, JVM_ACC_STATIC); |
|
1135 __ jcc(Assembler::zero, L); |
|
1136 // get mirror |
|
1137 __ movptr(t, Address(method, Method::const_offset())); |
|
1138 __ movptr(t, Address(t, ConstMethod::constants_offset())); |
|
1139 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes())); |
|
1140 __ movptr(t, Address(t, mirror_offset)); |
|
1141 // copy mirror into activation frame |
|
1142 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), |
|
1143 t); |
|
1144 // pass handle to mirror |
|
1145 __ lea(c_rarg1, |
|
1146 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); |
|
1147 __ bind(L); |
|
1148 } |
|
1149 |
|
1150 // get native function entry point |
|
1151 { |
|
1152 Label L; |
|
1153 __ movptr(rax, Address(method, Method::native_function_offset())); |
|
1154 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); |
|
1155 __ movptr(rscratch2, unsatisfied.addr()); |
|
1156 __ cmpptr(rax, rscratch2); |
|
1157 __ jcc(Assembler::notEqual, L); |
|
1158 __ call_VM(noreg, |
|
1159 CAST_FROM_FN_PTR(address, |
|
1160 InterpreterRuntime::prepare_native_call), |
|
1161 method); |
|
1162 __ get_method(method); |
|
1163 __ movptr(rax, Address(method, Method::native_function_offset())); |
|
1164 __ bind(L); |
|
1165 } |
|
1166 |
|
1167 // pass JNIEnv |
|
1168 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); |
|
1169 |
|
1170 // It is enough that the pc() points into the right code |
|
1171 // segment. It does not have to be the correct return pc. |
|
1172 __ set_last_Java_frame(rsp, rbp, (address) __ pc()); |
|
1173 |
|
1174 // change thread state |
|
1175 #ifdef ASSERT |
|
1176 { |
|
1177 Label L; |
|
1178 __ movl(t, Address(r15_thread, JavaThread::thread_state_offset())); |
|
1179 __ cmpl(t, _thread_in_Java); |
|
1180 __ jcc(Assembler::equal, L); |
|
1181 __ stop("Wrong thread state in native stub"); |
|
1182 __ bind(L); |
|
1183 } |
|
1184 #endif |
|
1185 |
|
1186 // Change state to native |
|
1187 |
|
1188 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), |
|
1189 _thread_in_native); |
|
1190 |
|
1191 // Call the native method. |
|
1192 __ call(rax); |
|
1193 // result potentially in rax or xmm0 |
|
1194 |
|
1195 // Verify or restore cpu control state after JNI call |
|
1196 __ restore_cpu_control_state_after_jni(); |
|
1197 |
|
1198 // NOTE: The order of these pushes is known to frame::interpreter_frame_result |
|
1199 // in order to extract the result of a method call. If the order of these |
|
1200 // pushes changes or anything else is added to the stack then the code in |
|
1201 // interpreter_frame_result must also change. |
|
1202 |
|
1203 __ push(dtos); |
|
1204 __ push(ltos); |
|
1205 |
|
1206 // change thread state |
|
1207 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), |
|
1208 _thread_in_native_trans); |
|
1209 |
|
1210 if (os::is_MP()) { |
|
1211 if (UseMembar) { |
|
1212 // Force this write out before the read below |
|
1213 __ membar(Assembler::Membar_mask_bits( |
|
1214 Assembler::LoadLoad | Assembler::LoadStore | |
|
1215 Assembler::StoreLoad | Assembler::StoreStore)); |
|
1216 } else { |
|
1217 // Write serialization page so VM thread can do a pseudo remote membar. |
|
1218 // We use the current thread pointer to calculate a thread specific |
|
1219 // offset to write to within the page. This minimizes bus traffic |
|
1220 // due to cache line collision. |
|
1221 __ serialize_memory(r15_thread, rscratch2); |
|
1222 } |
|
1223 } |
|
1224 |
|
1225 // check for safepoint operation in progress and/or pending suspend requests |
|
1226 { |
|
1227 Label Continue; |
|
1228 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), |
|
1229 SafepointSynchronize::_not_synchronized); |
|
1230 |
|
1231 Label L; |
|
1232 __ jcc(Assembler::notEqual, L); |
|
1233 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); |
|
1234 __ jcc(Assembler::equal, Continue); |
|
1235 __ bind(L); |
|
1236 |
|
1237 // Don't use call_VM as it will see a possible pending exception |
|
1238 // and forward it and never return here preventing us from |
|
1239 // clearing _last_native_pc down below. Also can't use |
|
1240 // call_VM_leaf either as it will check to see if r13 & r14 are |
|
1241 // preserved and correspond to the bcp/locals pointers. So we do a |
|
1242 // runtime call by hand. |
|
1243 // |
|
1244 __ mov(c_rarg0, r15_thread); |
|
1245 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) |
|
1246 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
|
1247 __ andptr(rsp, -16); // align stack as required by ABI |
|
1248 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); |
|
1249 __ mov(rsp, r12); // restore sp |
|
1250 __ reinit_heapbase(); |
|
1251 __ bind(Continue); |
|
1252 } |
|
1253 |
|
1254 // change thread state |
|
1255 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); |
|
1256 |
|
1257 // reset_last_Java_frame |
|
1258 __ reset_last_Java_frame(true, true); |
|
1259 |
|
1260 // reset handle block |
|
1261 __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset())); |
|
1262 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); |
|
1263 |
|
1264 // If result is an oop unbox and store it in frame where gc will see it |
|
1265 // and result handler will pick it up |
|
1266 |
|
1267 { |
|
1268 Label no_oop, store_result; |
|
1269 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); |
|
1270 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); |
|
1271 __ jcc(Assembler::notEqual, no_oop); |
|
1272 // retrieve result |
|
1273 __ pop(ltos); |
|
1274 __ testptr(rax, rax); |
|
1275 __ jcc(Assembler::zero, store_result); |
|
1276 __ movptr(rax, Address(rax, 0)); |
|
1277 __ bind(store_result); |
|
1278 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); |
|
1279 // keep stack depth as expected by pushing an oop which will eventually be discarded |
|
1280 __ push(ltos); |
|
1281 __ bind(no_oop); |
|
1282 } |
|
1283 |
|
1284 |
|
1285 { |
|
1286 Label no_reguard; |
|
1287 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), |
|
1288 JavaThread::stack_guard_yellow_disabled); |
|
1289 __ jcc(Assembler::notEqual, no_reguard); |
|
1290 |
|
1291 __ pusha(); // XXX only save smashed registers |
|
1292 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) |
|
1293 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
|
1294 __ andptr(rsp, -16); // align stack as required by ABI |
|
1295 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); |
|
1296 __ mov(rsp, r12); // restore sp |
|
1297 __ popa(); // XXX only restore smashed registers |
|
1298 __ reinit_heapbase(); |
|
1299 |
|
1300 __ bind(no_reguard); |
|
1301 } |
|
1302 |
|
1303 |
|
1304 // The method register is junk from after the thread_in_native transition |
|
1305 // until here. Also can't call_VM until the bcp has been |
|
1306 // restored. Need bcp for throwing exception below so get it now. |
|
1307 __ get_method(method); |
|
1308 |
|
1309 // restore r13 to have legal interpreter frame, i.e., bci == 0 <=> |
|
1310 // r13 == code_base() |
|
1311 __ movptr(r13, Address(method, Method::const_offset())); // get ConstMethod* |
|
1312 __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase |
|
1313 // handle exceptions (exception handling will handle unlocking!) |
|
1314 { |
|
1315 Label L; |
|
1316 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); |
|
1317 __ jcc(Assembler::zero, L); |
|
1318 // Note: At some point we may want to unify this with the code |
|
1319 // used in call_VM_base(); i.e., we should use the |
|
1320 // StubRoutines::forward_exception code. For now this doesn't work |
|
1321 // here because the rsp is not correctly set at this point. |
|
1322 __ MacroAssembler::call_VM(noreg, |
|
1323 CAST_FROM_FN_PTR(address, |
|
1324 InterpreterRuntime::throw_pending_exception)); |
|
1325 __ should_not_reach_here(); |
|
1326 __ bind(L); |
|
1327 } |
|
1328 |
|
1329 // do unlocking if necessary |
|
1330 { |
|
1331 Label L; |
|
1332 __ movl(t, Address(method, Method::access_flags_offset())); |
|
1333 __ testl(t, JVM_ACC_SYNCHRONIZED); |
|
1334 __ jcc(Assembler::zero, L); |
|
1335 // the code below should be shared with interpreter macro |
|
1336 // assembler implementation |
|
1337 { |
|
1338 Label unlock; |
|
1339 // BasicObjectLock will be first in list, since this is a |
|
1340 // synchronized method. However, need to check that the object |
|
1341 // has not been unlocked by an explicit monitorexit bytecode. |
|
1342 const Address monitor(rbp, |
|
1343 (intptr_t)(frame::interpreter_frame_initial_sp_offset * |
|
1344 wordSize - sizeof(BasicObjectLock))); |
|
1345 |
|
1346 // monitor is expected in c_rarg1 for the slow unlock path |
|
1347 __ lea(c_rarg1, monitor); // address of first monitor |
|
1348 |
|
1349 __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); |
|
1350 __ testptr(t, t); |
|
1351 __ jcc(Assembler::notZero, unlock); |
|
1352 |
|
1353 // Entry already unlocked, need to throw exception |
|
1354 __ MacroAssembler::call_VM(noreg, |
|
1355 CAST_FROM_FN_PTR(address, |
|
1356 InterpreterRuntime::throw_illegal_monitor_state_exception)); |
|
1357 __ should_not_reach_here(); |
|
1358 |
|
1359 __ bind(unlock); |
|
1360 __ unlock_object(c_rarg1); |
|
1361 } |
|
1362 __ bind(L); |
|
1363 } |
|
1364 |
|
1365 // jvmti support |
|
1366 // Note: This must happen _after_ handling/throwing any exceptions since |
|
1367 // the exception handler code notifies the runtime of method exits |
|
1368 // too. If this happens before, method entry/exit notifications are |
|
1369 // not properly paired (was bug - gri 11/22/99). |
|
1370 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); |
|
1371 |
|
1372 // restore potential result in edx:eax, call result handler to |
|
1373 // restore potential result in ST0 & handle result |
|
1374 |
|
1375 __ pop(ltos); |
|
1376 __ pop(dtos); |
|
1377 |
|
1378 __ movptr(t, Address(rbp, |
|
1379 (frame::interpreter_frame_result_handler_offset) * wordSize)); |
|
1380 __ call(t); |
|
1381 |
|
1382 // remove activation |
|
1383 __ movptr(t, Address(rbp, |
|
1384 frame::interpreter_frame_sender_sp_offset * |
|
1385 wordSize)); // get sender sp |
|
1386 __ leave(); // remove frame anchor |
|
1387 __ pop(rdi); // get return address |
|
1388 __ mov(rsp, t); // set sp to sender sp |
|
1389 __ jmp(rdi); |
|
1390 |
|
1391 if (inc_counter) { |
|
1392 // Handle overflow of counter and compile method |
|
1393 __ bind(invocation_counter_overflow); |
|
1394 generate_counter_overflow(&continue_after_compile); |
|
1395 } |
|
1396 |
|
1397 return entry_point; |
|
1398 } |
|
1399 |
|
1400 // |
|
1401 // Generic interpreted method entry to (asm) interpreter |
|
1402 // |
|
1403 address InterpreterGenerator::generate_normal_entry(bool synchronized) { |
|
1404 // determine code generation flags |
|
1405 bool inc_counter = UseCompiler || CountCompiledCalls; |
|
1406 |
|
1407 // ebx: Method* |
|
1408 // r13: sender sp |
|
1409 address entry_point = __ pc(); |
|
1410 |
|
1411 const Address constMethod(rbx, Method::const_offset()); |
|
1412 const Address access_flags(rbx, Method::access_flags_offset()); |
|
1413 const Address size_of_parameters(rdx, |
|
1414 ConstMethod::size_of_parameters_offset()); |
|
1415 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); |
|
1416 |
|
1417 |
|
1418 // get parameter size (always needed) |
|
1419 __ movptr(rdx, constMethod); |
|
1420 __ load_unsigned_short(rcx, size_of_parameters); |
|
1421 |
|
1422 // rbx: Method* |
|
1423 // rcx: size of parameters |
|
1424 // r13: sender_sp (could differ from sp+wordSize if we were called via c2i ) |
|
1425 |
|
1426 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words |
|
1427 __ subl(rdx, rcx); // rdx = no. of additional locals |
|
1428 |
|
1429 // YYY |
|
1430 // __ incrementl(rdx); |
|
1431 // __ andl(rdx, -2); |
|
1432 |
|
1433 // see if we've got enough room on the stack for locals plus overhead. |
|
1434 generate_stack_overflow_check(); |
|
1435 |
|
1436 // get return address |
|
1437 __ pop(rax); |
|
1438 |
|
1439 // compute beginning of parameters (r14) |
|
1440 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); |
|
1441 |
|
1442 // rdx - # of additional locals |
|
1443 // allocate space for locals |
|
1444 // explicitly initialize locals |
|
1445 { |
|
1446 Label exit, loop; |
|
1447 __ testl(rdx, rdx); |
|
1448 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 |
|
1449 __ bind(loop); |
|
1450 __ push((int) NULL_WORD); // initialize local variables |
|
1451 __ decrementl(rdx); // until everything initialized |
|
1452 __ jcc(Assembler::greater, loop); |
|
1453 __ bind(exit); |
|
1454 } |
|
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);
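  // (The flag is cleared again below once the shadow pages have been banged
  //  and it is safe to unlock on the exception path; remove_activation
  //  consults it when deciding whether to unlock.)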
|
  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that combine entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address   ] <--- rsp
// [ parameter n      ]
//   ...
// [ parameter 1      ]
// [ expression stack ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ Method*            ]
// [ saved ebp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14
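//
// For instance (illustrative numbers only): a method with 2 parameters and
// max_locals == 5 gets rdx == 3 additional local slots pushed as NULL_WORD
// above the parameters, r14 is left pointing at parameter 1, and
// generate_fixed_frame() then re-pushes the saved return address above
// "local variable 3" before laying down the rest of the fixed frame shown
// above.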
|
address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;
  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

  switch (kind) {
  case Interpreter::zerolocals             :                                                        break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                   break;
  case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);   break;
  case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);    break;
  case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();         break;
  case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();      break;
  case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();      break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ig_this->generate_Reference_get_entry(); break;
  case Interpreter::java_util_zip_CRC32_update
                                           : entry_point = ig_this->generate_CRC32_update_entry();  break;
  case Interpreter::java_util_zip_CRC32_updateBytes
                                           : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                           : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
  default:
    fatal(err_msg("unexpected method kind: %d", kind));
    break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ig_this->generate_normal_entry(synchronized);
}
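
// For example, an ordinary synchronized bytecode method is classified as
// zerolocals_synchronized: the switch above merely sets 'synchronized' to
// true and leaves entry_point NULL, so the method falls through to
// generate_normal_entry(true), which allocates a monitor via lock_method()
// before dispatching the first bytecode.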
|
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
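
// Rough worked example (hypothetical numbers, not the actual frame
// constants): if the fixed overhead plus one monitor-sized entry comes to
// 10 words and each stack element is one word, a method with
// max_locals == 4 and max_stack == 6 needs 10 + (4 + 6) * 1 = 20 words,
// plus entry_frame_after_call_words for the call stub.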
|
//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase();  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still that of the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;
    const Register local0 = r14;

    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax); // restore the member name argument into local slot 0
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_vm_result(rax, r15_thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        r15_thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ jmp(L);
  fep = __ pc();  __ push_f();    __ jmp(L);
  dep = __ pc();  __ push_d();    __ jmp(L);
  lep = __ pc();  __ push_l();    __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
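
// The entry points above let a caller arrive with any top-of-stack state for
// a template whose input state is vtos: each non-void entry first spills the
// cached TOS value onto the expression stack (e.g. iep pushes the int in rax,
// fep the float in xmm0) and then falls into the common vtos entry (vep),
// which runs generate_and_dispatch(t) with nothing cached in registers.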
|

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}
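
// Sketch of the index computed above (the constant comes from
// bytecodeHistogram.hpp): the previous pair index is shifted right so that
// the old "current" bytecode becomes the low field, and the new bytecode is
// or'ed into the high field, i.e. roughly
//   _index = (current << log2_number_of_codes) | previous;
// and the counter bumped is _counters[_index].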
|

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
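
// Debugging aid: in a non-product build, running with the develop flag
// -XX:StopInterpreterAt=<n> makes the code above execute int3 (a breakpoint)
// as soon as the global bytecode counter reaches n.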
|
#endif // !PRODUCT
#endif // ! CC_INTERP