|
/*
 * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
|
24 |
|
// Aggregated include lists from the old (pre-JDK7) HotSpot build system.
#include "incls/_precompiled.incl"
#include "incls/_interpreter_x86_64.cpp.incl"

// Shorthand used throughout this file: emit instructions through the
// ambient MacroAssembler pointer `_masm`.
#define __ _masm->
|
29 |
|
30 |
|
#ifdef _WIN64
// Slow-path native-call signature handler (Windows x64 variant).
//
// Calls InterpreterRuntime::slow_signature_handler, which parses the
// method signature and deposits the outgoing register arguments (plus a
// word of float/double identifier bits) into the scratch area carved out
// below rsp. This stub then loads each slot into either an integer
// argument register or an XMM register, as the identifier bits dictate.
//
// On entry:
//   rbx - methodOop
//   r14 - pointer to locals
// On exit:
//   rax - result handler (produced by the runtime call)
//   argument registers loaded for the native call
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ movq(c_rarg3, rsp);
  // adjust rsp: 3 register-argument slots + 1 word of float/double
  // identifier bits (matches the stack layout pictured below)
  __ subq(rsp, 4 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  //   rsp: 3 integer or float args (if static first is unused)
  //        1 float/double identifiers
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers

  // Two identifier bits per argument: bit (2*i) set means "FP argument",
  // bit (2*i + 1) set means "double rather than float".
  for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
    // Slot i feeds argument register i+1; register 0 is skipped here --
    // presumably it is set up by the caller (TODO confirm).
    XMMRegister floatreg = as_XMMRegister(i+1);
    Label isfloatordouble, isdouble, next;

    __ testl(c_rarg3, 1 << (i*2)); // Float or Double?
    __ jcc(Assembler::notZero, isfloatordouble);

    // Do Int register here
    switch ( i ) {
    case 0:
      // Slot 0 is only loaded for non-static methods (slot is unused
      // when JVM_ACC_STATIC is set, hence the conditional move).
      __ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset()));
      __ testl(rscratch1, JVM_ACC_STATIC);
      __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0));
      break;
    case 1:
      __ movq(c_rarg2, Address(rsp, wordSize));
      break;
    case 2:
      // Clobbering c_rarg3 (the identifier temp) is safe only because
      // its identifier bits for this iteration were already tested above;
      // presumably i == 2 is the final iteration (TODO confirm
      // Argument::n_int_register_parameters_c == 4 on Win64).
      __ movq(c_rarg3, Address(rsp, 2 * wordSize));
      break;
    default:
      break;
    }

    __ jmp (next);

    __ bind(isfloatordouble);
    __ testl(c_rarg3, 1 << ((i*2)+1)); // Double?
    __ jcc(Assembler::notZero, isdouble);

    // Do Float Here
    __ movflt(floatreg, Address(rsp, i * wordSize));
    __ jmp(next);

    // Do Double here
    __ bind(isdouble);
    __ movdbl(floatreg, Address(rsp, i * wordSize));

    __ bind(next);
  }

  // restore rsp
  __ addq(rsp, 4 * wordSize);

  __ ret(0);

  return entry;
}
|
#else
// Slow-path native-call signature handler (non-Windows variant, i.e. the
// System V AMD64-style convention with 6 integer and 8 FP argument
// registers).
//
// Calls InterpreterRuntime::slow_signature_handler, which parses the
// method signature and fills the 14-word scratch area carved out below
// rsp; this stub then loads the integer and XMM argument registers from
// those slots.
//
// On entry:
//   rbx - methodOop
//   r14 - pointer to locals
// On exit:
//   rax - result handler (produced by the runtime call)
//   argument registers loaded for the native call
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ movq(c_rarg3, rsp);
  // adjust rsp: 5 integer slots + 1 identifier word + 8 double slots = 14
  __ subq(rsp, 14 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  //   rsp: 5 integer args (if static first is unused)
  //        1 float/double identifiers
  //        8 double args
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers

  // One identifier bit per FP argument: set means double, clear means float.
  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const XMMRegister r = as_XMMRegister(i);

    Label d, done;

    __ testl(c_rarg3, 1 << i);
    __ jcc(Assembler::notZero, d);
    __ movflt(r, Address(rsp, (6 + i) * wordSize));
    __ jmp(done);
    __ bind(d);
    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
    __ bind(done);
  }

  // Now handle integrals. Only do c_rarg1 if not static.
  __ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset()));
  __ testl(c_rarg3, JVM_ACC_STATIC);
  __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0));

  __ movq(c_rarg2, Address(rsp, wordSize));
  // c_rarg3 has finished its duty as a temp (identifier bits, then access
  // flags) and can now receive its real argument.
  __ movq(c_rarg3, Address(rsp, 2 * wordSize));
  __ movq(c_rarg4, Address(rsp, 3 * wordSize));
  __ movq(c_rarg5, Address(rsp, 4 * wordSize));

  // restore rsp
  __ addq(rsp, 14 * wordSize);

  __ ret(0);

  return entry;
}
#endif
|
173 |
|
174 |
|
//
// Various method entries
//
|
178 |
|
// Special entry for math intrinsics (currently java.lang.Math.sqrt only,
// per the assert below). Computes the result in-line with sqrtsd and
// returns straight to the caller without building an interpreter frame.
//
// On entry:
//   rbx - methodOop
//   r13 - sender SP (restored into rsp on return; the neighbouring
//         entries document r13 the same way)
// Returns NULL when InlineIntrinsics is disabled, so the caller falls
// back to generating a vanilla entry.
address InterpreterGenerator::generate_math_entry(
  AbstractInterpreter::MethodKind kind) {
  // rbx: methodOop

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  assert(kind == Interpreter::java_lang_math_sqrt,
         "Other intrinsics are not special");

  address entry_point = __ pc();

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)

  // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
  // native methods. Interpreter::method_kind(...) does a check for
  // native methods first before checking for intrinsic methods and
  // thus will never select this entry point. Make sure it is not
  // called accidentally since the SharedRuntime entry points will
  // not work for JDK 1.2.
  //
  // We no longer need to check for JDK 1.2 since it's EOL'ed.
  // The following check existed in pre 1.6 implementation,
  //    if (Universe::is_jdk12x_version()) {
  //      __ should_not_reach_here();
  //    }
  // Universe::is_jdk12x_version() always returns false since
  // the JDK version is not yet determined when this method is called.
  // This method is called during interpreter_init() whereas
  // JDK version is only determined when universe2_init() is called.

  // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
  // java methods. Interpreter::method_kind(...) will select
  // this entry point for the corresponding methods in JDK 1.3.

  // The double argument sits one word above rsp (just past the return
  // address); the result is produced directly in xmm0.
  __ sqrtsd(xmm0, Address(rsp, wordSize));

  // Return straight to the caller: pop the return address, restore the
  // sender's SP from r13, and jump back with the result in xmm0.
  __ popq(rax);
  __ movq(rsp, r13);
  __ jmp(rax);

  return entry_point;
}
|
229 |
|
230 |
|
// Abstract method entry
// Attempt to execute abstract method. Throw exception.
//
// On entry:
//   rbx - methodOop (the abstract method that was "invoked")
//   r13 - sender SP
// Generated code never returns to its caller: the call_VM below raises
// AbstractMethodError.
address InterpreterGenerator::generate_abstract_entry(void) {
  // rbx: methodOop
  // r13: sender SP

  address entry_point = __ pc();

  // abstract method entry
  // remove return address. Not really needed, since exception
  // handling throws away expression stack
  __ popq(rbx);

  // adjust stack to what a normal return would do
  __ movq(rsp, r13);

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}
|
255 |
|
256 |
|
// Empty method, generate a very fast return.
//
// Fast path pops the return address and returns to the sender without
// building a frame or even incrementing the invocation counter. If a
// safepoint is pending, control falls into a freshly generated vanilla
// interpreter entry instead.
address InterpreterGenerator::generate_empty_entry(void) {
  // rbx: methodOop
  // r13: sender sp; must set sp to this value on return

  // Returning NULL makes the caller generate a vanilla entry instead.
  if (!UseFastEmptyMethods) {
    return NULL;
  }

  address entry_point = __ pc();

  // If we need a safepoint check, generate full interpreter entry.
  Label slow_path;
  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
           SafepointSynchronize::_not_synchronized);
  __ jcc(Assembler::notEqual, slow_path);

  // do nothing for empty methods (do not even increment invocation counter)
  // Code: _return
  // _return
  // return w/o popping parameters
  __ popq(rax);       // return address
  __ movq(rsp, r13);  // restore sender sp
  __ jmp(rax);

  // slow path: emit a normal interpreter entry right here; the jcc above
  // falls into it when a safepoint is in progress
  __ bind(slow_path);
  (void) generate_normal_entry(false);
  return entry_point;

}
|
288 |
|
289 // Call an accessor method (assuming it is resolved, otherwise drop |
|
290 // into vanilla (slow path) entry |
|
291 address InterpreterGenerator::generate_accessor_entry(void) { |
|
292 // rbx: methodOop |
|
293 |
|
294 // r13: senderSP must preserver for slow path, set SP to it on fast path |
|
295 |
|
296 address entry_point = __ pc(); |
|
297 Label xreturn_path; |
|
298 |
|
299 // do fastpath for resolved accessor methods |
|
300 if (UseFastAccessorMethods) { |
|
301 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites |
|
302 // thereof; parameter size = 1 |
|
303 // Note: We can only use this code if the getfield has been resolved |
|
304 // and if we don't have a null-pointer exception => check for |
|
305 // these conditions first and use slow path if necessary. |
|
306 Label slow_path; |
|
307 // If we need a safepoint check, generate full interpreter entry. |
|
308 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), |
|
309 SafepointSynchronize::_not_synchronized); |
|
310 |
|
311 __ jcc(Assembler::notEqual, slow_path); |
|
312 // rbx: method |
|
313 __ movq(rax, Address(rsp, wordSize)); |
|
314 |
|
315 // check if local 0 != NULL and read field |
|
316 __ testq(rax, rax); |
|
317 __ jcc(Assembler::zero, slow_path); |
|
318 |
|
319 __ movq(rdi, Address(rbx, methodOopDesc::constants_offset())); |
|
320 // read first instruction word and extract bytecode @ 1 and index @ 2 |
|
321 __ movq(rdx, Address(rbx, methodOopDesc::const_offset())); |
|
322 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); |
|
323 // Shift codes right to get the index on the right. |
|
324 // The bytecode fetched looks like <index><0xb4><0x2a> |
|
325 __ shrl(rdx, 2 * BitsPerByte); |
|
326 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); |
|
327 __ movq(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); |
|
328 |
|
329 // rax: local 0 |
|
330 // rbx: method |
|
331 // rdx: constant pool cache index |
|
332 // rdi: constant pool cache |
|
333 |
|
334 // check if getfield has been resolved and read constant pool cache entry |
|
335 // check the validity of the cache entry by testing whether _indices field |
|
336 // contains Bytecode::_getfield in b1 byte. |
|
337 assert(in_words(ConstantPoolCacheEntry::size()) == 4, |
|
338 "adjust shift below"); |
|
339 __ movl(rcx, |
|
340 Address(rdi, |
|
341 rdx, |
|
342 Address::times_8, |
|
343 constantPoolCacheOopDesc::base_offset() + |
|
344 ConstantPoolCacheEntry::indices_offset())); |
|
345 __ shrl(rcx, 2 * BitsPerByte); |
|
346 __ andl(rcx, 0xFF); |
|
347 __ cmpl(rcx, Bytecodes::_getfield); |
|
348 __ jcc(Assembler::notEqual, slow_path); |
|
349 |
|
350 // Note: constant pool entry is not valid before bytecode is resolved |
|
351 __ movq(rcx, |
|
352 Address(rdi, |
|
353 rdx, |
|
354 Address::times_8, |
|
355 constantPoolCacheOopDesc::base_offset() + |
|
356 ConstantPoolCacheEntry::f2_offset())); |
|
357 // edx: flags |
|
358 __ movl(rdx, |
|
359 Address(rdi, |
|
360 rdx, |
|
361 Address::times_8, |
|
362 constantPoolCacheOopDesc::base_offset() + |
|
363 ConstantPoolCacheEntry::flags_offset())); |
|
364 |
|
365 Label notObj, notInt, notByte, notShort; |
|
366 const Address field_address(rax, rcx, Address::times_1); |
|
367 |
|
368 // Need to differentiate between igetfield, agetfield, bgetfield etc. |
|
369 // because they are different sizes. |
|
370 // Use the type from the constant pool cache |
|
371 __ shrl(rdx, ConstantPoolCacheEntry::tosBits); |
|
372 // Make sure we don't need to mask edx for tosBits after the above shift |
|
373 ConstantPoolCacheEntry::verify_tosBits(); |
|
374 |
|
375 __ cmpl(rdx, atos); |
|
376 __ jcc(Assembler::notEqual, notObj); |
|
377 // atos |
|
378 __ movq(rax, field_address); |
|
379 __ jmp(xreturn_path); |
|
380 |
|
381 __ bind(notObj); |
|
382 __ cmpl(rdx, itos); |
|
383 __ jcc(Assembler::notEqual, notInt); |
|
384 // itos |
|
385 __ movl(rax, field_address); |
|
386 __ jmp(xreturn_path); |
|
387 |
|
388 __ bind(notInt); |
|
389 __ cmpl(rdx, btos); |
|
390 __ jcc(Assembler::notEqual, notByte); |
|
391 // btos |
|
392 __ load_signed_byte(rax, field_address); |
|
393 __ jmp(xreturn_path); |
|
394 |
|
395 __ bind(notByte); |
|
396 __ cmpl(rdx, stos); |
|
397 __ jcc(Assembler::notEqual, notShort); |
|
398 // stos |
|
399 __ load_signed_word(rax, field_address); |
|
400 __ jmp(xreturn_path); |
|
401 |
|
402 __ bind(notShort); |
|
403 #ifdef ASSERT |
|
404 Label okay; |
|
405 __ cmpl(rdx, ctos); |
|
406 __ jcc(Assembler::equal, okay); |
|
407 __ stop("what type is this?"); |
|
408 __ bind(okay); |
|
409 #endif |
|
410 // ctos |
|
411 __ load_unsigned_word(rax, field_address); |
|
412 |
|
413 __ bind(xreturn_path); |
|
414 |
|
415 // _ireturn/_areturn |
|
416 __ popq(rdi); |
|
417 __ movq(rsp, r13); |
|
418 __ jmp(rdi); |
|
419 __ ret(0); |
|
420 |
|
421 // generate a vanilla interpreter entry as the slow path |
|
422 __ bind(slow_path); |
|
423 (void) generate_normal_entry(false); |
|
424 } else { |
|
425 (void) generate_normal_entry(false); |
|
426 } |
|
427 |
|
428 return entry_point; |
|
429 } |
|
430 |
|
// This method tells the deoptimizer how big an interpreted frame must be:
//
//   method              - the method the frame is for
//   tempcount           - number of expression-stack temps to account for
//   popframe_extra_args - extra argument words for a pop-frame in progress
//   moncount            - number of monitors
//   callee_param_count / callee_locals - sizing for the callee, if any
//   is_top_frame        - whether this will be the top frame of the thread
int AbstractInterpreter::size_activation(methodOop method,
                                         int tempcount,
                                         int popframe_extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Delegates to layout_activation with NULL caller/interpreter frame
  // pointers -- presumably the NULL frames make layout_activation compute
  // the required size only, without filling in an actual frame (TODO
  // confirm against layout_activation's definition).
  return layout_activation(method,
                           tempcount, popframe_extra_args, moncount,
                           callee_param_count, callee_locals,
                           (frame*) NULL, (frame*) NULL, is_top_frame);
}
|
444 |
|
// Restore callee-saved register values into the oldest interpreter frame
// created during deoptimization. Currently a deliberate no-op placeholder
// (apart from the sanity assert): see the explanation below.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When we deoptimize a situation where a
  // compiled caller calls a compiled caller will have registers it expects
  // to survive the call to the callee. If we deoptimize the callee the only
  // way we can restore these registers is to have the oldest interpreter
  // frame that we create restore these values. That is what this routine
  // will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}