Mon, 20 Aug 2012 09:58:58 -0700
7190310: Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
Summary: In C2 add software membar after load from Reference.referent field to prevent commoning of loads across safepoint since GC can change its value. In C1 always generate Reference.get() intrinsic.
Reviewed-by: roland, twisti, dholmes, johnc
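A minimal C++ analogy of the hazard, not JVM code: if a compiler proves a load loop-invariant and hoists it out of a loop, the loop never observes a concurrent change to the field. For Reference.referent the concurrent writer is the GC at a safepoint, which is why C2 now keeps a membar after the load to block such commoning. The asm statement below is the classic GCC compiler-only barrier and merely stands in for that membar.

#include <cstddef>

struct Ref { void* referent; };

void* spin_until_cleared(Ref* r) {
  // Without the barrier the compiler may hoist the load of r->referent
  // above the loop and spin forever even after the field is cleared.
  while (r->referent != NULL) {
    asm volatile("" ::: "memory");  // compiler barrier: force a fresh load
  }
  return r->referent;  // NULL here
}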
1 /*
2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodDataOop.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
38 #ifndef CC_INTERP
40 #define __ _masm->
42 // Platform-dependent initialization
44 void TemplateTable::pd_initialize() {
45 // No amd64 specific initialization
46 }
48 // Address computation: local variables
50 static inline Address iaddress(int n) {
51 return Address(r14, Interpreter::local_offset_in_bytes(n));
52 }
54 static inline Address laddress(int n) {
55 return iaddress(n + 1);
56 }
58 static inline Address faddress(int n) {
59 return iaddress(n);
60 }
62 static inline Address daddress(int n) {
63 return laddress(n);
64 }
66 static inline Address aaddress(int n) {
67 return iaddress(n);
68 }
70 static inline Address iaddress(Register r) {
71 return Address(r14, r, Address::times_8);
72 }
74 static inline Address laddress(Register r) {
75 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
76 }
78 static inline Address faddress(Register r) {
79 return iaddress(r);
80 }
82 static inline Address daddress(Register r) {
83 return laddress(r);
84 }
86 static inline Address aaddress(Register r) {
87 return iaddress(r);
88 }
90 static inline Address at_rsp() {
91 return Address(rsp, 0);
92 }
94 // At the top of the Java expression stack, which may be different from rsp().
95 // It isn't different for category 1 values.
96 static inline Address at_tos () {
97 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
98 }
100 static inline Address at_tos_p1() {
101 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
102 }
104 static inline Address at_tos_p2() {
105 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
106 }
108 static inline Address at_tos_p3() {
109 return Address(rsp, Interpreter::expr_offset_in_bytes(3));
110 }
112 // Condition conversion
113 static Assembler::Condition j_not(TemplateTable::Condition cc) {
114 switch (cc) {
115 case TemplateTable::equal : return Assembler::notEqual;
116 case TemplateTable::not_equal : return Assembler::equal;
117 case TemplateTable::less : return Assembler::greaterEqual;
118 case TemplateTable::less_equal : return Assembler::greater;
119 case TemplateTable::greater : return Assembler::lessEqual;
120 case TemplateTable::greater_equal: return Assembler::less;
121 }
122 ShouldNotReachHere();
123 return Assembler::zero;
124 }
127 // Miscellaneous helper routines
128 // Store an oop (or NULL) at the address described by obj.
129 // If val == noreg this means store a NULL
131 static void do_oop_store(InterpreterMacroAssembler* _masm,
132 Address obj,
133 Register val,
134 BarrierSet::Name barrier,
135 bool precise) {
136 assert(val == noreg || val == rax, "parameter is just for looks");
137 switch (barrier) {
138 #ifndef SERIALGC
139 case BarrierSet::G1SATBCT:
140 case BarrierSet::G1SATBCTLogging:
141 {
142 // flatten object address if needed
143 if (obj.index() == noreg && obj.disp() == 0) {
144 if (obj.base() != rdx) {
145 __ movq(rdx, obj.base());
146 }
147 } else {
148 __ leaq(rdx, obj);
149 }
150 __ g1_write_barrier_pre(rdx /* obj */,
151 rbx /* pre_val */,
152 r15_thread /* thread */,
153 r8 /* tmp */,
154 val != noreg /* tosca_live */,
155 false /* expand_call */);
156 if (val == noreg) {
157 __ store_heap_oop_null(Address(rdx, 0));
158 } else {
159 __ store_heap_oop(Address(rdx, 0), val);
160 __ g1_write_barrier_post(rdx /* store_adr */,
161 val /* new_val */,
162 r15_thread /* thread */,
163 r8 /* tmp */,
164 rbx /* tmp2 */);
165 }
167 }
168 break;
169 #endif // SERIALGC
170 case BarrierSet::CardTableModRef:
171 case BarrierSet::CardTableExtension:
172 {
173 if (val == noreg) {
174 __ store_heap_oop_null(obj);
175 } else {
176 __ store_heap_oop(obj, val);
177 // flatten object address if needed
178 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
179 __ store_check(obj.base());
180 } else {
181 __ leaq(rdx, obj);
182 __ store_check(rdx);
183 }
184 }
185 }
186 break;
187 case BarrierSet::ModRef:
188 case BarrierSet::Other:
189 if (val == noreg) {
190 __ store_heap_oop_null(obj);
191 } else {
192 __ store_heap_oop(obj, val);
193 }
194 break;
195 default :
196 ShouldNotReachHere();
198 }
199 }
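For the CardTableModRef case above, the store_check call boils down to dirtying one byte in a card table. A minimal sketch of that post-barrier, assuming HotSpot's classic layout (512-byte cards, dirty card value 0); byte_map_base stands in for the real biased table base:

#include <stdint.h>

static const int card_shift = 9;   // log2 of the 512-byte card size
extern uint8_t* byte_map_base;     // biased so the shifted address indexes it directly

inline void store_check_sketch(void* store_addr) {
  // Mark the card covering store_addr dirty; the GC later rescans dirty cards.
  byte_map_base[(uintptr_t)store_addr >> card_shift] = 0;
}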
201 Address TemplateTable::at_bcp(int offset) {
202 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
203 return Address(r13, offset);
204 }
206 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
207 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
208 int byte_no) {
209 if (!RewriteBytecodes) return;
210 Label L_patch_done;
212 switch (bc) {
213 case Bytecodes::_fast_aputfield:
214 case Bytecodes::_fast_bputfield:
215 case Bytecodes::_fast_cputfield:
216 case Bytecodes::_fast_dputfield:
217 case Bytecodes::_fast_fputfield:
218 case Bytecodes::_fast_iputfield:
219 case Bytecodes::_fast_lputfield:
220 case Bytecodes::_fast_sputfield:
221 {
222 // We skip bytecode quickening for putfield instructions when
223 // the put_code written to the constant pool cache is zero.
224 // This is required so that every execution of this instruction
225 // calls out to InterpreterRuntime::resolve_get_put to do
226 // additional, required work.
227 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
228 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
229 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
230 __ movl(bc_reg, bc);
231 __ cmpl(temp_reg, (int) 0);
232 __ jcc(Assembler::zero, L_patch_done); // don't patch
233 }
234 break;
235 default:
236 assert(byte_no == -1, "sanity");
237 // the pair bytecodes have already done the load.
238 if (load_bc_into_bc_reg) {
239 __ movl(bc_reg, bc);
240 }
241 }
243 if (JvmtiExport::can_post_breakpoint()) {
244 Label L_fast_patch;
245 // if a breakpoint is present we can't rewrite the stream directly
246 __ movzbl(temp_reg, at_bcp(0));
247 __ cmpl(temp_reg, Bytecodes::_breakpoint);
248 __ jcc(Assembler::notEqual, L_fast_patch);
249 __ get_method(temp_reg);
250 // Let breakpoint table handling rewrite to quicker bytecode
251 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
252 #ifndef ASSERT
253 __ jmpb(L_patch_done);
254 #else
255 __ jmp(L_patch_done);
256 #endif
257 __ bind(L_fast_patch);
258 }
260 #ifdef ASSERT
261 Label L_okay;
262 __ load_unsigned_byte(temp_reg, at_bcp(0));
263 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
264 __ jcc(Assembler::equal, L_okay);
265 __ cmpl(temp_reg, bc_reg);
266 __ jcc(Assembler::equal, L_okay);
267 __ stop("patching the wrong bytecode");
268 __ bind(L_okay);
269 #endif
271 // patch bytecode
272 __ movb(at_bcp(0), bc_reg);
273 __ bind(L_patch_done);
274 }
277 // Individual instructions
279 void TemplateTable::nop() {
280 transition(vtos, vtos);
281 // nothing to do
282 }
284 void TemplateTable::shouldnotreachhere() {
285 transition(vtos, vtos);
286 __ stop("shouldnotreachhere bytecode");
287 }
289 void TemplateTable::aconst_null() {
290 transition(vtos, atos);
291 __ xorl(rax, rax);
292 }
294 void TemplateTable::iconst(int value) {
295 transition(vtos, itos);
296 if (value == 0) {
297 __ xorl(rax, rax);
298 } else {
299 __ movl(rax, value);
300 }
301 }
303 void TemplateTable::lconst(int value) {
304 transition(vtos, ltos);
305 if (value == 0) {
306 __ xorl(rax, rax);
307 } else {
308 __ movl(rax, value);
309 }
310 }
312 void TemplateTable::fconst(int value) {
313 transition(vtos, ftos);
314 static float one = 1.0f, two = 2.0f;
315 switch (value) {
316 case 0:
317 __ xorps(xmm0, xmm0);
318 break;
319 case 1:
320 __ movflt(xmm0, ExternalAddress((address) &one));
321 break;
322 case 2:
323 __ movflt(xmm0, ExternalAddress((address) &two));
324 break;
325 default:
326 ShouldNotReachHere();
327 break;
328 }
329 }
331 void TemplateTable::dconst(int value) {
332 transition(vtos, dtos);
333 static double one = 1.0;
334 switch (value) {
335 case 0:
336 __ xorpd(xmm0, xmm0);
337 break;
338 case 1:
339 __ movdbl(xmm0, ExternalAddress((address) &one));
340 break;
341 default:
342 ShouldNotReachHere();
343 break;
344 }
345 }
347 void TemplateTable::bipush() {
348 transition(vtos, itos);
349 __ load_signed_byte(rax, at_bcp(1));
350 }
352 void TemplateTable::sipush() {
353 transition(vtos, itos);
354 __ load_unsigned_short(rax, at_bcp(1));
355 __ bswapl(rax);
356 __ sarl(rax, 16);
357 }
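The bswapl/sarl pair above decodes the big-endian s2 operand. A standalone sketch of the same trick (bcp points at the sipush opcode, so the operand bytes are bcp[1] and bcp[2]):

#include <stdint.h>

int32_t decode_s2(const uint8_t* bcp) {
  uint32_t v = (uint32_t)bcp[1] | ((uint32_t)bcp[2] << 8); // load_unsigned_short (little-endian)
  v = __builtin_bswap32(v);                                // bswapl: operand now in the top half
  return (int32_t)v >> 16;                                 // sarl 16: sign-extends the big-endian short
}

For example, bytes 0xFF 0xFE load as 0xFEFF, byte-swap to 0xFFFE0000, and arithmetic-shift to -2.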
359 void TemplateTable::ldc(bool wide) {
360 transition(vtos, vtos);
361 Label call_ldc, notFloat, notClass, Done;
363 if (wide) {
364 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
365 } else {
366 __ load_unsigned_byte(rbx, at_bcp(1));
367 }
369 __ get_cpool_and_tags(rcx, rax);
370 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
371 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
373 // get type
374 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
376 // unresolved string - get the resolved string
377 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
378 __ jccb(Assembler::equal, call_ldc);
380 // unresolved class - get the resolved class
381 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
382 __ jccb(Assembler::equal, call_ldc);
384 // unresolved class in error state - call into runtime to throw the error
385 // from the first resolution attempt
386 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
387 __ jccb(Assembler::equal, call_ldc);
389 // resolved class - need to call vm to get java mirror of the class
390 __ cmpl(rdx, JVM_CONSTANT_Class);
391 __ jcc(Assembler::notEqual, notClass);
393 __ bind(call_ldc);
394 __ movl(c_rarg1, wide);
395 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
396 __ push_ptr(rax);
397 __ verify_oop(rax);
398 __ jmp(Done);
400 __ bind(notClass);
401 __ cmpl(rdx, JVM_CONSTANT_Float);
402 __ jccb(Assembler::notEqual, notFloat);
403 // ftos
404 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
405 __ push_f();
406 __ jmp(Done);
408 __ bind(notFloat);
409 #ifdef ASSERT
410 {
411 Label L;
412 __ cmpl(rdx, JVM_CONSTANT_Integer);
413 __ jcc(Assembler::equal, L);
414 __ cmpl(rdx, JVM_CONSTANT_String);
415 __ jcc(Assembler::equal, L);
416 __ cmpl(rdx, JVM_CONSTANT_Object);
417 __ jcc(Assembler::equal, L);
418 __ stop("unexpected tag type in ldc");
419 __ bind(L);
420 }
421 #endif
422 // atos and itos
423 Label isOop;
424 __ cmpl(rdx, JVM_CONSTANT_Integer);
425 __ jcc(Assembler::notEqual, isOop);
426 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
427 __ push_i(rax);
428 __ jmp(Done);
430 __ bind(isOop);
431 __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
432 __ push_ptr(rax);
434 if (VerifyOops) {
435 __ verify_oop(rax);
436 }
438 __ bind(Done);
439 }
441 // Fast path for caching oop constants.
442 // %%% We should use this to handle Class and String constants also.
443 // %%% It will simplify the ldc/primitive path considerably.
444 void TemplateTable::fast_aldc(bool wide) {
445 transition(vtos, atos);
447 if (!EnableInvokeDynamic) {
448 // We should not encounter this bytecode if !EnableInvokeDynamic.
449 // The verifier will stop it. However, if we get past the verifier,
450 // this will stop the thread in a reasonable way, without crashing the JVM.
451 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
452 InterpreterRuntime::throw_IncompatibleClassChangeError));
453 // the call_VM checks for exception, so we should never return here.
454 __ should_not_reach_here();
455 return;
456 }
458 const Register cache = rcx;
459 const Register index = rdx;
461 resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
462 if (VerifyOops) {
463 __ verify_oop(rax);
464 }
466 Label L_done, L_throw_exception;
467 const Register con_klass_temp = rcx; // same as cache
468 const Register array_klass_temp = rdx; // same as index
469 __ load_klass(con_klass_temp, rax);
470 __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
471 __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
472 __ jcc(Assembler::notEqual, L_done);
473 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
474 __ jcc(Assembler::notEqual, L_throw_exception);
475 __ xorptr(rax, rax);
476 __ jmp(L_done);
478 // Load the exception from the system-array which wraps it:
479 __ bind(L_throw_exception);
480 __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
481 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
483 __ bind(L_done);
484 }
486 void TemplateTable::ldc2_w() {
487 transition(vtos, vtos);
488 Label Long, Done;
489 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
491 __ get_cpool_and_tags(rcx, rax);
492 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
493 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
495 // get type
496 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
497 JVM_CONSTANT_Double);
498 __ jccb(Assembler::notEqual, Long);
499 // dtos
500 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
501 __ push_d();
502 __ jmpb(Done);
504 __ bind(Long);
505 // ltos
506 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
507 __ push_l();
509 __ bind(Done);
510 }
512 void TemplateTable::locals_index(Register reg, int offset) {
513 __ load_unsigned_byte(reg, at_bcp(offset));
514 __ negptr(reg);
515 }
517 void TemplateTable::iload() {
518 transition(vtos, itos);
519 if (RewriteFrequentPairs) {
520 Label rewrite, done;
521 const Register bc = c_rarg3;
522 assert(rbx != bc, "register damaged");
524 // get next byte
525 __ load_unsigned_byte(rbx,
526 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
527 // if the next bytecode is _iload, wait to rewrite: we only want to rewrite
528 // the last two iloads in a pair. If the next bytecode is _fast_iload, it has
529 // already been rewritten (what follows it is neither an iload nor a caload),
530 // so this iload and the next one form an iload pair.
531 __ cmpl(rbx, Bytecodes::_iload);
532 __ jcc(Assembler::equal, done);
534 __ cmpl(rbx, Bytecodes::_fast_iload);
535 __ movl(bc, Bytecodes::_fast_iload2);
536 __ jccb(Assembler::equal, rewrite);
538 // if _caload, rewrite to fast_icaload
539 __ cmpl(rbx, Bytecodes::_caload);
540 __ movl(bc, Bytecodes::_fast_icaload);
541 __ jccb(Assembler::equal, rewrite);
543 // rewrite so iload doesn't check again.
544 __ movl(bc, Bytecodes::_fast_iload);
546 // rewrite
547 // bc: fast bytecode
548 __ bind(rewrite);
549 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
550 __ bind(done);
551 }
553 // Get the local value into tos
554 locals_index(rbx);
555 __ movl(rax, iaddress(rbx));
556 }
558 void TemplateTable::fast_iload2() {
559 transition(vtos, itos);
560 locals_index(rbx);
561 __ movl(rax, iaddress(rbx));
562 __ push(itos);
563 locals_index(rbx, 3);
564 __ movl(rax, iaddress(rbx));
565 }
567 void TemplateTable::fast_iload() {
568 transition(vtos, itos);
569 locals_index(rbx);
570 __ movl(rax, iaddress(rbx));
571 }
573 void TemplateTable::lload() {
574 transition(vtos, ltos);
575 locals_index(rbx);
576 __ movq(rax, laddress(rbx));
577 }
579 void TemplateTable::fload() {
580 transition(vtos, ftos);
581 locals_index(rbx);
582 __ movflt(xmm0, faddress(rbx));
583 }
585 void TemplateTable::dload() {
586 transition(vtos, dtos);
587 locals_index(rbx);
588 __ movdbl(xmm0, daddress(rbx));
589 }
591 void TemplateTable::aload() {
592 transition(vtos, atos);
593 locals_index(rbx);
594 __ movptr(rax, aaddress(rbx));
595 }
597 void TemplateTable::locals_index_wide(Register reg) {
598 __ movl(reg, at_bcp(2));
599 __ bswapl(reg);
600 __ shrl(reg, 16);
601 __ negptr(reg);
602 }
604 void TemplateTable::wide_iload() {
605 transition(vtos, itos);
606 locals_index_wide(rbx);
607 __ movl(rax, iaddress(rbx));
608 }
610 void TemplateTable::wide_lload() {
611 transition(vtos, ltos);
612 locals_index_wide(rbx);
613 __ movq(rax, laddress(rbx));
614 }
616 void TemplateTable::wide_fload() {
617 transition(vtos, ftos);
618 locals_index_wide(rbx);
619 __ movflt(xmm0, faddress(rbx));
620 }
622 void TemplateTable::wide_dload() {
623 transition(vtos, dtos);
624 locals_index_wide(rbx);
625 __ movdbl(xmm0, daddress(rbx));
626 }
628 void TemplateTable::wide_aload() {
629 transition(vtos, atos);
630 locals_index_wide(rbx);
631 __ movptr(rax, aaddress(rbx));
632 }
634 void TemplateTable::index_check(Register array, Register index) {
635 // destroys rbx
636 // check array
637 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
638 // sign extend index for use by indexed load
639 __ movl2ptr(index, index);
640 // check index
641 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
642 if (index != rbx) {
643 // ??? convention: move aberrant index into ebx for exception message
644 assert(rbx != array, "different registers");
645 __ movl(rbx, index);
646 }
647 __ jump_cc(Assembler::aboveEqual,
648 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
649 }
651 void TemplateTable::iaload() {
652 transition(itos, itos);
653 __ pop_ptr(rdx);
654 // eax: index
655 // rdx: array
656 index_check(rdx, rax); // kills rbx
657 __ movl(rax, Address(rdx, rax,
658 Address::times_4,
659 arrayOopDesc::base_offset_in_bytes(T_INT)));
660 }
662 void TemplateTable::laload() {
663 transition(itos, ltos);
664 __ pop_ptr(rdx);
665 // eax: index
666 // rdx: array
667 index_check(rdx, rax); // kills rbx
668 __ movq(rax, Address(rdx, rbx,
669 Address::times_8,
670 arrayOopDesc::base_offset_in_bytes(T_LONG)));
671 }
673 void TemplateTable::faload() {
674 transition(itos, ftos);
675 __ pop_ptr(rdx);
676 // eax: index
677 // rdx: array
678 index_check(rdx, rax); // kills rbx
679 __ movflt(xmm0, Address(rdx, rax,
680 Address::times_4,
681 arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
682 }
684 void TemplateTable::daload() {
685 transition(itos, dtos);
686 __ pop_ptr(rdx);
687 // eax: index
688 // rdx: array
689 index_check(rdx, rax); // kills rbx
690 __ movdbl(xmm0, Address(rdx, rax,
691 Address::times_8,
692 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
693 }
695 void TemplateTable::aaload() {
696 transition(itos, atos);
697 __ pop_ptr(rdx);
698 // eax: index
699 // rdx: array
700 index_check(rdx, rax); // kills rbx
701 __ load_heap_oop(rax, Address(rdx, rax,
702 UseCompressedOops ? Address::times_4 : Address::times_8,
703 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
704 }
706 void TemplateTable::baload() {
707 transition(itos, itos);
708 __ pop_ptr(rdx);
709 // eax: index
710 // rdx: array
711 index_check(rdx, rax); // kills rbx
712 __ load_signed_byte(rax,
713 Address(rdx, rax,
714 Address::times_1,
715 arrayOopDesc::base_offset_in_bytes(T_BYTE)));
716 }
718 void TemplateTable::caload() {
719 transition(itos, itos);
720 __ pop_ptr(rdx);
721 // eax: index
722 // rdx: array
723 index_check(rdx, rax); // kills rbx
724 __ load_unsigned_short(rax,
725 Address(rdx, rax,
726 Address::times_2,
727 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
728 }
730 // iload followed by caload frequent pair
731 void TemplateTable::fast_icaload() {
732 transition(vtos, itos);
733 // load index out of locals
734 locals_index(rbx);
735 __ movl(rax, iaddress(rbx));
737 // eax: index
738 // rdx: array
739 __ pop_ptr(rdx);
740 index_check(rdx, rax); // kills rbx
741 __ load_unsigned_short(rax,
742 Address(rdx, rax,
743 Address::times_2,
744 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
745 }
747 void TemplateTable::saload() {
748 transition(itos, itos);
749 __ pop_ptr(rdx);
750 // eax: index
751 // rdx: array
752 index_check(rdx, rax); // kills rbx
753 __ load_signed_short(rax,
754 Address(rdx, rax,
755 Address::times_2,
756 arrayOopDesc::base_offset_in_bytes(T_SHORT)));
757 }
759 void TemplateTable::iload(int n) {
760 transition(vtos, itos);
761 __ movl(rax, iaddress(n));
762 }
764 void TemplateTable::lload(int n) {
765 transition(vtos, ltos);
766 __ movq(rax, laddress(n));
767 }
769 void TemplateTable::fload(int n) {
770 transition(vtos, ftos);
771 __ movflt(xmm0, faddress(n));
772 }
774 void TemplateTable::dload(int n) {
775 transition(vtos, dtos);
776 __ movdbl(xmm0, daddress(n));
777 }
779 void TemplateTable::aload(int n) {
780 transition(vtos, atos);
781 __ movptr(rax, aaddress(n));
782 }
784 void TemplateTable::aload_0() {
785 transition(vtos, atos);
786 // According to bytecode histograms, the pairs:
787 //
788 // _aload_0, _fast_igetfield
789 // _aload_0, _fast_agetfield
790 // _aload_0, _fast_fgetfield
791 //
792 // occur frequently. If RewriteFrequentPairs is set, the (slow)
793 // _aload_0 bytecode checks if the next bytecode is either
794 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
795 // rewrites the current bytecode into a pair bytecode; otherwise it
796 // rewrites the current bytecode into _fast_aload_0 that doesn't do
797 // the pair check anymore.
798 //
799 // Note: If the next bytecode is _getfield, the rewrite must be
800 // delayed, otherwise we may miss an opportunity for a pair.
801 //
802 // Also rewrite frequent pairs
803 // aload_0, aload_1
804 // aload_0, iload_1
805 // These bytecodes, which carry little code of their own, are the most
806 // profitable to rewrite.
807 if (RewriteFrequentPairs) {
808 Label rewrite, done;
809 const Register bc = c_rarg3;
810 assert(rbx != bc, "register damaged");
811 // get next byte
812 __ load_unsigned_byte(rbx,
813 at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
815 // do actual aload_0
816 aload(0);
818 // if _getfield then wait to rewrite
819 __ cmpl(rbx, Bytecodes::_getfield);
820 __ jcc(Assembler::equal, done);
822 // if _igetfield then rewrite to _fast_iaccess_0
823 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
824 Bytecodes::_aload_0,
825 "fix bytecode definition");
826 __ cmpl(rbx, Bytecodes::_fast_igetfield);
827 __ movl(bc, Bytecodes::_fast_iaccess_0);
828 __ jccb(Assembler::equal, rewrite);
830 // if _agetfield then rewrite to _fast_aaccess_0
831 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
832 Bytecodes::_aload_0,
833 "fix bytecode definition");
834 __ cmpl(rbx, Bytecodes::_fast_agetfield);
835 __ movl(bc, Bytecodes::_fast_aaccess_0);
836 __ jccb(Assembler::equal, rewrite);
838 // if _fgetfield then rewrite to _fast_faccess_0
839 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
840 Bytecodes::_aload_0,
841 "fix bytecode definition");
842 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
843 __ movl(bc, Bytecodes::_fast_faccess_0);
844 __ jccb(Assembler::equal, rewrite);
846 // else rewrite to _fast_aload_0
847 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
848 Bytecodes::_aload_0,
849 "fix bytecode definition");
850 __ movl(bc, Bytecodes::_fast_aload_0);
852 // rewrite
853 // bc: fast bytecode
854 __ bind(rewrite);
855 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
857 __ bind(done);
858 } else {
859 aload(0);
860 }
861 }
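The rewrite decision implemented above, restated as straight-line pseudocode (a sketch mirroring the assembly; next is the bytecode following aload_0):

Bytecodes::Code aload_0_rewrite_sketch(Bytecodes::Code next) {
  switch (next) {
  case Bytecodes::_getfield:       return Bytecodes::_aload_0;        // wait: a pair may still form
  case Bytecodes::_fast_igetfield: return Bytecodes::_fast_iaccess_0; // fuse the pair
  case Bytecodes::_fast_agetfield: return Bytecodes::_fast_aaccess_0;
  case Bytecodes::_fast_fgetfield: return Bytecodes::_fast_faccess_0;
  default:                         return Bytecodes::_fast_aload_0;   // stop checking for pairs
  }
}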
863 void TemplateTable::istore() {
864 transition(itos, vtos);
865 locals_index(rbx);
866 __ movl(iaddress(rbx), rax);
867 }
869 void TemplateTable::lstore() {
870 transition(ltos, vtos);
871 locals_index(rbx);
872 __ movq(laddress(rbx), rax);
873 }
875 void TemplateTable::fstore() {
876 transition(ftos, vtos);
877 locals_index(rbx);
878 __ movflt(faddress(rbx), xmm0);
879 }
881 void TemplateTable::dstore() {
882 transition(dtos, vtos);
883 locals_index(rbx);
884 __ movdbl(daddress(rbx), xmm0);
885 }
887 void TemplateTable::astore() {
888 transition(vtos, vtos);
889 __ pop_ptr(rax);
890 locals_index(rbx);
891 __ movptr(aaddress(rbx), rax);
892 }
894 void TemplateTable::wide_istore() {
895 transition(vtos, vtos);
896 __ pop_i();
897 locals_index_wide(rbx);
898 __ movl(iaddress(rbx), rax);
899 }
901 void TemplateTable::wide_lstore() {
902 transition(vtos, vtos);
903 __ pop_l();
904 locals_index_wide(rbx);
905 __ movq(laddress(rbx), rax);
906 }
908 void TemplateTable::wide_fstore() {
909 transition(vtos, vtos);
910 __ pop_f();
911 locals_index_wide(rbx);
912 __ movflt(faddress(rbx), xmm0);
913 }
915 void TemplateTable::wide_dstore() {
916 transition(vtos, vtos);
917 __ pop_d();
918 locals_index_wide(rbx);
919 __ movdbl(daddress(rbx), xmm0);
920 }
922 void TemplateTable::wide_astore() {
923 transition(vtos, vtos);
924 __ pop_ptr(rax);
925 locals_index_wide(rbx);
926 __ movptr(aaddress(rbx), rax);
927 }
929 void TemplateTable::iastore() {
930 transition(itos, vtos);
931 __ pop_i(rbx);
932 __ pop_ptr(rdx);
933 // eax: value
934 // ebx: index
935 // rdx: array
936 index_check(rdx, rbx); // prefer index in ebx
937 __ movl(Address(rdx, rbx,
938 Address::times_4,
939 arrayOopDesc::base_offset_in_bytes(T_INT)),
940 rax);
941 }
943 void TemplateTable::lastore() {
944 transition(ltos, vtos);
945 __ pop_i(rbx);
946 __ pop_ptr(rdx);
947 // rax: value
948 // ebx: index
949 // rdx: array
950 index_check(rdx, rbx); // prefer index in ebx
951 __ movq(Address(rdx, rbx,
952 Address::times_8,
953 arrayOopDesc::base_offset_in_bytes(T_LONG)),
954 rax);
955 }
957 void TemplateTable::fastore() {
958 transition(ftos, vtos);
959 __ pop_i(rbx);
960 __ pop_ptr(rdx);
961 // xmm0: value
962 // ebx: index
963 // rdx: array
964 index_check(rdx, rbx); // prefer index in ebx
965 __ movflt(Address(rdx, rbx,
966 Address::times_4,
967 arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
968 xmm0);
969 }
971 void TemplateTable::dastore() {
972 transition(dtos, vtos);
973 __ pop_i(rbx);
974 __ pop_ptr(rdx);
975 // xmm0: value
976 // ebx: index
977 // rdx: array
978 index_check(rdx, rbx); // prefer index in ebx
979 __ movdbl(Address(rdx, rbx,
980 Address::times_8,
981 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
982 xmm0);
983 }
985 void TemplateTable::aastore() {
986 Label is_null, ok_is_subtype, done;
987 transition(vtos, vtos);
988 // stack: ..., array, index, value
989 __ movptr(rax, at_tos()); // value
990 __ movl(rcx, at_tos_p1()); // index
991 __ movptr(rdx, at_tos_p2()); // array
993 Address element_address(rdx, rcx,
994 UseCompressedOops? Address::times_4 : Address::times_8,
995 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
997 index_check(rdx, rcx); // kills rbx
998 // do array store check - check for NULL value first
999 __ testptr(rax, rax);
1000 __ jcc(Assembler::zero, is_null);
1002 // Move subklass into rbx
1003 __ load_klass(rbx, rax);
1004 // Move superklass into rax
1005 __ load_klass(rax, rdx);
1006 __ movptr(rax, Address(rax,
1007 objArrayKlass::element_klass_offset()));
1008 // Compress array + index*oopSize + 12 into a single register. Frees rcx.
1009 __ lea(rdx, element_address);
1011 // Generate subtype check. Blows rcx, rdi
1012 // Superklass in rax. Subklass in rbx.
1013 __ gen_subtype_check(rbx, ok_is_subtype);
1015 // Come here on failure
1016 // object is at TOS
1017 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1019 // Come here on success
1020 __ bind(ok_is_subtype);
1022 // Get the value we will store
1023 __ movptr(rax, at_tos());
1024 // Now store using the appropriate barrier
1025 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
1026 __ jmp(done);
1028 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
1029 __ bind(is_null);
1030 __ profile_null_seen(rbx);
1032 // Store a NULL
1033 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1035 // Pop stack arguments
1036 __ bind(done);
1037 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1038 }
1040 void TemplateTable::bastore() {
1041 transition(itos, vtos);
1042 __ pop_i(rbx);
1043 __ pop_ptr(rdx);
1044 // eax: value
1045 // ebx: index
1046 // rdx: array
1047 index_check(rdx, rbx); // prefer index in ebx
1048 __ movb(Address(rdx, rbx,
1049 Address::times_1,
1050 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1051 rax);
1052 }
1054 void TemplateTable::castore() {
1055 transition(itos, vtos);
1056 __ pop_i(rbx);
1057 __ pop_ptr(rdx);
1058 // eax: value
1059 // ebx: index
1060 // rdx: array
1061 index_check(rdx, rbx); // prefer index in ebx
1062 __ movw(Address(rdx, rbx,
1063 Address::times_2,
1064 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1065 rax);
1066 }
1068 void TemplateTable::sastore() {
1069 castore();
1070 }
1072 void TemplateTable::istore(int n) {
1073 transition(itos, vtos);
1074 __ movl(iaddress(n), rax);
1075 }
1077 void TemplateTable::lstore(int n) {
1078 transition(ltos, vtos);
1079 __ movq(laddress(n), rax);
1080 }
1082 void TemplateTable::fstore(int n) {
1083 transition(ftos, vtos);
1084 __ movflt(faddress(n), xmm0);
1085 }
1087 void TemplateTable::dstore(int n) {
1088 transition(dtos, vtos);
1089 __ movdbl(daddress(n), xmm0);
1090 }
1092 void TemplateTable::astore(int n) {
1093 transition(vtos, vtos);
1094 __ pop_ptr(rax);
1095 __ movptr(aaddress(n), rax);
1096 }
1098 void TemplateTable::pop() {
1099 transition(vtos, vtos);
1100 __ addptr(rsp, Interpreter::stackElementSize);
1101 }
1103 void TemplateTable::pop2() {
1104 transition(vtos, vtos);
1105 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1106 }
1108 void TemplateTable::dup() {
1109 transition(vtos, vtos);
1110 __ load_ptr(0, rax);
1111 __ push_ptr(rax);
1112 // stack: ..., a, a
1113 }
1115 void TemplateTable::dup_x1() {
1116 transition(vtos, vtos);
1117 // stack: ..., a, b
1118 __ load_ptr( 0, rax); // load b
1119 __ load_ptr( 1, rcx); // load a
1120 __ store_ptr(1, rax); // store b
1121 __ store_ptr(0, rcx); // store a
1122 __ push_ptr(rax); // push b
1123 // stack: ..., b, a, b
1124 }
1126 void TemplateTable::dup_x2() {
1127 transition(vtos, vtos);
1128 // stack: ..., a, b, c
1129 __ load_ptr( 0, rax); // load c
1130 __ load_ptr( 2, rcx); // load a
1131 __ store_ptr(2, rax); // store c in a
1132 __ push_ptr(rax); // push c
1133 // stack: ..., c, b, c, c
1134 __ load_ptr( 2, rax); // load b
1135 __ store_ptr(2, rcx); // store a in b
1136 // stack: ..., c, a, c, c
1137 __ store_ptr(1, rax); // store b in c
1138 // stack: ..., c, a, b, c
1139 }
1141 void TemplateTable::dup2() {
1142 transition(vtos, vtos);
1143 // stack: ..., a, b
1144 __ load_ptr(1, rax); // load a
1145 __ push_ptr(rax); // push a
1146 __ load_ptr(1, rax); // load b
1147 __ push_ptr(rax); // push b
1148 // stack: ..., a, b, a, b
1149 }
1151 void TemplateTable::dup2_x1() {
1152 transition(vtos, vtos);
1153 // stack: ..., a, b, c
1154 __ load_ptr( 0, rcx); // load c
1155 __ load_ptr( 1, rax); // load b
1156 __ push_ptr(rax); // push b
1157 __ push_ptr(rcx); // push c
1158 // stack: ..., a, b, c, b, c
1159 __ store_ptr(3, rcx); // store c in b
1160 // stack: ..., a, c, c, b, c
1161 __ load_ptr( 4, rcx); // load a
1162 __ store_ptr(2, rcx); // store a in 2nd c
1163 // stack: ..., a, c, a, b, c
1164 __ store_ptr(4, rax); // store b in a
1165 // stack: ..., b, c, a, b, c
1166 }
1168 void TemplateTable::dup2_x2() {
1169 transition(vtos, vtos);
1170 // stack: ..., a, b, c, d
1171 __ load_ptr( 0, rcx); // load d
1172 __ load_ptr( 1, rax); // load c
1173 __ push_ptr(rax); // push c
1174 __ push_ptr(rcx); // push d
1175 // stack: ..., a, b, c, d, c, d
1176 __ load_ptr( 4, rax); // load b
1177 __ store_ptr(2, rax); // store b in d
1178 __ store_ptr(4, rcx); // store d in b
1179 // stack: ..., a, d, c, b, c, d
1180 __ load_ptr( 5, rcx); // load a
1181 __ load_ptr( 3, rax); // load c
1182 __ store_ptr(3, rcx); // store a in c
1183 __ store_ptr(5, rax); // store c in a
1184 // stack: ..., c, d, a, b, c, d
1185 }
1187 void TemplateTable::swap() {
1188 transition(vtos, vtos);
1189 // stack: ..., a, b
1190 __ load_ptr( 1, rcx); // load a
1191 __ load_ptr( 0, rax); // load b
1192 __ store_ptr(0, rcx); // store a in b
1193 __ store_ptr(1, rax); // store b in a
1194 // stack: ..., b, a
1195 }
1197 void TemplateTable::iop2(Operation op) {
1198 transition(itos, itos);
1199 switch (op) {
1200 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1201 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1202 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1203 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1204 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1205 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1206 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
1207 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
1208 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
1209 default : ShouldNotReachHere();
1210 }
1211 }
1213 void TemplateTable::lop2(Operation op) {
1214 transition(ltos, ltos);
1215 switch (op) {
1216 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
1217 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
1218 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
1219 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
1220 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
1221 default : ShouldNotReachHere();
1222 }
1223 }
1225 void TemplateTable::idiv() {
1226 transition(itos, itos);
1227 __ movl(rcx, rax);
1228 __ pop_i(rax);
1229 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1230 // they are not equal, one could do a normal division (no correction
1231 // needed), which may speed up this implementation for the common case.
1232 // (see also JVM spec., p.243 & p.271)
1233 __ corrected_idivl(rcx);
1234 }
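The corrected_idivl call exists because the one overflowing case must not reach the hardware divider: per the JVM spec, min_int / -1 wraps to min_int, while x86 idiv would raise #DE. A minimal sketch of the correction (divide-by-zero is left to the hardware trap, as in the interpreter):

#include <limits.h>

int corrected_idiv_sketch(int dividend, int divisor) {
  if (dividend == INT_MIN && divisor == -1) return INT_MIN; // JVM-spec'd wraparound
  return dividend / divisor;                                // hardware idiv is safe otherwise
}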
1236 void TemplateTable::irem() {
1237 transition(itos, itos);
1238 __ movl(rcx, rax);
1239 __ pop_i(rax);
1240 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1241 // they are not equal, one could do a normal division (no correction
1242 // needed), which may speed up this implementation for the common case.
1243 // (see also JVM spec., p.243 & p.271)
1244 __ corrected_idivl(rcx);
1245 __ movl(rax, rdx);
1246 }
1248 void TemplateTable::lmul() {
1249 transition(ltos, ltos);
1250 __ pop_l(rdx);
1251 __ imulq(rax, rdx);
1252 }
1254 void TemplateTable::ldiv() {
1255 transition(ltos, ltos);
1256 __ mov(rcx, rax);
1257 __ pop_l(rax);
1258 // generate explicit div0 check
1259 __ testq(rcx, rcx);
1260 __ jump_cc(Assembler::zero,
1261 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1262 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1263 // they are not equal, one could do a normal division (no correction
1264 // needed), which may speed up this implementation for the common case.
1265 // (see also JVM spec., p.243 & p.271)
1266 __ corrected_idivq(rcx); // kills rbx
1267 }
1269 void TemplateTable::lrem() {
1270 transition(ltos, ltos);
1271 __ mov(rcx, rax);
1272 __ pop_l(rax);
1273 __ testq(rcx, rcx);
1274 __ jump_cc(Assembler::zero,
1275 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1276 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1277 // they are not equal, one could do a normal division (no correction
1278 // needed), which may speed up this implementation for the common case.
1279 // (see also JVM spec., p.243 & p.271)
1280 __ corrected_idivq(rcx); // kills rbx
1281 __ mov(rax, rdx);
1282 }
1284 void TemplateTable::lshl() {
1285 transition(itos, ltos);
1286 __ movl(rcx, rax); // get shift count
1287 __ pop_l(rax); // get shift value
1288 __ shlq(rax);
1289 }
1291 void TemplateTable::lshr() {
1292 transition(itos, ltos);
1293 __ movl(rcx, rax); // get shift count
1294 __ pop_l(rax); // get shift value
1295 __ sarq(rax);
1296 }
1298 void TemplateTable::lushr() {
1299 transition(itos, ltos);
1300 __ movl(rcx, rax); // get shift count
1301 __ pop_l(rax); // get shift value
1302 __ shrq(rax);
1303 }
1305 void TemplateTable::fop2(Operation op) {
1306 transition(ftos, ftos);
1307 switch (op) {
1308 case add:
1309 __ addss(xmm0, at_rsp());
1310 __ addptr(rsp, Interpreter::stackElementSize);
1311 break;
1312 case sub:
1313 __ movflt(xmm1, xmm0);
1314 __ pop_f(xmm0);
1315 __ subss(xmm0, xmm1);
1316 break;
1317 case mul:
1318 __ mulss(xmm0, at_rsp());
1319 __ addptr(rsp, Interpreter::stackElementSize);
1320 break;
1321 case div:
1322 __ movflt(xmm1, xmm0);
1323 __ pop_f(xmm0);
1324 __ divss(xmm0, xmm1);
1325 break;
1326 case rem:
1327 __ movflt(xmm1, xmm0);
1328 __ pop_f(xmm0);
1329 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1330 break;
1331 default:
1332 ShouldNotReachHere();
1333 break;
1334 }
1335 }
1337 void TemplateTable::dop2(Operation op) {
1338 transition(dtos, dtos);
1339 switch (op) {
1340 case add:
1341 __ addsd(xmm0, at_rsp());
1342 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1343 break;
1344 case sub:
1345 __ movdbl(xmm1, xmm0);
1346 __ pop_d(xmm0);
1347 __ subsd(xmm0, xmm1);
1348 break;
1349 case mul:
1350 __ mulsd(xmm0, at_rsp());
1351 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1352 break;
1353 case div:
1354 __ movdbl(xmm1, xmm0);
1355 __ pop_d(xmm0);
1356 __ divsd(xmm0, xmm1);
1357 break;
1358 case rem:
1359 __ movdbl(xmm1, xmm0);
1360 __ pop_d(xmm0);
1361 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1362 break;
1363 default:
1364 ShouldNotReachHere();
1365 break;
1366 }
1367 }
1369 void TemplateTable::ineg() {
1370 transition(itos, itos);
1371 __ negl(rax);
1372 }
1374 void TemplateTable::lneg() {
1375 transition(ltos, ltos);
1376 __ negq(rax);
1377 }
1379 // Note: 'double' and 'long long' have 32-bit alignment on x86.
1380 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1381 // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address
1382 // for the 128-bit operands of SSE instructions.
1383 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1384 // Store the value into a 128-bit operand.
1385 operand[0] = lo;
1386 operand[1] = hi;
1387 return operand;
1388 }
1390 // Buffer for the 128-bit masks used by SSE instructions.
1391 static jlong float_signflip_pool[2*2];
1392 static jlong double_signflip_pool[2*2];
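The pools are deliberately twice the needed size: double_quadword rounds &pool[1] down to a 16-byte boundary, which lands on either pool[0] or pool[1], so the two written jlongs always stay inside the buffer. A standalone sketch checking both properties:

#include <assert.h>
#include <stdint.h>

typedef long long jlong;
static jlong pool[2*2];

int main() {
  jlong* operand = (jlong*)(((intptr_t)&pool[1]) & ((intptr_t)(~0xF)));
  operand[0] = operand[1] = (jlong)0x8000000080000000ULL;
  assert(((intptr_t)operand & 0xF) == 0);             // 16-byte aligned for SSE
  assert(operand >= pool && operand + 2 <= pool + 4); // still inside the pool
  return 0;
}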
1394 void TemplateTable::fneg() {
1395 transition(ftos, ftos);
1396 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
1397 __ xorps(xmm0, ExternalAddress((address) float_signflip));
1398 }
1400 void TemplateTable::dneg() {
1401 transition(dtos, dtos);
1402 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
1403 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1404 }
1406 void TemplateTable::iinc() {
1407 transition(vtos, vtos);
1408 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1409 locals_index(rbx);
1410 __ addl(iaddress(rbx), rdx);
1411 }
1413 void TemplateTable::wide_iinc() {
1414 transition(vtos, vtos);
1415 __ movl(rdx, at_bcp(4)); // get constant
1416 locals_index_wide(rbx);
1417 __ bswapl(rdx); // swap bytes & sign-extend constant
1418 __ sarl(rdx, 16);
1419 __ addl(iaddress(rbx), rdx);
1420 // Note: should probably use only one movl to get both
1421 // the index and the constant -> fix this
1422 }
1424 void TemplateTable::convert() {
1425 // Checking
1426 #ifdef ASSERT
1427 {
1428 TosState tos_in = ilgl;
1429 TosState tos_out = ilgl;
1430 switch (bytecode()) {
1431 case Bytecodes::_i2l: // fall through
1432 case Bytecodes::_i2f: // fall through
1433 case Bytecodes::_i2d: // fall through
1434 case Bytecodes::_i2b: // fall through
1435 case Bytecodes::_i2c: // fall through
1436 case Bytecodes::_i2s: tos_in = itos; break;
1437 case Bytecodes::_l2i: // fall through
1438 case Bytecodes::_l2f: // fall through
1439 case Bytecodes::_l2d: tos_in = ltos; break;
1440 case Bytecodes::_f2i: // fall through
1441 case Bytecodes::_f2l: // fall through
1442 case Bytecodes::_f2d: tos_in = ftos; break;
1443 case Bytecodes::_d2i: // fall through
1444 case Bytecodes::_d2l: // fall through
1445 case Bytecodes::_d2f: tos_in = dtos; break;
1446 default : ShouldNotReachHere();
1447 }
1448 switch (bytecode()) {
1449 case Bytecodes::_l2i: // fall through
1450 case Bytecodes::_f2i: // fall through
1451 case Bytecodes::_d2i: // fall through
1452 case Bytecodes::_i2b: // fall through
1453 case Bytecodes::_i2c: // fall through
1454 case Bytecodes::_i2s: tos_out = itos; break;
1455 case Bytecodes::_i2l: // fall through
1456 case Bytecodes::_f2l: // fall through
1457 case Bytecodes::_d2l: tos_out = ltos; break;
1458 case Bytecodes::_i2f: // fall through
1459 case Bytecodes::_l2f: // fall through
1460 case Bytecodes::_d2f: tos_out = ftos; break;
1461 case Bytecodes::_i2d: // fall through
1462 case Bytecodes::_l2d: // fall through
1463 case Bytecodes::_f2d: tos_out = dtos; break;
1464 default : ShouldNotReachHere();
1465 }
1466 transition(tos_in, tos_out);
1467 }
1468 #endif // ASSERT
1470 static const int64_t is_nan = 0x8000000000000000L;
1472 // Conversion
1473 switch (bytecode()) {
1474 case Bytecodes::_i2l:
1475 __ movslq(rax, rax);
1476 break;
1477 case Bytecodes::_i2f:
1478 __ cvtsi2ssl(xmm0, rax);
1479 break;
1480 case Bytecodes::_i2d:
1481 __ cvtsi2sdl(xmm0, rax);
1482 break;
1483 case Bytecodes::_i2b:
1484 __ movsbl(rax, rax);
1485 break;
1486 case Bytecodes::_i2c:
1487 __ movzwl(rax, rax);
1488 break;
1489 case Bytecodes::_i2s:
1490 __ movswl(rax, rax);
1491 break;
1492 case Bytecodes::_l2i:
1493 __ movl(rax, rax);
1494 break;
1495 case Bytecodes::_l2f:
1496 __ cvtsi2ssq(xmm0, rax);
1497 break;
1498 case Bytecodes::_l2d:
1499 __ cvtsi2sdq(xmm0, rax);
1500 break;
1501 case Bytecodes::_f2i:
1502 {
1503 Label L;
1504 __ cvttss2sil(rax, xmm0);
1505 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1506 __ jcc(Assembler::notEqual, L);
1507 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1508 __ bind(L);
1509 }
1510 break;
1511 case Bytecodes::_f2l:
1512 {
1513 Label L;
1514 __ cvttss2siq(rax, xmm0);
1515 // NaN or overflow/underflow?
1516 __ cmp64(rax, ExternalAddress((address) &is_nan));
1517 __ jcc(Assembler::notEqual, L);
1518 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1519 __ bind(L);
1520 }
1521 break;
1522 case Bytecodes::_f2d:
1523 __ cvtss2sd(xmm0, xmm0);
1524 break;
1525 case Bytecodes::_d2i:
1526 {
1527 Label L;
1528 __ cvttsd2sil(rax, xmm0);
1529 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1530 __ jcc(Assembler::notEqual, L);
1531 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1532 __ bind(L);
1533 }
1534 break;
1535 case Bytecodes::_d2l:
1536 {
1537 Label L;
1538 __ cvttsd2siq(rax, xmm0);
1539 // NaN or overflow/underflow?
1540 __ cmp64(rax, ExternalAddress((address) &is_nan));
1541 __ jcc(Assembler::notEqual, L);
1542 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1543 __ bind(L);
1544 }
1545 break;
1546 case Bytecodes::_d2f:
1547 __ cvtsd2ss(xmm0, xmm0);
1548 break;
1549 default:
1550 ShouldNotReachHere();
1551 }
1552 }
1554 void TemplateTable::lcmp() {
1555 transition(ltos, itos);
1556 Label done;
1557 __ pop_l(rdx);
1558 __ cmpq(rdx, rax);
1559 __ movl(rax, -1);
1560 __ jccb(Assembler::less, done);
1561 __ setb(Assembler::notEqual, rax);
1562 __ movzbl(rax, rax);
1563 __ bind(done);
1564 }
1566 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1567 Label done;
1568 if (is_float) {
1569 // XXX get rid of pop here, use ... reg, mem32
1570 __ pop_f(xmm1);
1571 __ ucomiss(xmm1, xmm0);
1572 } else {
1573 // XXX get rid of pop here, use ... reg, mem64
1574 __ pop_d(xmm1);
1575 __ ucomisd(xmm1, xmm0);
1576 }
1577 if (unordered_result < 0) {
1578 __ movl(rax, -1);
1579 __ jccb(Assembler::parity, done);
1580 __ jccb(Assembler::below, done);
1581 __ setb(Assembler::notEqual, rdx);
1582 __ movzbl(rax, rdx);
1583 } else {
1584 __ movl(rax, 1);
1585 __ jccb(Assembler::parity, done);
1586 __ jccb(Assembler::above, done);
1587 __ movl(rax, 0);
1588 __ jccb(Assembler::equal, done);
1589 __ decrementl(rax);
1590 }
1591 __ bind(done);
1592 }
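The unordered_result parameter selects between the two JVM comparison flavors: fcmpl/dcmpl push -1 on an unordered (NaN) operand, fcmpg/dcmpg push +1. The whole routine reduces to this sketch:

int float_cmp_sketch(double a, double b, int unordered_result) {
  if (a != a || b != b) return unordered_result;  // NaN involved: unordered
  return (a < b) ? -1 : ((a > b) ? 1 : 0);
}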
1594 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1595 __ get_method(rcx); // rcx holds method
1596 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
1597 // holds bumped taken count
1599 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
1600 InvocationCounter::counter_offset();
1601 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
1602 InvocationCounter::counter_offset();
1603 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1605 // Load up edx with the branch displacement
1606 __ movl(rdx, at_bcp(1));
1607 __ bswapl(rdx);
1609 if (!is_wide) {
1610 __ sarl(rdx, 16);
1611 }
1612 __ movl2ptr(rdx, rdx);
1614 // Handle all the JSR stuff here, then exit.
1615 // It's much shorter and cleaner than intermingling with the non-JSR
1616 // normal-branch stuff occurring below.
1617 if (is_jsr) {
1618 // Pre-load the next target bytecode into rbx
1619 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));
1621 // compute return address as bci in rax
1622 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
1623 in_bytes(constMethodOopDesc::codes_offset())));
1624 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1625 // Adjust the bcp in r13 by the displacement in rdx
1626 __ addptr(r13, rdx);
1627 // jsr returns atos that is not an oop
1628 __ push_i(rax);
1629 __ dispatch_only(vtos);
1630 return;
1631 }
1633 // Normal (non-jsr) branch handling
1635 // Adjust the bcp in r13 by the displacement in rdx
1636 __ addptr(r13, rdx);
1638 assert(UseLoopCounter || !UseOnStackReplacement,
1639 "on-stack-replacement requires loop counters");
1640 Label backedge_counter_overflow;
1641 Label profile_method;
1642 Label dispatch;
1643 if (UseLoopCounter) {
1644 // increment backedge counter for backward branches
1645 // rax: MDO
1646 // ebx: MDO bumped taken-count
1647 // rcx: method
1648 // rdx: target offset
1649 // r13: target bcp
1650 // r14: locals pointer
1651 __ testl(rdx, rdx); // check if forward or backward branch
1652 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1653 if (TieredCompilation) {
1654 Label no_mdo;
1655 int increment = InvocationCounter::count_increment;
1656 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1657 if (ProfileInterpreter) {
1658 // Are we profiling?
1659 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1660 __ testptr(rbx, rbx);
1661 __ jccb(Assembler::zero, no_mdo);
1662 // Increment the MDO backedge counter
1663 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1664 in_bytes(InvocationCounter::counter_offset()));
1665 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1666 rax, false, Assembler::zero, &backedge_counter_overflow);
1667 __ jmp(dispatch);
1668 }
1669 __ bind(no_mdo);
1670 // Increment backedge counter in methodOop
1671 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1672 rax, false, Assembler::zero, &backedge_counter_overflow);
1673 } else {
1674 // increment counter
1675 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1676 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1677 __ movl(Address(rcx, be_offset), rax); // store counter
1679 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1680 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1681 __ addl(rax, Address(rcx, be_offset)); // add both counters
1683 if (ProfileInterpreter) {
1684 // Test to see if we should create a method data oop
1685 __ cmp32(rax,
1686 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1687 __ jcc(Assembler::less, dispatch);
1689 // if no method data exists, go to profile method
1690 __ test_method_data_pointer(rax, profile_method);
1692 if (UseOnStackReplacement) {
1693 // check for overflow against ebx which is the MDO taken count
1694 __ cmp32(rbx,
1695 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1696 __ jcc(Assembler::below, dispatch);
1698 // When ProfileInterpreter is on, the backedge_count comes
1699 // from the methodDataOop, which value does not get reset on
1700 // the call to frequency_counter_overflow(). To avoid
1701 // excessive calls to the overflow routine while the method is
1702 // being compiled, add a second test to make sure the overflow
1703 // function is called only once every overflow_frequency.
1704 const int overflow_frequency = 1024;
1705 __ andl(rbx, overflow_frequency - 1);
1706 __ jcc(Assembler::zero, backedge_counter_overflow);
1708 }
1709 } else {
1710 if (UseOnStackReplacement) {
1711 // check for overflow against eax, which is the sum of the
1712 // counters
1713 __ cmp32(rax,
1714 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1715 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1717 }
1718 }
1719 }
1720 __ bind(dispatch);
1721 }
1723 // Pre-load the next target bytecode into rbx
1724 __ load_unsigned_byte(rbx, Address(r13, 0));
1726 // continue with the bytecode @ target
1727 // eax: return bci for jsr's, unused otherwise
1728 // ebx: target bytecode
1729 // r13: target bcp
1730 __ dispatch_only(vtos);
1732 if (UseLoopCounter) {
1733 if (ProfileInterpreter) {
1734 // Out-of-line code to allocate method data oop.
1735 __ bind(profile_method);
1736 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1737 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1738 __ set_method_data_pointer_for_bcp();
1739 __ jmp(dispatch);
1740 }
1742 if (UseOnStackReplacement) {
1743 // invocation counter overflow
1744 __ bind(backedge_counter_overflow);
1745 __ negptr(rdx);
1746 __ addptr(rdx, r13); // branch bcp
1747 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1748 __ call_VM(noreg,
1749 CAST_FROM_FN_PTR(address,
1750 InterpreterRuntime::frequency_counter_overflow),
1751 rdx);
1752 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1754 // rax: osr nmethod (osr ok) or NULL (osr not possible)
1755 // ebx: target bytecode
1756 // rdx: scratch
1757 // r14: locals pointer
1758 // r13: bcp
1759 __ testptr(rax, rax); // test result
1760 __ jcc(Assembler::zero, dispatch); // no osr if null
1761 // nmethod may have been invalidated (VM may block upon call_VM return)
1762 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1763 __ cmpl(rcx, InvalidOSREntryBci);
1764 __ jcc(Assembler::equal, dispatch);
1766 // We have the address of an on stack replacement routine in eax
1767 // We need to prepare to execute the OSR method. First we must
1768 // migrate the locals and monitors off of the stack.
1770 __ mov(r13, rax); // save the nmethod
1772 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1774 // eax is OSR buffer, move it to expected parameter location
1775 __ mov(j_rarg0, rax);
1777 // We use j_rarg definitions here so that registers don't conflict: parameter
1778 // registers change across platforms, and we are in the midst of a calling
1779 // sequence to the OSR nmethod where we don't want collisions. These are NOT parameters.
1781 const Register retaddr = j_rarg2;
1782 const Register sender_sp = j_rarg1;
1784 // pop the interpreter frame
1785 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1786 __ leave(); // remove frame anchor
1787 __ pop(retaddr); // get return address
1788 __ mov(rsp, sender_sp); // set sp to sender sp
1789 // Ensure compiled code always sees stack at proper alignment
1790 __ andptr(rsp, -(StackAlignmentInBytes));
1792 // unlike 32-bit x86 we need no specialized return from compiled code
1793 // to the interpreter or the call stub.
1795 // push the return address
1796 __ push(retaddr);
1798 // and begin the OSR nmethod
1799 __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
1800 }
1801 }
1802 }
1805 void TemplateTable::if_0cmp(Condition cc) {
1806 transition(itos, vtos);
1807 // assume branch is more often taken than not (loops use backward branches)
1808 Label not_taken;
1809 __ testl(rax, rax);
1810 __ jcc(j_not(cc), not_taken);
1811 branch(false, false);
1812 __ bind(not_taken);
1813 __ profile_not_taken_branch(rax);
1814 }
1816 void TemplateTable::if_icmp(Condition cc) {
1817 transition(itos, vtos);
1818 // assume branch is more often taken than not (loops use backward branches)
1819 Label not_taken;
1820 __ pop_i(rdx);
1821 __ cmpl(rdx, rax);
1822 __ jcc(j_not(cc), not_taken);
1823 branch(false, false);
1824 __ bind(not_taken);
1825 __ profile_not_taken_branch(rax);
1826 }
1828 void TemplateTable::if_nullcmp(Condition cc) {
1829 transition(atos, vtos);
1830 // assume branch is more often taken than not (loops use backward branches)
1831 Label not_taken;
1832 __ testptr(rax, rax);
1833 __ jcc(j_not(cc), not_taken);
1834 branch(false, false);
1835 __ bind(not_taken);
1836 __ profile_not_taken_branch(rax);
1837 }
1839 void TemplateTable::if_acmp(Condition cc) {
1840 transition(atos, vtos);
1841 // assume branch is more often taken than not (loops use backward branches)
1842 Label not_taken;
1843 __ pop_ptr(rdx);
1844 __ cmpptr(rdx, rax);
1845 __ jcc(j_not(cc), not_taken);
1846 branch(false, false);
1847 __ bind(not_taken);
1848 __ profile_not_taken_branch(rax);
1849 }
1851 void TemplateTable::ret() {
1852 transition(vtos, vtos);
1853 locals_index(rbx);
1854 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
1855 __ profile_ret(rbx, rcx);
1856 __ get_method(rax);
1857 __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
1858 __ lea(r13, Address(r13, rbx, Address::times_1,
1859 constMethodOopDesc::codes_offset()));
1860 __ dispatch_next(vtos);
1861 }
1863 void TemplateTable::wide_ret() {
1864 transition(vtos, vtos);
1865 locals_index_wide(rbx);
1866 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
1867 __ profile_ret(rbx, rcx);
1868 __ get_method(rax);
1869 __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
1870 __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1871 __ dispatch_next(vtos);
1872 }
1874 void TemplateTable::tableswitch() {
1875 Label default_case, continue_execution;
1876 transition(itos, vtos);
1877 // align r13
1878 __ lea(rbx, at_bcp(BytesPerInt));
1879 __ andptr(rbx, -BytesPerInt);
1880 // load lo & hi
1881 __ movl(rcx, Address(rbx, BytesPerInt));
1882 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
1883 __ bswapl(rcx);
1884 __ bswapl(rdx);
1885 // check against lo & hi
1886 __ cmpl(rax, rcx);
1887 __ jcc(Assembler::less, default_case);
1888 __ cmpl(rax, rdx);
1889 __ jcc(Assembler::greater, default_case);
1890 // lookup dispatch offset
1891 __ subl(rax, rcx);
1892 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1893 __ profile_switch_case(rax, rbx, rcx);
1894 // continue execution
1895 __ bind(continue_execution);
1896 __ bswapl(rdx);
1897 __ movl2ptr(rdx, rdx);
1898 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1899 __ addptr(r13, rdx);
1900 __ dispatch_only(vtos);
1901 // handle default
1902 __ bind(default_case);
1903 __ profile_switch_default(rax);
1904 __ movl(rdx, Address(rbx, 0));
1905 __ jmp(continue_execution);
1906 }
1908 void TemplateTable::lookupswitch() {
1909 transition(itos, itos);
1910 __ stop("lookupswitch bytecode should have been rewritten");
1911 }
1913 void TemplateTable::fast_linearswitch() {
1914 transition(itos, vtos);
1915 Label loop_entry, loop, found, continue_execution;
1916 // bswap rax so we can avoid bswapping the table entries
1917 __ bswapl(rax);
1918 // align r13
1919 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1920 // this instruction (change offsets
1921 // below)
1922 __ andptr(rbx, -BytesPerInt);
1923 // set counter
1924 __ movl(rcx, Address(rbx, BytesPerInt));
1925 __ bswapl(rcx);
1926 __ jmpb(loop_entry);
1927 // table search
1928 __ bind(loop);
1929 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1930 __ jcc(Assembler::equal, found);
1931 __ bind(loop_entry);
1932 __ decrementl(rcx);
1933 __ jcc(Assembler::greaterEqual, loop);
1934 // default case
1935 __ profile_switch_default(rax);
1936 __ movl(rdx, Address(rbx, 0));
1937 __ jmp(continue_execution);
1938 // entry found -> get offset
1939 __ bind(found);
1940 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1941 __ profile_switch_case(rcx, rax, rbx);
1942 // continue execution
1943 __ bind(continue_execution);
1944 __ bswapl(rdx);
1945 __ movl2ptr(rdx, rdx);
1946 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1947 __ addptr(r13, rdx);
1948 __ dispatch_only(vtos);
1949 }
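// A sketch of the linear search above (illustrative only); the key is
// byte-swapped once so the table entries need no swapping:
//
//   jint* table  = (jint*) align_up(bcp + 1, BytesPerInt); // default, npairs, pairs...
//   jint  key_be = bswap(key);
//   jint  off    = table[0];                               // assume default
//   for (jint i = bswap(table[1]) - 1; i >= 0; i--) {
//     if (table[2 + 2*i] == key_be) { off = table[3 + 2*i]; break; }
//   }
//   bcp += bswap(off);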
1951 void TemplateTable::fast_binaryswitch() {
1952 transition(itos, vtos);
1953 // Implementation using the following core algorithm:
1954 //
1955 // int binary_search(int key, LookupswitchPair* array, int n) {
1956 // // Binary search according to "Methodik des Programmierens" by
1957 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1958 // int i = 0;
1959 // int j = n;
1960 // while (i+1 < j) {
1961 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1962 // // with Q: for all i: 0 <= i < n: key < a[i]
1963 // // where a stands for the array and assuming that the (nonexistent)
1964 // // element a[n] is infinitely big.
1965 // int h = (i + j) >> 1;
1966 // // i < h < j
1967 // if (key < array[h].fast_match()) {
1968 // j = h;
1969 // } else {
1970 // i = h;
1971 // }
1972 // }
1973 // // R: a[i] <= key < a[i+1] or Q
1974 // // (i.e., if key is within array, i is the correct index)
1975 // return i;
1976 // }
1978 // Register allocation
1979 const Register key = rax; // already set (tosca)
1980 const Register array = rbx;
1981 const Register i = rcx;
1982 const Register j = rdx;
1983 const Register h = rdi;
1984 const Register temp = rsi;
1986 // Find array start
1987 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1988 // get rid of this
1989 // instruction (change
1990 // offsets below)
1991 __ andptr(array, -BytesPerInt);
1993 // Initialize i & j
1994 __ xorl(i, i); // i = 0;
1995 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1997 // Convert j into native byteordering
1998 __ bswapl(j);
2000 // And start
2001 Label entry;
2002 __ jmp(entry);
2004 // binary search loop
2005 {
2006 Label loop;
2007 __ bind(loop);
2008 // int h = (i + j) >> 1;
2009 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2010 __ sarl(h, 1); // h = (i + j) >> 1;
2011 // if (key < array[h].fast_match()) {
2012 // j = h;
2013 // } else {
2014 // i = h;
2015 // }
2016 // Convert array[h].match to native byte-ordering before compare
2017 __ movl(temp, Address(array, h, Address::times_8));
2018 __ bswapl(temp);
2019 __ cmpl(key, temp);
2020 // j = h if (key < array[h].fast_match())
2021 __ cmovl(Assembler::less, j, h);
2022 // i = h if (key >= array[h].fast_match())
2023 __ cmovl(Assembler::greaterEqual, i, h);
2024 // while (i+1 < j)
2025 __ bind(entry);
2026 __ leal(h, Address(i, 1)); // i+1
2027 __ cmpl(h, j); // i+1 < j
2028 __ jcc(Assembler::less, loop);
2029 }
2031 // end of binary search, result index is i (must check again!)
2032 Label default_case;
2033 // Convert array[i].match to native byte-ordering before compare
2034 __ movl(temp, Address(array, i, Address::times_8));
2035 __ bswapl(temp);
2036 __ cmpl(key, temp);
2037 __ jcc(Assembler::notEqual, default_case);
2039 // entry found -> j = offset
2040 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2041 __ profile_switch_case(i, key, array);
2042 __ bswapl(j);
2043 __ movl2ptr(j, j);
2044 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2045 __ addptr(r13, j);
2046 __ dispatch_only(vtos);
2048 // default case -> j = default offset
2049 __ bind(default_case);
2050 __ profile_switch_default(i);
2051 __ movl(j, Address(array, -2 * BytesPerInt));
2052 __ bswapl(j);
2053 __ movl2ptr(j, j);
2054 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2055 __ addptr(r13, j);
2056 __ dispatch_only(vtos);
2057 }
2060 void TemplateTable::_return(TosState state) {
2061 transition(state, state);
2062 assert(_desc->calls_vm(),
2063 "inconsistent calls_vm information"); // call in remove_activation
2065 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2066 assert(state == vtos, "only valid state");
2067 __ movptr(c_rarg1, aaddress(0));
2068 __ load_klass(rdi, c_rarg1);
2069 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2070 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2071 Label skip_register_finalizer;
2072 __ jcc(Assembler::zero, skip_register_finalizer);
2074 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2076 __ bind(skip_register_finalizer);
2077 }
2079 __ remove_activation(state, r13);
2080 __ jmp(r13);
2081 }
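// The _return_register_finalizer path above corresponds roughly to the
// following pseudocode (illustrative only):
//
//   oop recv = local(0);
//   if (recv->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
//     InterpreterRuntime::register_finalizer(thread, recv);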
2083 // ----------------------------------------------------------------------------
2084 // Volatile variables demand their effects be made known to all CPUs
2085 // in order. Store buffers on most chips allow reads & writes to
2086 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2087 // without some kind of memory barrier (i.e., it's not sufficient that
2088 // the interpreter does not reorder volatile references; the hardware
2089 // also must not reorder them).
2090 //
2091 // According to the new Java Memory Model (JMM):
2092 // (1) All volatiles are serialized with respect to each other. ALSO reads &
2093 // writes act as acquire & release, so:
2094 // (2) A read cannot let unrelated NON-volatile memory refs that
2095 // happen after the read float up to before the read. It's OK for
2096 // non-volatile memory refs that happen before the volatile read to
2097 // float down below it.
2098 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2099 // memory refs that happen BEFORE the write float down to after the
2100 // write. It's OK for non-volatile memory refs that happen after the
2101 // volatile write to float up before it.
2102 //
2103 // We only put in barriers around volatile refs (they are expensive),
2104 // not _between_ memory refs (that would require us to track the
2105 // flavor of the previous memory refs). Requirements (2) and (3)
2106 // require some barriers before volatile stores and after volatile
2107 // loads. These nearly cover requirement (1) but miss the
2108 // volatile-store-volatile-load case. This final case is placed after
2109 // volatile-stores although it could just as well go before
2110 // volatile-loads.
2111 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2112 order_constraint) {
2113 // Helper function to insert a memory barrier for volatile accesses; the is-volatile test is done by the callers
2114 if (os::is_MP()) { // Not needed on single CPU
2115 __ membar(order_constraint);
2116 }
2117 }
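// For illustration (not generated code): under the scheme above, a volatile
// store in putfield_or_static() and fast_storefield() below is followed by
//
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
//
// which also covers the volatile-store-volatile-load case.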
2119 void TemplateTable::resolve_cache_and_index(int byte_no,
2120 Register result,
2121 Register Rcache,
2122 Register index,
2123 size_t index_size) {
2124 const Register temp = rbx;
2125 assert_different_registers(result, Rcache, index, temp);
2127 Label resolved;
2128 if (byte_no == f12_oop) {
2129 // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
2130 // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
2131 // there is a 1-1 relation between bytecode type and CP entry type.
2132 // The caller will also load a methodOop from f2.
2133 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2134 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2135 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2136 __ testptr(result, result);
2137 __ jcc(Assembler::notEqual, resolved);
2138 } else {
2139 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2140 assert(result == noreg, ""); //else change code for setting result
2141 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2142 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2143 __ jcc(Assembler::equal, resolved);
2144 }
2146 // resolve first time through
2147 address entry;
2148 switch (bytecode()) {
2149 case Bytecodes::_getstatic:
2150 case Bytecodes::_putstatic:
2151 case Bytecodes::_getfield:
2152 case Bytecodes::_putfield:
2153 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2154 break;
2155 case Bytecodes::_invokevirtual:
2156 case Bytecodes::_invokespecial:
2157 case Bytecodes::_invokestatic:
2158 case Bytecodes::_invokeinterface:
2159 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2160 break;
2161 case Bytecodes::_invokehandle:
2162 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2163 break;
2164 case Bytecodes::_invokedynamic:
2165 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2166 break;
2167 case Bytecodes::_fast_aldc:
2168 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2169 break;
2170 case Bytecodes::_fast_aldc_w:
2171 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2172 break;
2173 default:
2174 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2175 break;
2176 }
2177 __ movl(temp, (int) bytecode());
2178 __ call_VM(noreg, entry, temp);
2180 // Update registers with resolved info
2181 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2182 if (result != noreg)
2183 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2184 __ bind(resolved);
2185 }
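// In outline, the code above implements (illustrative pseudocode):
//
//   if (byte_no == f12_oop ? entry->f1 != NULL
//                          : entry->bytecode(byte_no) == (int) bytecode())
//     goto resolved;                        // fast path: already resolved
//   InterpreterRuntime::resolve_xxx(...);   // slow path fills in the entry
//   reload Rcache/index (and result);       // the VM call clobbers registers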
2187 // The cache and index registers must be set before the call
2188 void TemplateTable::load_field_cp_cache_entry(Register obj,
2189 Register cache,
2190 Register index,
2191 Register off,
2192 Register flags,
2193 bool is_static = false) {
2194 assert_different_registers(cache, index, flags, off);
2196 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2197 // Field offset
2198 __ movptr(off, Address(cache, index, Address::times_ptr,
2199 in_bytes(cp_base_offset +
2200 ConstantPoolCacheEntry::f2_offset())));
2201 // Flags
2202 __ movl(flags, Address(cache, index, Address::times_ptr,
2203 in_bytes(cp_base_offset +
2204 ConstantPoolCacheEntry::flags_offset())));
2206 // for static fields, overwrite the obj register with the holder klass
2207 if (is_static) {
2208 __ movptr(obj, Address(cache, index, Address::times_ptr,
2209 in_bytes(cp_base_offset +
2210 ConstantPoolCacheEntry::f1_offset())));
2211 }
2212 }
2214 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2215 Register method,
2216 Register itable_index,
2217 Register flags,
2218 bool is_invokevirtual,
2219 bool is_invokevfinal, /*unused*/
2220 bool is_invokedynamic) {
2221 // setup registers
2222 const Register cache = rcx;
2223 const Register index = rdx;
2224 assert_different_registers(method, flags);
2225 assert_different_registers(method, cache, index);
2226 assert_different_registers(itable_index, flags);
2227 assert_different_registers(itable_index, cache, index);
2228 // determine constant pool cache field offsets
2229 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2230 const int method_offset = in_bytes(
2231 constantPoolCacheOopDesc::base_offset() +
2232 ((byte_no == f2_byte)
2233 ? ConstantPoolCacheEntry::f2_offset()
2234 : ConstantPoolCacheEntry::f1_offset()));
2235 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2236 ConstantPoolCacheEntry::flags_offset());
2237 // access constant pool cache fields
2238 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2239 ConstantPoolCacheEntry::f2_offset());
2241 if (byte_no == f12_oop) {
2242 // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
2243 // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
2244 // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
2245 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2246 resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
2247 __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
2248 itable_index = noreg; // hack to disable load below
2249 } else {
2250 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2251 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2252 }
2253 if (itable_index != noreg) {
2254 // pick up itable index from f2 also:
2255 assert(byte_no == f1_byte, "already picked up f1");
2256 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2257 }
2258 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2259 }
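// Descriptive note on the CP cache entry fields consumed above (see
// cpCacheOop.hpp for the authoritative layout):
//   f1    - methodOop (invokespecial/static), interface klassOop
//           (invokeinterface), or resolved CallSite/MethodType (f12_oop)
//   f2    - vtable index or vfinal methodOop (invokevirtual), itable index
//           (invokeinterface), or invoker methodOop (f12_oop)
//   flags - tos state, parameter size, and is_vfinal/has_appendix/... bits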
2262 // The cache and index registers are expected to be set before the call.
2263 // Correct values of the cache and index registers are preserved.
2264 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2265 bool is_static, bool has_tos) {
2266 // do the JVMTI work here to avoid disturbing the register state below
2267 // We use the c_rarg registers here because they are the registers used in
2268 // the call to the VM
2269 if (JvmtiExport::can_post_field_access()) {
2270 // Check to see if a field access watch has been set before we
2271 // take the time to call into the VM.
2272 Label L1;
2273 assert_different_registers(cache, index, rax);
2274 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2275 __ testl(rax, rax);
2276 __ jcc(Assembler::zero, L1);
2278 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2280 // cache entry pointer
2281 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
2282 __ shll(c_rarg3, LogBytesPerWord);
2283 __ addptr(c_rarg2, c_rarg3);
2284 if (is_static) {
2285 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2286 } else {
2287 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2288 __ verify_oop(c_rarg1);
2289 }
2290 // c_rarg1: object pointer or NULL
2291 // c_rarg2: cache entry pointer
2292 // c_rarg3: jvalue object on the stack
2293 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2294 InterpreterRuntime::post_field_access),
2295 c_rarg1, c_rarg2, c_rarg3);
2296 __ get_cache_and_index_at_bcp(cache, index, 1);
2297 __ bind(L1);
2298 }
2299 }
2301 void TemplateTable::pop_and_check_object(Register r) {
2302 __ pop_ptr(r);
2303 __ null_check(r); // for field access must check obj.
2304 __ verify_oop(r);
2305 }
2307 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2308 transition(vtos, vtos);
2310 const Register cache = rcx;
2311 const Register index = rdx;
2312 const Register obj = c_rarg3;
2313 const Register off = rbx;
2314 const Register flags = rax;
2315 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2317 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2318 jvmti_post_field_access(cache, index, is_static, false);
2319 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2321 if (!is_static) {
2322 // obj is on the stack
2323 pop_and_check_object(obj);
2324 }
2326 const Address field(obj, off, Address::times_1);
2328 Label Done, notByte, notInt, notShort, notChar,
2329 notLong, notFloat, notObj, notDouble;
2331 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2332 // Make sure we don't need to mask flags (rax) after the above shift
2333 assert(btos == 0, "change code, btos != 0");
2335 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2336 __ jcc(Assembler::notZero, notByte);
2337 // btos
2338 __ load_signed_byte(rax, field);
2339 __ push(btos);
2340 // Rewrite bytecode to be faster
2341 if (!is_static) {
2342 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2343 }
2344 __ jmp(Done);
2346 __ bind(notByte);
2347 __ cmpl(flags, atos);
2348 __ jcc(Assembler::notEqual, notObj);
2349 // atos
2350 __ load_heap_oop(rax, field);
2351 __ push(atos);
2352 if (!is_static) {
2353 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2354 }
2355 __ jmp(Done);
2357 __ bind(notObj);
2358 __ cmpl(flags, itos);
2359 __ jcc(Assembler::notEqual, notInt);
2360 // itos
2361 __ movl(rax, field);
2362 __ push(itos);
2363 // Rewrite bytecode to be faster
2364 if (!is_static) {
2365 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2366 }
2367 __ jmp(Done);
2369 __ bind(notInt);
2370 __ cmpl(flags, ctos);
2371 __ jcc(Assembler::notEqual, notChar);
2372 // ctos
2373 __ load_unsigned_short(rax, field);
2374 __ push(ctos);
2375 // Rewrite bytecode to be faster
2376 if (!is_static) {
2377 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2378 }
2379 __ jmp(Done);
2381 __ bind(notChar);
2382 __ cmpl(flags, stos);
2383 __ jcc(Assembler::notEqual, notShort);
2384 // stos
2385 __ load_signed_short(rax, field);
2386 __ push(stos);
2387 // Rewrite bytecode to be faster
2388 if (!is_static) {
2389 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2390 }
2391 __ jmp(Done);
2393 __ bind(notShort);
2394 __ cmpl(flags, ltos);
2395 __ jcc(Assembler::notEqual, notLong);
2396 // ltos
2397 __ movq(rax, field);
2398 __ push(ltos);
2399 // Rewrite bytecode to be faster
2400 if (!is_static) {
2401 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2402 }
2403 __ jmp(Done);
2405 __ bind(notLong);
2406 __ cmpl(flags, ftos);
2407 __ jcc(Assembler::notEqual, notFloat);
2408 // ftos
2409 __ movflt(xmm0, field);
2410 __ push(ftos);
2411 // Rewrite bytecode to be faster
2412 if (!is_static) {
2413 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2414 }
2415 __ jmp(Done);
2417 __ bind(notFloat);
2418 #ifdef ASSERT
2419 __ cmpl(flags, dtos);
2420 __ jcc(Assembler::notEqual, notDouble);
2421 #endif
2422 // dtos
2423 __ movdbl(xmm0, field);
2424 __ push(dtos);
2425 // Rewrite bytecode to be faster
2426 if (!is_static) {
2427 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2428 }
2429 #ifdef ASSERT
2430 __ jmp(Done);
2432 __ bind(notDouble);
2433 __ stop("Bad state");
2434 #endif
2436 __ bind(Done);
2437 // [jk] not needed currently
2438 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2439 // Assembler::LoadStore));
2440 }
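// The compare chain above is effectively (illustrative pseudocode):
//
//   switch ((flags >> tos_state_shift) & tos_state_mask) {
//     case btos: push(load_signed_byte(field)); maybe_rewrite(_fast_bgetfield); break;
//     case atos: push(load_heap_oop(field));    maybe_rewrite(_fast_agetfield); break;
//     ...  // itos, ctos, stos, ltos, ftos, dtos analogously
//   }
//
// where maybe_rewrite patches the bytecode only in the non-static case.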
2443 void TemplateTable::getfield(int byte_no) {
2444 getfield_or_static(byte_no, false);
2445 }
2447 void TemplateTable::getstatic(int byte_no) {
2448 getfield_or_static(byte_no, true);
2449 }
2451 // The cache and index registers are expected to be set before the call.
2452 // The function may destroy various registers, just not the cache and index registers.
2453 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2454 transition(vtos, vtos);
2456 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2458 if (JvmtiExport::can_post_field_modification()) {
2459 // Check to see if a field modification watch has been set before
2460 // we take the time to call into the VM.
2461 Label L1;
2462 assert_different_registers(cache, index, rax);
2463 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2464 __ testl(rax, rax);
2465 __ jcc(Assembler::zero, L1);
2467 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2469 if (is_static) {
2470 // Life is simple. Null out the object pointer.
2471 __ xorl(c_rarg1, c_rarg1);
2472 } else {
2473 // Life is harder. The stack holds the value on top, followed by
2474 // the object. We don't know the size of the value, though; it
2475 // could be one or two words depending on its type. As a result,
2476 // we must find the type to determine where the object is.
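// Illustration of the two stack shapes handled here:
//
//   one-word value                two-word value (ltos/dtos)
//   [ value  ] <- at_tos()        [ value (2 slots) ] <- at_tos()..at_tos_p1()
//   [ object ] <- at_tos_p1()     [ object          ] <- at_tos_p2()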
2477 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2478 Address::times_8,
2479 in_bytes(cp_base_offset +
2480 ConstantPoolCacheEntry::flags_offset())));
2481 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2482 // Make sure we don't need to mask rcx after the above shift
2483 ConstantPoolCacheEntry::verify_tos_state_shift();
2484 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2485 __ cmpl(c_rarg3, ltos);
2486 __ cmovptr(Assembler::equal,
2487 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2488 __ cmpl(c_rarg3, dtos);
2489 __ cmovptr(Assembler::equal,
2490 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2491 }
2492 // cache entry pointer
2493 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2494 __ shll(rscratch1, LogBytesPerWord);
2495 __ addptr(c_rarg2, rscratch1);
2496 // object (tos)
2497 __ mov(c_rarg3, rsp);
2498 // c_rarg1: object pointer set up above (NULL if static)
2499 // c_rarg2: cache entry pointer
2500 // c_rarg3: jvalue object on the stack
2501 __ call_VM(noreg,
2502 CAST_FROM_FN_PTR(address,
2503 InterpreterRuntime::post_field_modification),
2504 c_rarg1, c_rarg2, c_rarg3);
2505 __ get_cache_and_index_at_bcp(cache, index, 1);
2506 __ bind(L1);
2507 }
2508 }
2510 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2511 transition(vtos, vtos);
2513 const Register cache = rcx;
2514 const Register index = rdx;
2515 const Register obj = rcx;
2516 const Register off = rbx;
2517 const Register flags = rax;
2518 const Register bc = c_rarg3;
2520 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2521 jvmti_post_field_mod(cache, index, is_static);
2522 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2524 // [jk] not needed currently
2525 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2526 // Assembler::StoreStore));
2528 Label notVolatile, Done;
2529 __ movl(rdx, flags);
2530 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2531 __ andl(rdx, 0x1);
2533 // field address
2534 const Address field(obj, off, Address::times_1);
2536 Label notByte, notInt, notShort, notChar,
2537 notLong, notFloat, notObj, notDouble;
2539 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2541 assert(btos == 0, "change code, btos != 0");
2542 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2543 __ jcc(Assembler::notZero, notByte);
2545 // btos
2546 {
2547 __ pop(btos);
2548 if (!is_static) pop_and_check_object(obj);
2549 __ movb(field, rax);
2550 if (!is_static) {
2551 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2552 }
2553 __ jmp(Done);
2554 }
2556 __ bind(notByte);
2557 __ cmpl(flags, atos);
2558 __ jcc(Assembler::notEqual, notObj);
2560 // atos
2561 {
2562 __ pop(atos);
2563 if (!is_static) pop_and_check_object(obj);
2564 // Store into the field
2565 do_oop_store(_masm, field, rax, _bs->kind(), false);
2566 if (!is_static) {
2567 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2568 }
2569 __ jmp(Done);
2570 }
2572 __ bind(notObj);
2573 __ cmpl(flags, itos);
2574 __ jcc(Assembler::notEqual, notInt);
2576 // itos
2577 {
2578 __ pop(itos);
2579 if (!is_static) pop_and_check_object(obj);
2580 __ movl(field, rax);
2581 if (!is_static) {
2582 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2583 }
2584 __ jmp(Done);
2585 }
2587 __ bind(notInt);
2588 __ cmpl(flags, ctos);
2589 __ jcc(Assembler::notEqual, notChar);
2591 // ctos
2592 {
2593 __ pop(ctos);
2594 if (!is_static) pop_and_check_object(obj);
2595 __ movw(field, rax);
2596 if (!is_static) {
2597 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2598 }
2599 __ jmp(Done);
2600 }
2602 __ bind(notChar);
2603 __ cmpl(flags, stos);
2604 __ jcc(Assembler::notEqual, notShort);
2606 // stos
2607 {
2608 __ pop(stos);
2609 if (!is_static) pop_and_check_object(obj);
2610 __ movw(field, rax);
2611 if (!is_static) {
2612 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2613 }
2614 __ jmp(Done);
2615 }
2617 __ bind(notShort);
2618 __ cmpl(flags, ltos);
2619 __ jcc(Assembler::notEqual, notLong);
2621 // ltos
2622 {
2623 __ pop(ltos);
2624 if (!is_static) pop_and_check_object(obj);
2625 __ movq(field, rax);
2626 if (!is_static) {
2627 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2628 }
2629 __ jmp(Done);
2630 }
2632 __ bind(notLong);
2633 __ cmpl(flags, ftos);
2634 __ jcc(Assembler::notEqual, notFloat);
2636 // ftos
2637 {
2638 __ pop(ftos);
2639 if (!is_static) pop_and_check_object(obj);
2640 __ movflt(field, xmm0);
2641 if (!is_static) {
2642 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2643 }
2644 __ jmp(Done);
2645 }
2647 __ bind(notFloat);
2648 #ifdef ASSERT
2649 __ cmpl(flags, dtos);
2650 __ jcc(Assembler::notEqual, notDouble);
2651 #endif
2653 // dtos
2654 {
2655 __ pop(dtos);
2656 if (!is_static) pop_and_check_object(obj);
2657 __ movdbl(field, xmm0);
2658 if (!is_static) {
2659 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2660 }
2661 }
2663 #ifdef ASSERT
2664 __ jmp(Done);
2666 __ bind(notDouble);
2667 __ stop("Bad state");
2668 #endif
2670 __ bind(Done);
2672 // Check for volatile store
2673 __ testl(rdx, rdx);
2674 __ jcc(Assembler::zero, notVolatile);
2675 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2676 Assembler::StoreStore));
2677 __ bind(notVolatile);
2678 }
2680 void TemplateTable::putfield(int byte_no) {
2681 putfield_or_static(byte_no, false);
2682 }
2684 void TemplateTable::putstatic(int byte_no) {
2685 putfield_or_static(byte_no, true);
2686 }
2688 void TemplateTable::jvmti_post_fast_field_mod() {
2689 if (JvmtiExport::can_post_field_modification()) {
2690 // Check to see if a field modification watch has been set before
2691 // we take the time to call into the VM.
2692 Label L2;
2693 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2694 __ testl(c_rarg3, c_rarg3);
2695 __ jcc(Assembler::zero, L2);
2696 __ pop_ptr(rbx); // copy the object pointer from tos
2697 __ verify_oop(rbx);
2698 __ push_ptr(rbx); // put the object pointer back on tos
2699 // Save tos values before call_VM() clobbers them. Since we have
2700 // to do it for every data type, we use the saved values as the
2701 // jvalue object.
2702 switch (bytecode()) { // load values into the jvalue object
2703 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2704 case Bytecodes::_fast_bputfield: // fall through
2705 case Bytecodes::_fast_sputfield: // fall through
2706 case Bytecodes::_fast_cputfield: // fall through
2707 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2708 case Bytecodes::_fast_dputfield: __ push_d(); break;
2709 case Bytecodes::_fast_fputfield: __ push_f(); break;
2710 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2712 default:
2713 ShouldNotReachHere();
2714 }
2715 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2716 // access constant pool cache entry
2717 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2718 __ verify_oop(rbx);
2719 // rbx: object pointer copied above
2720 // c_rarg2: cache entry pointer
2721 // c_rarg3: jvalue object on the stack
2722 __ call_VM(noreg,
2723 CAST_FROM_FN_PTR(address,
2724 InterpreterRuntime::post_field_modification),
2725 rbx, c_rarg2, c_rarg3);
2727 switch (bytecode()) { // restore tos values
2728 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2729 case Bytecodes::_fast_bputfield: // fall through
2730 case Bytecodes::_fast_sputfield: // fall through
2731 case Bytecodes::_fast_cputfield: // fall through
2732 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2733 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2734 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2735 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2736 }
2737 __ bind(L2);
2738 }
2739 }
2741 void TemplateTable::fast_storefield(TosState state) {
2742 transition(state, vtos);
2744 ByteSize base = constantPoolCacheOopDesc::base_offset();
2746 jvmti_post_fast_field_mod();
2748 // access constant pool cache
2749 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2751 // test for volatile with rdx
2752 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2753 in_bytes(base +
2754 ConstantPoolCacheEntry::flags_offset())));
2756 // replace index with field offset from cache entry
2757 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2758 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2760 // [jk] not needed currently
2761 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2762 // Assembler::StoreStore));
2764 Label notVolatile;
2765 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2766 __ andl(rdx, 0x1);
2768 // Get object from stack
2769 pop_and_check_object(rcx);
2771 // field address
2772 const Address field(rcx, rbx, Address::times_1);
2774 // access field
2775 switch (bytecode()) {
2776 case Bytecodes::_fast_aputfield:
2777 do_oop_store(_masm, field, rax, _bs->kind(), false);
2778 break;
2779 case Bytecodes::_fast_lputfield:
2780 __ movq(field, rax);
2781 break;
2782 case Bytecodes::_fast_iputfield:
2783 __ movl(field, rax);
2784 break;
2785 case Bytecodes::_fast_bputfield:
2786 __ movb(field, rax);
2787 break;
2788 case Bytecodes::_fast_sputfield:
2789 // fall through
2790 case Bytecodes::_fast_cputfield:
2791 __ movw(field, rax);
2792 break;
2793 case Bytecodes::_fast_fputfield:
2794 __ movflt(field, xmm0);
2795 break;
2796 case Bytecodes::_fast_dputfield:
2797 __ movdbl(field, xmm0);
2798 break;
2799 default:
2800 ShouldNotReachHere();
2801 }
2803 // Check for volatile store
2804 __ testl(rdx, rdx);
2805 __ jcc(Assembler::zero, notVolatile);
2806 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2807 Assembler::StoreStore));
2808 __ bind(notVolatile);
2809 }
2812 void TemplateTable::fast_accessfield(TosState state) {
2813 transition(atos, state);
2815 // Do the JVMTI work here to avoid disturbing the register state below
2816 if (JvmtiExport::can_post_field_access()) {
2817 // Check to see if a field access watch has been set before we
2818 // take the time to call into the VM.
2819 Label L1;
2820 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2821 __ testl(rcx, rcx);
2822 __ jcc(Assembler::zero, L1);
2823 // access constant pool cache entry
2824 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2825 __ verify_oop(rax);
2826 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2827 __ mov(c_rarg1, rax);
2828 // c_rarg1: object pointer copied above
2829 // c_rarg2: cache entry pointer
2830 __ call_VM(noreg,
2831 CAST_FROM_FN_PTR(address,
2832 InterpreterRuntime::post_field_access),
2833 c_rarg1, c_rarg2);
2834 __ pop_ptr(rax); // restore object pointer
2835 __ bind(L1);
2836 }
2838 // access constant pool cache
2839 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2840 // replace index with field offset from cache entry
2841 // [jk] not needed currently
2842 // if (os::is_MP()) {
2843 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2844 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2845 // ConstantPoolCacheEntry::flags_offset())));
2846 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2847 // __ andl(rdx, 0x1);
2848 // }
2849 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2850 in_bytes(constantPoolCacheOopDesc::base_offset() +
2851 ConstantPoolCacheEntry::f2_offset())));
2853 // rax: object
2854 __ verify_oop(rax);
2855 __ null_check(rax);
2856 Address field(rax, rbx, Address::times_1);
2858 // access field
2859 switch (bytecode()) {
2860 case Bytecodes::_fast_agetfield:
2861 __ load_heap_oop(rax, field);
2862 __ verify_oop(rax);
2863 break;
2864 case Bytecodes::_fast_lgetfield:
2865 __ movq(rax, field);
2866 break;
2867 case Bytecodes::_fast_igetfield:
2868 __ movl(rax, field);
2869 break;
2870 case Bytecodes::_fast_bgetfield:
2871 __ movsbl(rax, field);
2872 break;
2873 case Bytecodes::_fast_sgetfield:
2874 __ load_signed_short(rax, field);
2875 break;
2876 case Bytecodes::_fast_cgetfield:
2877 __ load_unsigned_short(rax, field);
2878 break;
2879 case Bytecodes::_fast_fgetfield:
2880 __ movflt(xmm0, field);
2881 break;
2882 case Bytecodes::_fast_dgetfield:
2883 __ movdbl(xmm0, field);
2884 break;
2885 default:
2886 ShouldNotReachHere();
2887 }
2888 // [jk] not needed currently
2889 // if (os::is_MP()) {
2890 // Label notVolatile;
2891 // __ testl(rdx, rdx);
2892 // __ jcc(Assembler::zero, notVolatile);
2893 // __ membar(Assembler::LoadLoad);
2894 // __ bind(notVolatile);
2895 //};
2896 }
2898 void TemplateTable::fast_xaccess(TosState state) {
2899 transition(vtos, state);
2901 // get receiver
2902 __ movptr(rax, aaddress(0));
2903 // access constant pool cache
2904 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2905 __ movptr(rbx,
2906 Address(rcx, rdx, Address::times_8,
2907 in_bytes(constantPoolCacheOopDesc::base_offset() +
2908 ConstantPoolCacheEntry::f2_offset())));
2909 // make sure exception is reported in correct bcp range (getfield is
2910 // next instruction)
2911 __ increment(r13);
2912 __ null_check(rax);
2913 switch (state) {
2914 case itos:
2915 __ movl(rax, Address(rax, rbx, Address::times_1));
2916 break;
2917 case atos:
2918 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2919 __ verify_oop(rax);
2920 break;
2921 case ftos:
2922 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2923 break;
2924 default:
2925 ShouldNotReachHere();
2926 }
2928 // [jk] not needed currently
2929 // if (os::is_MP()) {
2930 // Label notVolatile;
2931 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2932 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2933 // ConstantPoolCacheEntry::flags_offset())));
2934 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2935 // __ testl(rdx, 0x1);
2936 // __ jcc(Assembler::zero, notVolatile);
2937 // __ membar(Assembler::LoadLoad);
2938 // __ bind(notVolatile);
2939 // }
2941 __ decrement(r13);
2942 }
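// Descriptive note: the _fast_xaccess templates run with r13 (bcp) still at
// the fused aload_0, so the CP cache index is fetched at bcp + 2 (past the
// aload_0 and the getfield opcode), and r13 is bumped across the null check
// above so a NullPointerException is attributed to the getfield's bci.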
2946 //-----------------------------------------------------------------------------
2947 // Calls
2949 void TemplateTable::count_calls(Register method, Register temp) {
2950 // implemented elsewhere
2951 ShouldNotReachHere();
2952 }
2954 void TemplateTable::prepare_invoke(int byte_no,
2955 Register method, // linked method (or i-klass)
2956 Register index, // itable index, MethodType, etc.
2957 Register recv, // if caller wants to see it
2958 Register flags // if caller wants to test it
2959 ) {
2960 // determine flags
2961 const Bytecodes::Code code = bytecode();
2962 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2963 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2964 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2965 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2966 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2967 const bool load_receiver = (recv != noreg);
2968 const bool save_flags = (flags != noreg);
2969 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2970 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2971 assert(flags == noreg || flags == rdx, "");
2972 assert(recv == noreg || recv == rcx, "");
2974 // setup registers & access constant pool cache
2975 if (recv == noreg) recv = rcx;
2976 if (flags == noreg) flags = rdx;
2977 assert_different_registers(method, index, recv, flags);
2979 // save 'interpreter return address'
2980 __ save_bcp();
2982 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2984 // maybe push appendix to arguments (just before return address)
2985 if (is_invokedynamic || is_invokehandle) {
2986 Label L_no_push;
2987 __ verify_oop(index);
2988 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2989 __ jccb(Assembler::zero, L_no_push);
2990 // Push the appendix as a trailing parameter.
2991 // This must be done before we get the receiver,
2992 // since the parameter_size includes it.
2993 __ push(index); // push appendix (MethodType, CallSite, etc.)
2994 __ bind(L_no_push);
2995 }
2997 // load receiver if needed (after appendix is pushed so parameter size is correct)
2998 // Note: no return address pushed yet
2999 if (load_receiver) {
3000 __ movl(recv, flags);
3001 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3002 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3003 const int receiver_is_at_end = -1; // back off one slot to get receiver
3004 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3005 __ movptr(recv, recv_addr);
3006 __ verify_oop(recv);
3007 }
3009 if (save_flags) {
3010 __ movl(r13, flags);
3011 }
3013 // compute return type
3014 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3015 // Make sure we don't need to mask flags after the above shift
3016 ConstantPoolCacheEntry::verify_tos_state_shift();
3017 // load return address
3018 {
3019 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
3020 (address)Interpreter::return_5_addrs_by_index_table() :
3021 (address)Interpreter::return_3_addrs_by_index_table();
3022 ExternalAddress table(table_addr);
3023 __ lea(rscratch1, table);
3024 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3025 }
3027 // push return address
3028 __ push(flags);
3030 // Restore the flags value from r13, where it was saved above, and then
3031 // restore r13 itself (the bytecode pointer) via restore_bcp()
3032 if (save_flags) {
3033 __ movl(flags, r13);
3034 __ restore_bcp();
3035 }
3036 }
3039 void TemplateTable::invokevirtual_helper(Register index,
3040 Register recv,
3041 Register flags) {
3042 // Uses temporary registers rax, rdx
3043 assert_different_registers(index, recv, rax, rdx);
3044 assert(index == rbx, "");
3045 assert(recv == rcx, "");
3047 // Test for an invoke of a final method
3048 Label notFinal;
3049 __ movl(rax, flags);
3050 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3051 __ jcc(Assembler::zero, notFinal);
3053 const Register method = index; // method must be rbx
3054 assert(method == rbx,
3055 "methodOop must be rbx for interpreter calling convention");
3057 // do the call - the index is actually the method to call
3058 // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
3059 __ verify_oop(method);
3061 // It's final, need a null check here!
3062 __ null_check(recv);
3064 // profile this call
3065 __ profile_final_call(rax);
3067 __ jump_from_interpreted(method, rax);
3069 __ bind(notFinal);
3071 // get receiver klass
3072 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3073 __ load_klass(rax, recv);
3074 __ verify_oop(rax);
3076 // profile this call
3077 __ profile_virtual_call(rax, r14, rdx);
3079 // get target methodOop & entry point
3080 __ lookup_virtual_method(rax, index, method);
3081 __ jump_from_interpreted(method, rdx);
3082 }
3085 void TemplateTable::invokevirtual(int byte_no) {
3086 transition(vtos, vtos);
3087 assert(byte_no == f2_byte, "use this argument");
3088 prepare_invoke(byte_no,
3089 rbx, // method or vtable index
3090 noreg, // unused itable index
3091 rcx, rdx); // recv, flags
3093 // rbx: index
3094 // rcx: receiver
3095 // rdx: flags
3097 invokevirtual_helper(rbx, rcx, rdx);
3098 }
3101 void TemplateTable::invokespecial(int byte_no) {
3102 transition(vtos, vtos);
3103 assert(byte_no == f1_byte, "use this argument");
3104 prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
3105 rcx); // get receiver also for null check
3106 __ verify_oop(rcx);
3107 __ null_check(rcx);
3108 // do the call
3109 __ verify_oop(rbx);
3110 __ profile_call(rax);
3111 __ jump_from_interpreted(rbx, rax);
3112 }
3115 void TemplateTable::invokestatic(int byte_no) {
3116 transition(vtos, vtos);
3117 assert(byte_no == f1_byte, "use this argument");
3118 prepare_invoke(byte_no, rbx); // get f1 methodOop
3119 // do the call
3120 __ verify_oop(rbx);
3121 __ profile_call(rax);
3122 __ jump_from_interpreted(rbx, rax);
3123 }
3125 void TemplateTable::fast_invokevfinal(int byte_no) {
3126 transition(vtos, vtos);
3127 assert(byte_no == f2_byte, "use this argument");
3128 __ stop("fast_invokevfinal not used on amd64");
3129 }
3131 void TemplateTable::invokeinterface(int byte_no) {
3132 transition(vtos, vtos);
3133 assert(byte_no == f1_byte, "use this argument");
3134 prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
3135 rcx, rdx); // recv, flags
3137 // rax: interface klass (from f1)
3138 // rbx: itable index (from f2)
3139 // rcx: receiver
3140 // rdx: flags
3142 // Special case of invokeinterface called for a virtual method of
3143 // java.lang.Object. See cpCacheOop.cpp for details.
3144 // This code isn't produced by javac, but could be produced by
3145 // another compliant Java compiler.
3146 Label notMethod;
3147 __ movl(r14, rdx);
3148 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3149 __ jcc(Assembler::zero, notMethod);
3151 invokevirtual_helper(rbx, rcx, rdx);
3152 __ bind(notMethod);
3154 // Get receiver klass into rdx - also a null check
3155 __ restore_locals(); // restore r14
3156 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3157 __ load_klass(rdx, rcx);
3158 __ verify_oop(rdx);
3160 // profile this call
3161 __ profile_virtual_call(rdx, r13, r14);
3163 Label no_such_interface, no_such_method;
3165 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3166 rdx, rax, rbx,
3167 // outputs: method, scan temp. reg
3168 rbx, r13,
3169 no_such_interface);
3171 // rbx: methodOop to call
3172 // rcx: receiver
3173 // Check for abstract method error
3174 // Note: This should be done more efficiently via a throw_abstract_method_error
3175 // interpreter entry point and a conditional jump to it in case of a null
3176 // method.
3177 __ testptr(rbx, rbx);
3178 __ jcc(Assembler::zero, no_such_method);
3180 // do the call
3181 // rcx: receiver
3182 // rbx,: methodOop
3183 __ jump_from_interpreted(rbx, rdx);
3184 __ should_not_reach_here();
3186 // exception handling code follows...
3187 // note: must restore interpreter registers to canonical
3188 // state for exception handling to work correctly!
3190 __ bind(no_such_method);
3191 // throw exception
3192 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3193 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3194 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3195 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3196 // the call_VM checks for exception, so we should never return here.
3197 __ should_not_reach_here();
3199 __ bind(no_such_interface);
3200 // throw exception
3201 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3202 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3203 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3204 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3205 InterpreterRuntime::throw_IncompatibleClassChangeError));
3206 // the call_VM checks for exception, so we should never return here.
3207 __ should_not_reach_here();
3208 }
3211 void TemplateTable::invokehandle(int byte_no) {
3212 transition(vtos, vtos);
3213 assert(byte_no == f12_oop, "use this argument");
3214 const Register rbx_method = rbx; // f2
3215 const Register rax_mtype = rax; // f1
3216 const Register rcx_recv = rcx;
3217 const Register rdx_flags = rdx;
3219 if (!EnableInvokeDynamic) {
3220 // rewriter does not generate this bytecode
3221 __ should_not_reach_here();
3222 return;
3223 }
3225 prepare_invoke(byte_no,
3226 rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
3227 rcx_recv);
3228 __ verify_oop(rbx_method);
3229 __ verify_oop(rcx_recv);
3230 __ null_check(rcx_recv);
3232 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3234 // FIXME: profile the LambdaForm also
3235 __ profile_final_call(rax);
3237 __ jump_from_interpreted(rbx_method, rdx);
3238 }
3241 void TemplateTable::invokedynamic(int byte_no) {
3242 transition(vtos, vtos);
3243 assert(byte_no == f12_oop, "use this argument");
3245 if (!EnableInvokeDynamic) {
3246 // We should not encounter this bytecode if !EnableInvokeDynamic.
3247 // The verifier will stop it. However, if we get past the verifier,
3248 // this will stop the thread in a reasonable way, without crashing the JVM.
3249 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3250 InterpreterRuntime::throw_IncompatibleClassChangeError));
3251 // the call_VM checks for exception, so we should never return here.
3252 __ should_not_reach_here();
3253 return;
3254 }
3256 const Register rbx_method = rbx;
3257 const Register rax_callsite = rax;
3259 prepare_invoke(byte_no, rbx_method, rax_callsite);
3261 // rax: CallSite object (from f1)
3262 // rbx: MH.linkToCallSite method (from f2)
3264 // Note: rax_callsite is already pushed by prepare_invoke
3266 // %%% should make a type profile for any invokedynamic that takes a ref argument
3267 // profile this call
3268 __ profile_call(r13);
3270 __ verify_oop(rax_callsite);
3272 __ jump_from_interpreted(rbx_method, rdx);
3273 }
3276 //-----------------------------------------------------------------------------
3277 // Allocation
3279 void TemplateTable::_new() {
3280 transition(vtos, atos);
3281 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3282 Label slow_case;
3283 Label done;
3284 Label initialize_header;
3285 Label initialize_object; // including clearing the fields
3286 Label allocate_shared;
3288 __ get_cpool_and_tags(rsi, rax);
3289 // Make sure the class we're about to instantiate has been resolved.
3290 // This is done before loading the instanceKlass to be consistent with the
3291 // order in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3292 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3293 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3294 JVM_CONSTANT_Class);
3295 __ jcc(Assembler::notEqual, slow_case);
3297 // get instanceKlass
3298 __ movptr(rsi, Address(rsi, rdx,
3299 Address::times_8, sizeof(constantPoolOopDesc)));
3301 // make sure klass is initialized & doesn't have finalizer
3302 // make sure klass is fully initialized
3303 __ cmpb(Address(rsi,
3304 instanceKlass::init_state_offset()),
3305 instanceKlass::fully_initialized);
3306 __ jcc(Assembler::notEqual, slow_case);
3308 // get instance_size in instanceKlass (scaled to a count of bytes)
3309 __ movl(rdx,
3310 Address(rsi,
3311 Klass::layout_helper_offset()));
3312 // test to see if it has a finalizer or is malformed in some way
3313 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3314 __ jcc(Assembler::notZero, slow_case);
3316 // Allocate the instance
3317 // 1) Try to allocate in the TLAB
3318 // 2) if that fails, allocate in the shared Eden (if allowed)
3319 // 3) if the above fails (or is not applicable), go to a slow case
3320 // (creates a new TLAB, etc.)
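// The TLAB fast path below is, in C-like pseudocode (illustrative names):
//
//   HeapWord* obj = thread->tlab_top();
//   HeapWord* end = obj + size_in_bytes;   // size is in rdx
//   if (end > thread->tlab_end()) goto allocate_shared_or_slow_case;
//   thread->tlab_top() = end;              // bump the pointer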
3322 const bool allow_shared_alloc =
3323 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3325 if (UseTLAB) {
3326 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3327 __ lea(rbx, Address(rax, rdx, Address::times_1));
3328 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3329 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3330 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3331 if (ZeroTLAB) {
3332 // the fields have been already cleared
3333 __ jmp(initialize_header);
3334 } else {
3335 // initialize both the header and fields
3336 __ jmp(initialize_object);
3337 }
3338 }
3340 // Allocation in the shared Eden, if allowed.
3341 //
3342 // rdx: instance size in bytes
3343 if (allow_shared_alloc) {
3344 __ bind(allocate_shared);
3346 ExternalAddress top((address)Universe::heap()->top_addr());
3347 ExternalAddress end((address)Universe::heap()->end_addr());
3349 const Register RtopAddr = rscratch1;
3350 const Register RendAddr = rscratch2;
3352 __ lea(RtopAddr, top);
3353 __ lea(RendAddr, end);
3354 __ movptr(rax, Address(RtopAddr, 0));
3356 // For retries rax gets set by cmpxchgq
3357 Label retry;
3358 __ bind(retry);
3359 __ lea(rbx, Address(rax, rdx, Address::times_1));
3360 __ cmpptr(rbx, Address(RendAddr, 0));
3361 __ jcc(Assembler::above, slow_case);
3363 // Compare rax with the current top addr, and if they are still equal,
3364 // store the new top addr (in rbx) at the top addr pointer. Sets ZF if they
3365 // were equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3366 //
3367 // rax: object begin
3368 // rbx: object end
3369 // rdx: instance size in bytes
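// i.e., lock cmpxchgq atomically performs (illustration):
//
//   if (rax == *RtopAddr) { *RtopAddr = rbx; ZF = 1; }
//   else                  { rax = *RtopAddr; ZF = 0; }  // retry with new top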
3370 if (os::is_MP()) {
3371 __ lock();
3372 }
3373 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3375 // if someone beat us on the allocation, try again, otherwise continue
3376 __ jcc(Assembler::notEqual, retry);
3378 __ incr_allocated_bytes(r15_thread, rdx, 0);
3379 }
3381 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3382 // The object fields are initialized before the header. If the size of the
3383 // fields (instance size minus header) is zero, go directly to the header initialization.
3384 __ bind(initialize_object);
3385 __ decrementl(rdx, sizeof(oopDesc));
3386 __ jcc(Assembler::zero, initialize_header);
3388 // Initialize object fields
3389 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3390 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3391 {
3392 Label loop;
3393 __ bind(loop);
3394 __ movq(Address(rax, rdx, Address::times_8,
3395 sizeof(oopDesc) - oopSize),
3396 rcx);
3397 __ decrementl(rdx);
3398 __ jcc(Assembler::notZero, loop);
3399 }
3401 // initialize object header only.
3402 __ bind(initialize_header);
3403 if (UseBiasedLocking) {
3404 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3405 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3406 } else {
3407 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3408 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3409 }
3410 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3411 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3412 __ store_klass(rax, rsi); // store klass last
3414 {
3415 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3416 // Trigger dtrace event for fastpath
3417 __ push(atos); // save the return value
3418 __ call_VM_leaf(
3419 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3420 __ pop(atos); // restore the return value
3422 }
3423 __ jmp(done);
3424 }
3427 // slow case
3428 __ bind(slow_case);
3429 __ get_constant_pool(c_rarg1);
3430 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3431 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3432 __ verify_oop(rax);
3434 // continue
3435 __ bind(done);
3436 }
3438 void TemplateTable::newarray() {
3439 transition(itos, atos);
3440 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3441 __ movl(c_rarg2, rax);
3442 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3443 c_rarg1, c_rarg2);
3444 }
3446 void TemplateTable::anewarray() {
3447 transition(itos, atos);
3448 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3449 __ get_constant_pool(c_rarg1);
3450 __ movl(c_rarg3, rax);
3451 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3452 c_rarg1, c_rarg2, c_rarg3);
3453 }
3455 void TemplateTable::arraylength() {
3456 transition(atos, itos);
3457 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3458 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3459 }
3461 void TemplateTable::checkcast() {
3462 transition(atos, atos);
3463 Label done, is_null, ok_is_subtype, quicked, resolved;
3464 __ testptr(rax, rax); // object is in rax
3465 __ jcc(Assembler::zero, is_null);
3467 // Get cpool & tags index
3468 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3469 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3470 // See if bytecode has already been quickened
3471 __ cmpb(Address(rdx, rbx,
3472 Address::times_1,
3473 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3474 JVM_CONSTANT_Class);
3475 __ jcc(Assembler::equal, quicked);
3476 __ push(atos); // save receiver for result, and for GC
3477 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3478 __ pop_ptr(rdx); // restore receiver
3479 __ jmpb(resolved);
3481 // Get superklass in rax and subklass in rbx
3482 __ bind(quicked);
3483 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3484 __ movptr(rax, Address(rcx, rbx,
3485 Address::times_8, sizeof(constantPoolOopDesc)));
3487 __ bind(resolved);
3488 __ load_klass(rbx, rdx);
3490 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3491 // Superklass in rax. Subklass in rbx.
3492 __ gen_subtype_check(rbx, ok_is_subtype);
3494 // Come here on failure
3495 __ push_ptr(rdx);
3496 // object is at TOS
3497 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3499 // Come here on success
3500 __ bind(ok_is_subtype);
3501 __ mov(rax, rdx); // Restore object from rdx
3503 // Collect counts on whether this check-cast sees NULLs a lot or not.
3504 if (ProfileInterpreter) {
3505 __ jmp(done);
3506 __ bind(is_null);
3507 __ profile_null_seen(rcx);
3508 } else {
3509 __ bind(is_null); // same as 'done'
3510 }
3511 __ bind(done);
3512 }
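// Net effect of the template above (illustrative pseudocode):
//
//   if (obj != NULL && !obj->klass()->is_subtype_of(K))
//     throw ClassCastException;
//   // the tos value (rax) is the original object either way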
3514 void TemplateTable::instanceof() {
3515 transition(atos, itos);
3516 Label done, is_null, ok_is_subtype, quicked, resolved;
3517 __ testptr(rax, rax);
3518 __ jcc(Assembler::zero, is_null);
3520 // Get cpool & tags index
3521 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3522 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3523 // See if bytecode has already been quickened
3524 __ cmpb(Address(rdx, rbx,
3525 Address::times_1,
3526 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3527 JVM_CONSTANT_Class);
3528 __ jcc(Assembler::equal, quicked);
3530 __ push(atos); // save receiver for result, and for GC
3531 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3532 __ pop_ptr(rdx); // restore receiver
3533 __ verify_oop(rdx);
3534 __ load_klass(rdx, rdx);
3535 __ jmpb(resolved);
3537 // Get superklass in rax and subklass in rdx
3538 __ bind(quicked);
3539 __ load_klass(rdx, rax);
3540 __ movptr(rax, Address(rcx, rbx,
3541 Address::times_8, sizeof(constantPoolOopDesc)));
3543 __ bind(resolved);
3545 // Generate subtype check. Blows rcx, rdi
3546 // Superklass in rax. Subklass in rdx.
3547 __ gen_subtype_check(rdx, ok_is_subtype);
3549 // Come here on failure
3550 __ xorl(rax, rax);
3551 __ jmpb(done);
3552 // Come here on success
3553 __ bind(ok_is_subtype);
3554 __ movl(rax, 1);
3556 // Collect counts on whether this test sees NULLs a lot or not.
3557 if (ProfileInterpreter) {
3558 __ jmp(done);
3559 __ bind(is_null);
3560 __ profile_null_seen(rcx);
3561 } else {
3562 __ bind(is_null); // same as 'done'
3563 }
3564 __ bind(done);
3565 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3566 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3567 }
3569 //-----------------------------------------------------------------------------
3570 // Breakpoints
3571 void TemplateTable::_breakpoint() {
3572 // Note: We get here even if we are single stepping.
3573 // jbug insists on setting breakpoints at every bytecode
3574 // even if we are in single step mode.
3576 transition(vtos, vtos);
3578 // get the unpatched byte code
3579 __ get_method(c_rarg1);
3580 __ call_VM(noreg,
3581 CAST_FROM_FN_PTR(address,
3582 InterpreterRuntime::get_original_bytecode_at),
3583 c_rarg1, r13);
3584 __ mov(rbx, rax);
3586 // post the breakpoint event
3587 __ get_method(c_rarg1);
3588 __ call_VM(noreg,
3589 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3590 c_rarg1, r13);
3592 // complete the execution of original bytecode
3593 __ dispatch_only_normal(vtos);
3594 }
3596 //-----------------------------------------------------------------------------
3597 // Exceptions
3599 void TemplateTable::athrow() {
3600 transition(atos, vtos);
3601 __ null_check(rax);
3602 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3603 }
3605 //-----------------------------------------------------------------------------
3606 // Synchronization
3607 //
3608 // Note: monitorenter & exit are symmetric routines; which is reflected
3609 // in the assembly code structure as well
3610 //
3611 // Stack layout:
3612 //
3613 // [expressions ] <--- rsp = expression stack top
3614 // ..
3615 // [expressions ]
3616 // [monitor entry] <--- monitor block top = expression stack bot
3617 // ..
3618 // [monitor entry]
3619 // [frame data ] <--- monitor block bot
3620 // ...
3621 // [saved rbp ] <--- rbp
3622 void TemplateTable::monitorenter() {
3623 transition(atos, vtos);
3625 // check for NULL object
3626 __ null_check(rax);
3628 const Address monitor_block_top(
3629 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3630 const Address monitor_block_bot(
3631 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3632 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3634 Label allocated;
3636 // initialize entry pointer
3637 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3639 // find a free slot in the monitor block (result in c_rarg1)
3640 {
3641 Label entry, loop, exit;
3642 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3643 // starting with top-most entry
3644 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3645 // of monitor block
3646 __ jmpb(entry);
3648 __ bind(loop);
3649 // check if current entry is used
3650 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3651 // if not used then remember entry in c_rarg1
3652 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3653 // check if current entry is for same object
3654 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3655 // if same object then stop searching
3656 __ jccb(Assembler::equal, exit);
3657 // otherwise advance to next entry
3658 __ addptr(c_rarg3, entry_size);
3659 __ bind(entry);
3660 // check if bottom reached
3661 __ cmpptr(c_rarg3, c_rarg2);
3662 // if not at bottom then check this entry
3663 __ jcc(Assembler::notEqual, loop);
3664 __ bind(exit);
3665 }
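// The slot search above, as a C-style sketch (illustrative only):
//
//   free = NULL;
//   for (entry = top; entry != bot; entry++) {
//     if (entry->obj == NULL) free = entry;  // remember a reusable slot
//     if (entry->obj == obj)  break;         // stop at an entry for this object
//   }
//
// leaving c_rarg1 either NULL or pointing at a usable monitor entry.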
3667 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3668 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3670 // allocate one if there's no free slot
3671 {
3672 Label entry, loop;
3673 // 1. compute new pointers // rsp: old expression stack top
3674 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3675 __ subptr(rsp, entry_size); // move expression stack top
3676 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3677 __ mov(c_rarg3, rsp); // set start value for copy loop
3678 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3679 __ jmp(entry);
3680 // 2. move expression stack contents
3681 __ bind(loop);
3682 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3683 // word from old location
3684 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3685 __ addptr(c_rarg3, wordSize); // advance to next word
3686 __ bind(entry);
3687 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3688 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3689 // copy next word
3690 }
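// The expansion above, in outline (a sketch against the stack layout
// pictured before monitorenter; not generated code):
//
//   rsp     -= entry_size;                           // move expression stack top
//   new_bot  = old_bot - entry_size;                 // move expression stack bottom
//   for (p = rsp; p != new_bot; p += wordSize)
//     *(intptr_t*)p = *(intptr_t*)(p + entry_size);  // slide stack words down
//
// which opens one unused monitor entry at new_bot, adjacent to the
// expression stack.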
3692 // call run-time routine
3693 // c_rarg1: points to monitor entry
3694 __ bind(allocated);
3696 // Increment bcp to point to the next bytecode, so exception
3697 // handling for async. exceptions works correctly.
3698 // The object has already been popped from the stack, so the
3699 // expression stack looks correct.
3700 __ increment(r13);
3702 // store object
3703 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3704 __ lock_object(c_rarg1);
3706 // check to make sure this monitor doesn't cause stack overflow after locking
3707 __ save_bcp(); // in case of exception
3708 __ generate_stack_overflow_check(0);
3710 // The bcp has already been incremented. Just need to dispatch to
3711 // next instruction.
3712 __ dispatch_next(vtos);
3713 }
3716 void TemplateTable::monitorexit() {
3717 transition(atos, vtos);
3719 // check for NULL object
3720 __ null_check(rax);
3722 const Address monitor_block_top(
3723 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3724 const Address monitor_block_bot(
3725 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3726 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3728 Label found;
3730 // find matching slot
3731 {
3732 Label entry, loop;
3733 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3734 // starting with top-most entry
3735 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3736 // of monitor block
3737 __ jmpb(entry);
3739 __ bind(loop);
3740 // check if current entry is for same object
3741 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3742 // if same object then stop searching
3743 __ jcc(Assembler::equal, found);
3744 // otherwise advance to next entry
3745 __ addptr(c_rarg1, entry_size);
3746 __ bind(entry);
3747 // check if bottom reached
3748 __ cmpptr(c_rarg1, c_rarg2);
3749 // if not at bottom then check this entry
3750 __ jcc(Assembler::notEqual, loop);
3751 }
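// Equivalent C-style outline of the search above (illustrative only):
//
//   for (entry = top; entry != bot; entry++)
//     if (entry->obj == obj) goto found;
//   // fell off the end: no matching entry, unlock was not block-structured
//   throw_illegal_monitor_state_exception();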
3753 // Error handling: unlocking was not block-structured
3754 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3755 InterpreterRuntime::throw_illegal_monitor_state_exception));
3756 __ should_not_reach_here();
3758 // call run-time routine
3759 // c_rarg1: points to monitor entry
3760 __ bind(found);
3761 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3762 __ unlock_object(c_rarg1);
3763 __ pop_ptr(rax); // discard object
3764 }
3767 // Wide instructions
3768 void TemplateTable::wide() {
3769 transition(vtos, vtos);
3770 __ load_unsigned_byte(rbx, at_bcp(1));
3771 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3772 __ jmp(Address(rscratch1, rbx, Address::times_8));
3773 // Note: the r13 increment step is part of the individual wide
3774 // bytecode implementations
3775 }
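// Illustrative example (assumed bytecode stream): for
//
//   wide iload 300
//
// at_bcp(1) holds the iload opcode, so the indirect jump above lands on the
// wide entry for iload via _wentry_point[Bytecodes::_iload]; that handler
// reads the 2-byte local index and advances r13 past the whole instruction.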
3778 // Multi arrays
3779 void TemplateTable::multianewarray() {
3780 transition(vtos, atos);
3781 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3782 // last dim is on top of stack; we want address of first one:
3783 // first_addr = last_addr + (ndims - 1) * wordSize
3784 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3785 call_VM(rax,
3786 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3787 c_rarg1);
3788 __ load_unsigned_byte(rbx, at_bcp(3));
3789 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3790 }
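// Worked example (illustrative): for "new int[2][3][4]", ndims == 3 and the
// three dimension words sit on the expression stack with the last dimension
// on top, so the first dimension is found at
//
//   first_addr = rsp + (3 - 1) * wordSize
//
// which is what the first lea computes; the final lea pops all ndims words.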
3791 #endif // !CC_INTERP