Tue, 02 Sep 2014 12:48:45 -0700
8055494: Add C2 x86 intrinsic for BigInteger::multiplyToLen() method
Summary: Add new C2 intrinsic for BigInteger::multiplyToLen() on x86 in 64-bit VM.
Reviewed-by: roland
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#define __ _masm->
44 // Platform-dependent initialization
46 void TemplateTable::pd_initialize() {
47 // No amd64 specific initialization
48 }
50 // Address computation: local variables
52 static inline Address iaddress(int n) {
53 return Address(r14, Interpreter::local_offset_in_bytes(n));
54 }
56 static inline Address laddress(int n) {
57 return iaddress(n + 1);
58 }
60 static inline Address faddress(int n) {
61 return iaddress(n);
62 }
64 static inline Address daddress(int n) {
65 return laddress(n);
66 }
68 static inline Address aaddress(int n) {
69 return iaddress(n);
70 }
72 static inline Address iaddress(Register r) {
73 return Address(r14, r, Address::times_8);
74 }
76 static inline Address laddress(Register r) {
77 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
78 }
80 static inline Address faddress(Register r) {
81 return iaddress(r);
82 }
84 static inline Address daddress(Register r) {
85 return laddress(r);
86 }
88 static inline Address aaddress(Register r) {
89 return iaddress(r);
90 }
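// Note: locals live at descending addresses below r14 (the locals pointer),
// so a register-based index must be negated before it is used with the
// Register overloads above. A minimal usage sketch, mirroring iload() below
// (exact offsets assumed, not authoritative):
//
//   locals_index(rbx);            // rbx = -index (see locals_index below)
//   __ movl(rax, iaddress(rbx));  // rax = *(r14 + (-index) * 8), i.e. local slot 'index'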
static inline Address at_rsp() {
  return Address(rsp, 0);
}
// At the top of the Java expression stack, which may be different from rsp()
// (it isn't for category 1 values).
static inline Address at_tos () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}
static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}
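// A hedged sketch of how these helpers are typically used: binary operations
// read their second operand straight off the expression stack and then pop it
// by adjusting rsp (see fop2() below). Assuming expr_offset_in_bytes(i) is
// i * Interpreter::stackElementSize:
//
//   __ addss(xmm0, at_rsp());                       // xmm0 += *rsp (second operand)
//   __ addptr(rsp, Interpreter::stackElementSize);  // pop it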
// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
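// j_not inverts a bytecode condition so the generated code can branch *around*
// the taken path. A usage sketch taken from the if_icmp template further down:
//
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(cc), not_taken);  // skip when the bytecode condition is false
//   branch(false, false);          // fall through: condition held, take branch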
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          // G1 barrier needs uncompressed oop for region cross check.
          Register new_val = val;
          if (UseCompressedOops) {
            new_val = rbx;
            __ movptr(new_val, val);
          }
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   new_val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default      :
      ShouldNotReachHere();
  }
}
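// The oop-storing templates below funnel through do_oop_store so the correct
// GC write barrier is emitted for the active barrier set. Usage sketches taken
// from aastore() further down (one real oop store, one NULL store):
//
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);    // store rax
//   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);  // store NULL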
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
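// patch_bytecode is how the interpreter "quickens" hot bytecode patterns: it
// overwrites the byte at the current bcp with a faster variant so later
// executions dispatch straight to the specialized template. A usage sketch
// taken from iload() below, where bc holds the chosen fast bytecode:
//
//   patch_bytecode(Bytecodes::_iload, bc, rbx, false);  // rewrite *bcp in place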
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
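// sipush decodes a big-endian s2 operand with a byte swap plus an arithmetic
// shift. A worked example for the operand bytes 0xFF 0xFE (the short -2),
// step by step:
//
//   load_unsigned_short -> rax = 0x0000FEFF   (x86 loads little-endian)
//   bswapl              -> rax = 0xFFFE0000   (operand now in the high half)
//   sarl(rax, 16)       -> rax = 0xFFFFFFFE   (= -2, sign-extended)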
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // If the next bytecode is _iload, wait to rewrite until the last two
    // iloads of the run; we only rewrite pairs. Seeing _fast_iload instead
    // means the bytecode after it is neither an iload nor a caload, so these
    // two form a plain iload pair and we rewrite to _fast_iload2.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
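// Same big-endian decode as sipush above, but with a *logical* shift (shrl)
// because the wide index is an unsigned u2, followed by the usual negation for
// locals addressing. For operand bytes 0x01 0x02 (index 258), a worked trace:
//
//   load_unsigned_short -> reg = 0x00000201
//   bswapl              -> reg = 0x01020000
//   shrl(reg, 16)       -> reg = 0x00000102  (= 258, zero-extended)
//   negptr              -> reg = -258        (ready for iaddress(reg) etc.)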
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
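// index_check folds the "index < 0" and "index >= length" tests into a single
// *unsigned* comparison: jump_cc(aboveEqual) treats a negative index as a huge
// unsigned value, so both out-of-range cases take the exception path. Worked
// example with length = 10:
//
//   index = 3  -> 3 <u 10           -> fall through, load proceeds
//   index = -1 -> 0xFFFFFFFF >=u 10 -> throws ArrayIndexOutOfBoundsException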
void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);
    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);
    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}
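// Note on the shift cases: x86 variable-count shifts take their count in CL,
// which is why the count (on tos in rax) is moved into rcx before the value is
// popped; shll/sarl/shrl with no immediate operand use CL implicitly. For
// example, ishr of value 0x80000000 by 4 yields 0xF8000000 via sarl
// (arithmetic), while iushr yields 0x08000000 via shrl (logical).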
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}
// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];
void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}
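// Why the pools are 2*2 jlongs and double_quadword is handed &pool[1]: the
// 32-byte pool is only guaranteed 8-byte alignment, but masking pool+8 with
// ~0xF always yields a 16-byte-aligned pair that still lies inside the pool.
// A worked trace for the two possible alignments of the pool base:
//
//   base % 16 == 0 : (base + 8) & ~0xF == base      -> uses pool[0..1]
//   base % 16 == 8 : (base + 8) & ~0xF == base + 8  -> uses pool[1..2]
//
// Either way xorps/xorpd get the aligned 128-bit sign-flip mask they require.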
void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}
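// The f2i/f2l/d2i/d2l slow paths rely on the hardware sentinel: cvttss2si and
// friends return the "integer indefinite" value (0x80000000 for 32-bit,
// 0x8000000000000000 for 64-bit, the is_nan constant above) whenever the input
// is NaN or out of range. Only then is the SharedRuntime call taken to apply
// exact Java semantics, e.g.:
//
//   (int)Float.NaN -> sentinel -> SharedRuntime::f2i -> 0
//   (int)1e20f     -> sentinel -> SharedRuntime::f2i -> Integer.MAX_VALUE
//   (int)1.0f      -> 1, fast path, no call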
void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
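// The branch-light three-way compare above resolves to -1/0/+1 as follows
// (value1 in rdx, value2 in rax after the pop):
//
//   rdx <  rax : rax preloaded with -1, jccb(less) keeps it   -> -1
//   rdx == rax : setb(notEqual) writes 0, movzbl zero-extends ->  0
//   rdx >  rax : setb(notEqual) writes 1, movzbl zero-extends -> +1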
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}
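// NaN handling: ucomiss/ucomisd set the parity flag on an unordered compare
// (either operand NaN), so the jccb(parity, done) exits early with the
// preloaded result. That preload is what distinguishes fcmpl from fcmpg:
//
//   fcmpl (unordered_result < 0): NaN -> rax stays -1
//   fcmpg (unordered_result > 0): NaN -> rax stays +1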
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // Load up edx with the branch displacement
  if (is_wide) {
    __ movl(rdx, at_bcp(1));
  } else {
    __ load_signed_short(rdx, at_bcp(1));
  }
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // check if MethodCounters exists
    Label has_counters;
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, has_counters);
    __ push(rdx);
    __ push(rcx);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
               rcx);
    __ pop(rcx);
    __ pop(rdx);
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ jcc(Assembler::zero, dispatch);
    __ bind(has_counters);

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter

      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);
          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}
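// The offsets above follow the class-file layout of tableswitch: after the
// opcode and 0-3 padding bytes (hence aligning rbx to a 4-byte boundary), the
// operand block reads, in big-endian 4-byte words:
//
//   [rbx + 0]              default offset
//   [rbx + BytesPerInt]    low
//   [rbx + 2*BytesPerInt]  high
//   [rbx + 3*BytesPerInt]  jump offsets for low..high, times_4-indexed by (key - low)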
1903 void TemplateTable::lookupswitch() {
1904 transition(itos, itos);
1905 __ stop("lookupswitch bytecode should have been rewritten");
1906 }
1908 void TemplateTable::fast_linearswitch() {
1909 transition(itos, vtos);
1910 Label loop_entry, loop, found, continue_execution;
1911 // bswap rax so we can avoid bswapping the table entries
1912 __ bswapl(rax);
1913 // align r13
1914 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1915 // this instruction (change offsets
1916 // below)
1917 __ andptr(rbx, -BytesPerInt);
1918 // set counter
1919 __ movl(rcx, Address(rbx, BytesPerInt));
1920 __ bswapl(rcx);
1921 __ jmpb(loop_entry);
1922 // table search
1923 __ bind(loop);
1924 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1925 __ jcc(Assembler::equal, found);
1926 __ bind(loop_entry);
1927 __ decrementl(rcx);
1928 __ jcc(Assembler::greaterEqual, loop);
1929 // default case
1930 __ profile_switch_default(rax);
1931 __ movl(rdx, Address(rbx, 0));
1932 __ jmp(continue_execution);
1933 // entry found -> get offset
1934 __ bind(found);
1935 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1936 __ profile_switch_case(rcx, rax, rbx);
1937 // continue execution
1938 __ bind(continue_execution);
1939 __ bswapl(rdx);
1940 __ movl2ptr(rdx, rdx);
1941 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1942 __ addptr(r13, rdx);
1943 __ dispatch_only(vtos);
1944 }
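// The lookupswitch layout searched above (a sketch; all u4 operands are
// stored big-endian, after the same alignment pad):
//
//   struct {
//     jint default_offset;   // at rbx + 0
//     jint npairs;           // at rbx + BytesPerInt
//     struct { jint match; jint offset; } pairs[npairs];  // from rbx + 2 * BytesPerInt
//   };
//
// Each pair is 8 bytes, hence the Address::times_8 scaling above.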
1946 void TemplateTable::fast_binaryswitch() {
1947 transition(itos, vtos);
1948 // Implementation using the following core algorithm:
1949 //
1950 // int binary_search(int key, LookupswitchPair* array, int n) {
1951 // // Binary search according to "Methodik des Programmierens" by
1952 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1953 // int i = 0;
1954 // int j = n;
1955 // while (i+1 < j) {
1956 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1957 // // with Q: for all i: 0 <= i < n: key < a[i]
1958 // // where a stands for the array and assuming that the (nonexistent)
1959 // // element a[n] is infinitely big.
1960 // int h = (i + j) >> 1;
1961 // // i < h < j
1962 // if (key < array[h].fast_match()) {
1963 // j = h;
1964 // } else {
1965 // i = h;
1966 // }
1967 // }
1968 // // R: a[i] <= key < a[i+1] or Q
1969 // // (i.e., if key is within array, i is the correct index)
1970 // return i;
1971 // }
1973 // Register allocation
1974 const Register key = rax; // already set (tosca)
1975 const Register array = rbx;
1976 const Register i = rcx;
1977 const Register j = rdx;
1978 const Register h = rdi;
1979 const Register temp = rsi;
1981 // Find array start
1982 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1983 // get rid of this
1984 // instruction (change
1985 // offsets below)
1986 __ andptr(array, -BytesPerInt);
1988 // Initialize i & j
1989 __ xorl(i, i); // i = 0;
1990 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1992 // Convert j into native byte ordering
1993 __ bswapl(j);
1995 // And start
1996 Label entry;
1997 __ jmp(entry);
1999 // binary search loop
2000 {
2001 Label loop;
2002 __ bind(loop);
2003 // int h = (i + j) >> 1;
2004 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2005 __ sarl(h, 1); // h = (i + j) >> 1;
2006 // if (key < array[h].fast_match()) {
2007 // j = h;
2008 // } else {
2009 // i = h;
2010 // }
2011 // Convert array[h].match to native byte-ordering before compare
2012 __ movl(temp, Address(array, h, Address::times_8));
2013 __ bswapl(temp);
2014 __ cmpl(key, temp);
2015 // j = h if (key < array[h].fast_match())
2016 __ cmovl(Assembler::less, j, h);
2017 // i = h if (key >= array[h].fast_match())
2018 __ cmovl(Assembler::greaterEqual, i, h);
2019 // while (i+1 < j)
2020 __ bind(entry);
2021 __ leal(h, Address(i, 1)); // i+1
2022 __ cmpl(h, j); // i+1 < j
2023 __ jcc(Assembler::less, loop);
2024 }
2026 // end of binary search, result index is i (must check again!)
2027 Label default_case;
2028 // Convert array[i].match to native byte-ordering before compare
2029 __ movl(temp, Address(array, i, Address::times_8));
2030 __ bswapl(temp);
2031 __ cmpl(key, temp);
2032 __ jcc(Assembler::notEqual, default_case);
2034 // entry found -> j = offset
2035 __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2036 __ profile_switch_case(i, key, array);
2037 __ bswapl(j);
2038 __ movl2ptr(j, j);
2039 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2040 __ addptr(r13, j);
2041 __ dispatch_only(vtos);
2043 // default case -> j = default offset
2044 __ bind(default_case);
2045 __ profile_switch_default(i);
2046 __ movl(j, Address(array, -2 * BytesPerInt));
2047 __ bswapl(j);
2048 __ movl2ptr(j, j);
2049 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2050 __ addptr(r13, j);
2051 __ dispatch_only(vtos);
2052 }
2055 void TemplateTable::_return(TosState state) {
2056 transition(state, state);
2057 assert(_desc->calls_vm(),
2058 "inconsistent calls_vm information"); // call in remove_activation
2060 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2061 assert(state == vtos, "only valid state");
2062 __ movptr(c_rarg1, aaddress(0));
2063 __ load_klass(rdi, c_rarg1);
2064 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2065 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2066 Label skip_register_finalizer;
2067 __ jcc(Assembler::zero, skip_register_finalizer);
2069 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2071 __ bind(skip_register_finalizer);
2072 }
2074 __ remove_activation(state, r13);
2075 __ jmp(r13);
2076 }
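// For the _return_register_finalizer case above, the runtime call is
// roughly equivalent to (a sketch in Java terms):
//
//   if (this.getClass().hasFinalizer())
//     Finalizer.register(this);   // may safepoint, hence the VM call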
2078 // ----------------------------------------------------------------------------
2079 // Volatile variables demand their effects be made known to all CPUs
2080 // in order. Store buffers on most chips allow reads & writes to
2081 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2082 // without some kind of memory barrier (i.e., it's not sufficient that
2083 // the interpreter does not reorder volatile references; the hardware
2084 // must not reorder them either).
2085 //
2086 // According to the new Java Memory Model (JMM):
2087 // (1) All volatiles are serialized with respect to each other. Also,
2088 // reads & writes act as acquire & release, so:
2089 // (2) A read cannot let unrelated NON-volatile memory refs that
2090 // happen after the read float up to before the read. It's OK for
2091 // non-volatile memory refs that happen before the volatile read to
2092 // float down below it.
2093 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2094 // memory refs that happen BEFORE the write float down to after the
2095 // write. It's OK for non-volatile memory refs that happen after the
2096 // volatile write to float up before it.
2097 //
2098 // We only put in barriers around volatile refs (they are expensive),
2099 // not _between_ memory refs (that would require us to track the
2100 // flavor of the previous memory refs). Requirements (2) and (3)
2101 // require some barriers before volatile stores and after volatile
2102 // loads. These nearly cover requirement (1) but miss the
2103 // volatile-store-volatile-load case. The barrier for this final case
2104 // is placed after volatile stores, although it could just as well go
2105 // before volatile loads.
2106 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2107 order_constraint) {
2108 // Helper function to insert an is-volatile test and memory barrier
2109 if (os::is_MP()) { // Not needed on single CPU
2110 __ membar(order_constraint);
2111 }
2112 }
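// As an illustration of the volatile-store-volatile-load case (a sketch,
// not code from this file): with volatile fields a and b, both initially 0,
//
//   thread 1: a = 1; r1 = b;
//   thread 2: b = 1; r2 = a;
//
// the JMM forbids the outcome r1 == 0 && r2 == 0, and only a StoreLoad
// barrier between each volatile store and the following volatile load
// rules it out; that is why StoreLoad is emitted after volatile stores
// (see putfield_or_static below).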
2114 void TemplateTable::resolve_cache_and_index(int byte_no,
2115 Register Rcache,
2116 Register index,
2117 size_t index_size) {
2118 const Register temp = rbx;
2119 assert_different_registers(Rcache, index, temp);
2121 Label resolved;
2122 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2123 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2124 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2125 __ jcc(Assembler::equal, resolved);
2127 // resolve first time through
2128 address entry;
2129 switch (bytecode()) {
2130 case Bytecodes::_getstatic:
2131 case Bytecodes::_putstatic:
2132 case Bytecodes::_getfield:
2133 case Bytecodes::_putfield:
2134 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2135 break;
2136 case Bytecodes::_invokevirtual:
2137 case Bytecodes::_invokespecial:
2138 case Bytecodes::_invokestatic:
2139 case Bytecodes::_invokeinterface:
2140 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2141 break;
2142 case Bytecodes::_invokehandle:
2143 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2144 break;
2145 case Bytecodes::_invokedynamic:
2146 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2147 break;
2148 default:
2149 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2150 break;
2151 }
2152 __ movl(temp, (int) bytecode());
2153 __ call_VM(noreg, entry, temp);
2155 // Update registers with resolved info
2156 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2157 __ bind(resolved);
2158 }
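// In outline, resolve_cache_and_index is (pseudocode):
//
//   if (bytecode recorded in the cache entry != bytecode()) {
//     call the matching InterpreterRuntime::resolve_* entry;  // fills the entry
//     reload Rcache and index from the bcp;
//   }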
2160 // The cache and index registers must be set before the call
2161 void TemplateTable::load_field_cp_cache_entry(Register obj,
2162 Register cache,
2163 Register index,
2164 Register off,
2165 Register flags,
2166 bool is_static = false) {
2167 assert_different_registers(cache, index, flags, off);
2169 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2170 // Field offset
2171 __ movptr(off, Address(cache, index, Address::times_ptr,
2172 in_bytes(cp_base_offset +
2173 ConstantPoolCacheEntry::f2_offset())));
2174 // Flags
2175 __ movl(flags, Address(cache, index, Address::times_ptr,
2176 in_bytes(cp_base_offset +
2177 ConstantPoolCacheEntry::flags_offset())));
2179 // klass overwrite register
2180 if (is_static) {
2181 __ movptr(obj, Address(cache, index, Address::times_ptr,
2182 in_bytes(cp_base_offset +
2183 ConstantPoolCacheEntry::f1_offset())));
2184 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2185 __ movptr(obj, Address(obj, mirror_offset));
2186 }
2187 }
2189 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2190 Register method,
2191 Register itable_index,
2192 Register flags,
2193 bool is_invokevirtual,
2194 bool is_invokevfinal, /*unused*/
2195 bool is_invokedynamic) {
2196 // setup registers
2197 const Register cache = rcx;
2198 const Register index = rdx;
2199 assert_different_registers(method, flags);
2200 assert_different_registers(method, cache, index);
2201 assert_different_registers(itable_index, flags);
2202 assert_different_registers(itable_index, cache, index);
2203 // determine constant pool cache field offsets
2204 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2205 const int method_offset = in_bytes(
2206 ConstantPoolCache::base_offset() +
2207 ((byte_no == f2_byte)
2208 ? ConstantPoolCacheEntry::f2_offset()
2209 : ConstantPoolCacheEntry::f1_offset()));
2210 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2211 ConstantPoolCacheEntry::flags_offset());
2212 // access constant pool cache fields
2213 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2214 ConstantPoolCacheEntry::f2_offset());
2216 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2217 resolve_cache_and_index(byte_no, cache, index, index_size);
2218 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2220 if (itable_index != noreg) {
2221 // pick up itable or appendix index from f2 also:
2222 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2223 }
2224 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2225 }
2227 // Correct values of the cache and index registers are preserved.
2228 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2229 bool is_static, bool has_tos) {
2230 // Do the JVMTI work here to avoid disturbing the register state below.
2231 // We use c_rarg registers here because we want to use the registers used
2232 // in the call to the VM
2233 if (JvmtiExport::can_post_field_access()) {
2234 // Check to see if a field access watch has been set before we
2235 // take the time to call into the VM.
2236 Label L1;
2237 assert_different_registers(cache, index, rax);
2238 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2239 __ testl(rax, rax);
2240 __ jcc(Assembler::zero, L1);
2242 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2244 // cache entry pointer
2245 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2246 __ shll(c_rarg3, LogBytesPerWord);
2247 __ addptr(c_rarg2, c_rarg3);
2248 if (is_static) {
2249 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2250 } else {
2251 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2252 __ verify_oop(c_rarg1);
2253 }
2254 // c_rarg1: object pointer or NULL
2255 // c_rarg2: cache entry pointer
2256 // c_rarg3: jvalue object on the stack
2257 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2258 InterpreterRuntime::post_field_access),
2259 c_rarg1, c_rarg2, c_rarg3);
2260 __ get_cache_and_index_at_bcp(cache, index, 1);
2261 __ bind(L1);
2262 }
2263 }
2265 void TemplateTable::pop_and_check_object(Register r) {
2266 __ pop_ptr(r);
2267 __ null_check(r); // for field access must check obj.
2268 __ verify_oop(r);
2269 }
2271 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2272 transition(vtos, vtos);
2274 const Register cache = rcx;
2275 const Register index = rdx;
2276 const Register obj = c_rarg3;
2277 const Register off = rbx;
2278 const Register flags = rax;
2279 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2281 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2282 jvmti_post_field_access(cache, index, is_static, false);
2283 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2285 if (!is_static) {
2286 // obj is on the stack
2287 pop_and_check_object(obj);
2288 }
2290 const Address field(obj, off, Address::times_1);
2292 Label Done, notByte, notInt, notShort, notChar,
2293 notLong, notFloat, notObj, notDouble;
2295 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2296 // Make sure we don't need to mask flags (rax) after the above shift
2297 assert(btos == 0, "change code, btos != 0");
2299 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2300 __ jcc(Assembler::notZero, notByte);
2301 // btos
2302 __ load_signed_byte(rax, field);
2303 __ push(btos);
2304 // Rewrite bytecode to be faster
2305 if (!is_static) {
2306 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2307 }
2308 __ jmp(Done);
2310 __ bind(notByte);
2311 __ cmpl(flags, atos);
2312 __ jcc(Assembler::notEqual, notObj);
2313 // atos
2314 __ load_heap_oop(rax, field);
2315 __ push(atos);
2316 if (!is_static) {
2317 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2318 }
2319 __ jmp(Done);
2321 __ bind(notObj);
2322 __ cmpl(flags, itos);
2323 __ jcc(Assembler::notEqual, notInt);
2324 // itos
2325 __ movl(rax, field);
2326 __ push(itos);
2327 // Rewrite bytecode to be faster
2328 if (!is_static) {
2329 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2330 }
2331 __ jmp(Done);
2333 __ bind(notInt);
2334 __ cmpl(flags, ctos);
2335 __ jcc(Assembler::notEqual, notChar);
2336 // ctos
2337 __ load_unsigned_short(rax, field);
2338 __ push(ctos);
2339 // Rewrite bytecode to be faster
2340 if (!is_static) {
2341 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2342 }
2343 __ jmp(Done);
2345 __ bind(notChar);
2346 __ cmpl(flags, stos);
2347 __ jcc(Assembler::notEqual, notShort);
2348 // stos
2349 __ load_signed_short(rax, field);
2350 __ push(stos);
2351 // Rewrite bytecode to be faster
2352 if (!is_static) {
2353 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2354 }
2355 __ jmp(Done);
2357 __ bind(notShort);
2358 __ cmpl(flags, ltos);
2359 __ jcc(Assembler::notEqual, notLong);
2360 // ltos
2361 __ movq(rax, field);
2362 __ push(ltos);
2363 // Rewrite bytecode to be faster
2364 if (!is_static) {
2365 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2366 }
2367 __ jmp(Done);
2369 __ bind(notLong);
2370 __ cmpl(flags, ftos);
2371 __ jcc(Assembler::notEqual, notFloat);
2372 // ftos
2373 __ movflt(xmm0, field);
2374 __ push(ftos);
2375 // Rewrite bytecode to be faster
2376 if (!is_static) {
2377 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2378 }
2379 __ jmp(Done);
2381 __ bind(notFloat);
2382 #ifdef ASSERT
2383 __ cmpl(flags, dtos);
2384 __ jcc(Assembler::notEqual, notDouble);
2385 #endif
2386 // dtos
2387 __ movdbl(xmm0, field);
2388 __ push(dtos);
2389 // Rewrite bytecode to be faster
2390 if (!is_static) {
2391 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2392 }
2393 #ifdef ASSERT
2394 __ jmp(Done);
2396 __ bind(notDouble);
2397 __ stop("Bad state");
2398 #endif
2400 __ bind(Done);
2401 // [jk] not needed currently
2402 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2403 // Assembler::LoadStore));
2404 }
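// The chain of compares above behaves like (pseudocode):
//
//   switch ((flags >> tos_state_shift) & tos_state_mask) {
//     case btos: ...  case atos: ...  case itos: ...  // etc.
//   }
//
// btos == 0 lets the first case be tested with a plain notZero branch.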
2407 void TemplateTable::getfield(int byte_no) {
2408 getfield_or_static(byte_no, false);
2409 }
2411 void TemplateTable::getstatic(int byte_no) {
2412 getfield_or_static(byte_no, true);
2413 }
2415 // The cache and index registers are expected to be set before the call.
2416 // The function may destroy various registers, just not the cache and index registers.
2417 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2418 transition(vtos, vtos);
2420 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2422 if (JvmtiExport::can_post_field_modification()) {
2423 // Check to see if a field modification watch has been set before
2424 // we take the time to call into the VM.
2425 Label L1;
2426 assert_different_registers(cache, index, rax);
2427 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2428 __ testl(rax, rax);
2429 __ jcc(Assembler::zero, L1);
2431 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2433 if (is_static) {
2434 // Life is simple. Null out the object pointer.
2435 __ xorl(c_rarg1, c_rarg1);
2436 } else {
2437 // Life is harder. The stack holds the value on top, followed by
2438 // the object. We don't know the size of the value, though; it
2439 // could be one or two words depending on its type. As a result,
2440 // we must find the type to determine where the object is.
2441 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2442 Address::times_8,
2443 in_bytes(cp_base_offset +
2444 ConstantPoolCacheEntry::flags_offset())));
2445 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2446 // Make sure we don't need to mask rcx after the above shift
2447 ConstantPoolCacheEntry::verify_tos_state_shift();
2448 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2449 __ cmpl(c_rarg3, ltos);
2450 __ cmovptr(Assembler::equal,
2451 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2452 __ cmpl(c_rarg3, dtos);
2453 __ cmovptr(Assembler::equal,
2454 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2455 }
2456 // cache entry pointer
2457 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2458 __ shll(rscratch1, LogBytesPerWord);
2459 __ addptr(c_rarg2, rscratch1);
2460 // object (tos)
2461 __ mov(c_rarg3, rsp);
2462 // c_rarg1: object pointer set up above (NULL if static)
2463 // c_rarg2: cache entry pointer
2464 // c_rarg3: jvalue object on the stack
2465 __ call_VM(noreg,
2466 CAST_FROM_FN_PTR(address,
2467 InterpreterRuntime::post_field_modification),
2468 c_rarg1, c_rarg2, c_rarg3);
2469 __ get_cache_and_index_at_bcp(cache, index, 1);
2470 __ bind(L1);
2471 }
2472 }
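// The object-locating logic above amounts to (pseudocode):
//
//   obj = *((tos_state == ltos || tos_state == dtos)
//           ? at_tos_p2()    // value occupies two words
//           : at_tos_p1());  // value occupies one word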
2474 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2475 transition(vtos, vtos);
2477 const Register cache = rcx;
2478 const Register index = rdx;
2479 const Register obj = rcx;
2480 const Register off = rbx;
2481 const Register flags = rax;
2482 const Register bc = c_rarg3;
2484 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2485 jvmti_post_field_mod(cache, index, is_static);
2486 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2488 // [jk] not needed currently
2489 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2490 // Assembler::StoreStore));
2492 Label notVolatile, Done;
2493 __ movl(rdx, flags);
2494 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2495 __ andl(rdx, 0x1);
2497 // field address
2498 const Address field(obj, off, Address::times_1);
2500 Label notByte, notInt, notShort, notChar,
2501 notLong, notFloat, notObj, notDouble;
2503 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2505 assert(btos == 0, "change code, btos != 0");
2506 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2507 __ jcc(Assembler::notZero, notByte);
2509 // btos
2510 {
2511 __ pop(btos);
2512 if (!is_static) pop_and_check_object(obj);
2513 __ movb(field, rax);
2514 if (!is_static) {
2515 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2516 }
2517 __ jmp(Done);
2518 }
2520 __ bind(notByte);
2521 __ cmpl(flags, atos);
2522 __ jcc(Assembler::notEqual, notObj);
2524 // atos
2525 {
2526 __ pop(atos);
2527 if (!is_static) pop_and_check_object(obj);
2528 // Store into the field
2529 do_oop_store(_masm, field, rax, _bs->kind(), false);
2530 if (!is_static) {
2531 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2532 }
2533 __ jmp(Done);
2534 }
2536 __ bind(notObj);
2537 __ cmpl(flags, itos);
2538 __ jcc(Assembler::notEqual, notInt);
2540 // itos
2541 {
2542 __ pop(itos);
2543 if (!is_static) pop_and_check_object(obj);
2544 __ movl(field, rax);
2545 if (!is_static) {
2546 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2547 }
2548 __ jmp(Done);
2549 }
2551 __ bind(notInt);
2552 __ cmpl(flags, ctos);
2553 __ jcc(Assembler::notEqual, notChar);
2555 // ctos
2556 {
2557 __ pop(ctos);
2558 if (!is_static) pop_and_check_object(obj);
2559 __ movw(field, rax);
2560 if (!is_static) {
2561 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2562 }
2563 __ jmp(Done);
2564 }
2566 __ bind(notChar);
2567 __ cmpl(flags, stos);
2568 __ jcc(Assembler::notEqual, notShort);
2570 // stos
2571 {
2572 __ pop(stos);
2573 if (!is_static) pop_and_check_object(obj);
2574 __ movw(field, rax);
2575 if (!is_static) {
2576 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2577 }
2578 __ jmp(Done);
2579 }
2581 __ bind(notShort);
2582 __ cmpl(flags, ltos);
2583 __ jcc(Assembler::notEqual, notLong);
2585 // ltos
2586 {
2587 __ pop(ltos);
2588 if (!is_static) pop_and_check_object(obj);
2589 __ movq(field, rax);
2590 if (!is_static) {
2591 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2592 }
2593 __ jmp(Done);
2594 }
2596 __ bind(notLong);
2597 __ cmpl(flags, ftos);
2598 __ jcc(Assembler::notEqual, notFloat);
2600 // ftos
2601 {
2602 __ pop(ftos);
2603 if (!is_static) pop_and_check_object(obj);
2604 __ movflt(field, xmm0);
2605 if (!is_static) {
2606 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2607 }
2608 __ jmp(Done);
2609 }
2611 __ bind(notFloat);
2612 #ifdef ASSERT
2613 __ cmpl(flags, dtos);
2614 __ jcc(Assembler::notEqual, notDouble);
2615 #endif
2617 // dtos
2618 {
2619 __ pop(dtos);
2620 if (!is_static) pop_and_check_object(obj);
2621 __ movdbl(field, xmm0);
2622 if (!is_static) {
2623 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2624 }
2625 }
2627 #ifdef ASSERT
2628 __ jmp(Done);
2630 __ bind(notDouble);
2631 __ stop("Bad state");
2632 #endif
2634 __ bind(Done);
2636 // Check for volatile store
2637 __ testl(rdx, rdx);
2638 __ jcc(Assembler::zero, notVolatile);
2639 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2640 Assembler::StoreStore));
2641 __ bind(notVolatile);
2642 }
2644 void TemplateTable::putfield(int byte_no) {
2645 putfield_or_static(byte_no, false);
2646 }
2648 void TemplateTable::putstatic(int byte_no) {
2649 putfield_or_static(byte_no, true);
2650 }
2652 void TemplateTable::jvmti_post_fast_field_mod() {
2653 if (JvmtiExport::can_post_field_modification()) {
2654 // Check to see if a field modification watch has been set before
2655 // we take the time to call into the VM.
2656 Label L2;
2657 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2658 __ testl(c_rarg3, c_rarg3);
2659 __ jcc(Assembler::zero, L2);
2660 __ pop_ptr(rbx); // copy the object pointer from tos
2661 __ verify_oop(rbx);
2662 __ push_ptr(rbx); // put the object pointer back on tos
2663 // Save tos values before call_VM() clobbers them. Since we have
2664 // to do it for every data type, we use the saved values as the
2665 // jvalue object.
2666 switch (bytecode()) { // load values into the jvalue object
2667 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2668 case Bytecodes::_fast_bputfield: // fall through
2669 case Bytecodes::_fast_sputfield: // fall through
2670 case Bytecodes::_fast_cputfield: // fall through
2671 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2672 case Bytecodes::_fast_dputfield: __ push_d(); break;
2673 case Bytecodes::_fast_fputfield: __ push_f(); break;
2674 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2676 default:
2677 ShouldNotReachHere();
2678 }
2679 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2680 // access constant pool cache entry
2681 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2682 __ verify_oop(rbx);
2683 // rbx: object pointer copied above
2684 // c_rarg2: cache entry pointer
2685 // c_rarg3: jvalue object on the stack
2686 __ call_VM(noreg,
2687 CAST_FROM_FN_PTR(address,
2688 InterpreterRuntime::post_field_modification),
2689 rbx, c_rarg2, c_rarg3);
2691 switch (bytecode()) { // restore tos values
2692 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2693 case Bytecodes::_fast_bputfield: // fall through
2694 case Bytecodes::_fast_sputfield: // fall through
2695 case Bytecodes::_fast_cputfield: // fall through
2696 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2697 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2698 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2699 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2700 }
2701 __ bind(L2);
2702 }
2703 }
2705 void TemplateTable::fast_storefield(TosState state) {
2706 transition(state, vtos);
2708 ByteSize base = ConstantPoolCache::base_offset();
2710 jvmti_post_fast_field_mod();
2712 // access constant pool cache
2713 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2715 // test for volatile with rdx
2716 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2717 in_bytes(base +
2718 ConstantPoolCacheEntry::flags_offset())));
2720 // replace index with field offset from cache entry
2721 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2722 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2724 // [jk] not needed currently
2725 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2726 // Assembler::StoreStore));
2728 Label notVolatile;
2729 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2730 __ andl(rdx, 0x1);
2732 // Get object from stack
2733 pop_and_check_object(rcx);
2735 // field address
2736 const Address field(rcx, rbx, Address::times_1);
2738 // access field
2739 switch (bytecode()) {
2740 case Bytecodes::_fast_aputfield:
2741 do_oop_store(_masm, field, rax, _bs->kind(), false);
2742 break;
2743 case Bytecodes::_fast_lputfield:
2744 __ movq(field, rax);
2745 break;
2746 case Bytecodes::_fast_iputfield:
2747 __ movl(field, rax);
2748 break;
2749 case Bytecodes::_fast_bputfield:
2750 __ movb(field, rax);
2751 break;
2752 case Bytecodes::_fast_sputfield:
2753 // fall through
2754 case Bytecodes::_fast_cputfield:
2755 __ movw(field, rax);
2756 break;
2757 case Bytecodes::_fast_fputfield:
2758 __ movflt(field, xmm0);
2759 break;
2760 case Bytecodes::_fast_dputfield:
2761 __ movdbl(field, xmm0);
2762 break;
2763 default:
2764 ShouldNotReachHere();
2765 }
2767 // Check for volatile store
2768 __ testl(rdx, rdx);
2769 __ jcc(Assembler::zero, notVolatile);
2770 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2771 Assembler::StoreStore));
2772 __ bind(notVolatile);
2773 }
2776 void TemplateTable::fast_accessfield(TosState state) {
2777 transition(atos, state);
2779 // Do the JVMTI work here to avoid disturbing the register state below
2780 if (JvmtiExport::can_post_field_access()) {
2781 // Check to see if a field access watch has been set before we
2782 // take the time to call into the VM.
2783 Label L1;
2784 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2785 __ testl(rcx, rcx);
2786 __ jcc(Assembler::zero, L1);
2787 // access constant pool cache entry
2788 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2789 __ verify_oop(rax);
2790 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2791 __ mov(c_rarg1, rax);
2792 // c_rarg1: object pointer copied above
2793 // c_rarg2: cache entry pointer
2794 __ call_VM(noreg,
2795 CAST_FROM_FN_PTR(address,
2796 InterpreterRuntime::post_field_access),
2797 c_rarg1, c_rarg2);
2798 __ pop_ptr(rax); // restore object pointer
2799 __ bind(L1);
2800 }
2802 // access constant pool cache
2803 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2804 // replace index with field offset from cache entry
2805 // [jk] not needed currently
2806 // if (os::is_MP()) {
2807 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2808 // in_bytes(ConstantPoolCache::base_offset() +
2809 // ConstantPoolCacheEntry::flags_offset())));
2810 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2811 // __ andl(rdx, 0x1);
2812 // }
2813 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2814 in_bytes(ConstantPoolCache::base_offset() +
2815 ConstantPoolCacheEntry::f2_offset())));
2817 // rax: object
2818 __ verify_oop(rax);
2819 __ null_check(rax);
2820 Address field(rax, rbx, Address::times_1);
2822 // access field
2823 switch (bytecode()) {
2824 case Bytecodes::_fast_agetfield:
2825 __ load_heap_oop(rax, field);
2826 __ verify_oop(rax);
2827 break;
2828 case Bytecodes::_fast_lgetfield:
2829 __ movq(rax, field);
2830 break;
2831 case Bytecodes::_fast_igetfield:
2832 __ movl(rax, field);
2833 break;
2834 case Bytecodes::_fast_bgetfield:
2835 __ movsbl(rax, field);
2836 break;
2837 case Bytecodes::_fast_sgetfield:
2838 __ load_signed_short(rax, field);
2839 break;
2840 case Bytecodes::_fast_cgetfield:
2841 __ load_unsigned_short(rax, field);
2842 break;
2843 case Bytecodes::_fast_fgetfield:
2844 __ movflt(xmm0, field);
2845 break;
2846 case Bytecodes::_fast_dgetfield:
2847 __ movdbl(xmm0, field);
2848 break;
2849 default:
2850 ShouldNotReachHere();
2851 }
2852 // [jk] not needed currently
2853 // if (os::is_MP()) {
2854 // Label notVolatile;
2855 // __ testl(rdx, rdx);
2856 // __ jcc(Assembler::zero, notVolatile);
2857 // __ membar(Assembler::LoadLoad);
2858 // __ bind(notVolatile);
2859 // }
2860 }
2862 void TemplateTable::fast_xaccess(TosState state) {
2863 transition(vtos, state);
2865 // get receiver
2866 __ movptr(rax, aaddress(0));
2867 // access constant pool cache
2868 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2869 __ movptr(rbx,
2870 Address(rcx, rdx, Address::times_8,
2871 in_bytes(ConstantPoolCache::base_offset() +
2872 ConstantPoolCacheEntry::f2_offset())));
2873 // make sure exception is reported in correct bcp range (getfield is
2874 // next instruction)
2875 __ increment(r13);
2876 __ null_check(rax);
2877 switch (state) {
2878 case itos:
2879 __ movl(rax, Address(rax, rbx, Address::times_1));
2880 break;
2881 case atos:
2882 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2883 __ verify_oop(rax);
2884 break;
2885 case ftos:
2886 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2887 break;
2888 default:
2889 ShouldNotReachHere();
2890 }
2892 // [jk] not needed currently
2893 // if (os::is_MP()) {
2894 // Label notVolatile;
2895 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2896 // in_bytes(ConstantPoolCache::base_offset() +
2897 // ConstantPoolCacheEntry::flags_offset())));
2898 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2899 // __ testl(rdx, 0x1);
2900 // __ jcc(Assembler::zero, notVolatile);
2901 // __ membar(Assembler::LoadLoad);
2902 // __ bind(notVolatile);
2903 // }
2905 __ decrement(r13);
2906 }
2910 //-----------------------------------------------------------------------------
2911 // Calls
2913 void TemplateTable::count_calls(Register method, Register temp) {
2914 // implemented elsewhere
2915 ShouldNotReachHere();
2916 }
2918 void TemplateTable::prepare_invoke(int byte_no,
2919 Register method, // linked method (or i-klass)
2920 Register index, // itable index, MethodType, etc.
2921 Register recv, // if caller wants to see it
2922 Register flags // if caller wants to test it
2923 ) {
2924 // determine flags
2925 const Bytecodes::Code code = bytecode();
2926 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2927 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2928 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2929 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2930 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2931 const bool load_receiver = (recv != noreg);
2932 const bool save_flags = (flags != noreg);
2933 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2934 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2935 assert(flags == noreg || flags == rdx, "");
2936 assert(recv == noreg || recv == rcx, "");
2938 // setup registers & access constant pool cache
2939 if (recv == noreg) recv = rcx;
2940 if (flags == noreg) flags = rdx;
2941 assert_different_registers(method, index, recv, flags);
2943 // save 'interpreter return address'
2944 __ save_bcp();
2946 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2948 // maybe push appendix to arguments (just before return address)
2949 if (is_invokedynamic || is_invokehandle) {
2950 Label L_no_push;
2951 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2952 __ jcc(Assembler::zero, L_no_push);
2953 // Push the appendix as a trailing parameter.
2954 // This must be done before we get the receiver,
2955 // since the parameter_size includes it.
2956 __ push(rbx);
2957 __ mov(rbx, index);
2958 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2959 __ load_resolved_reference_at_index(index, rbx);
2960 __ pop(rbx);
2961 __ push(index); // push appendix (MethodType, CallSite, etc.)
2962 __ bind(L_no_push);
2963 }
2965 // load receiver if needed (after appendix is pushed so parameter size is correct)
2966 // Note: no return address pushed yet
2967 if (load_receiver) {
2968 __ movl(recv, flags);
2969 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2970 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2971 const int receiver_is_at_end = -1; // back off one slot to get receiver
2972 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2973 __ movptr(recv, recv_addr);
2974 __ verify_oop(recv);
2975 }
2977 if (save_flags) {
2978 __ movl(r13, flags);
2979 }
2981 // compute return type
2982 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2983 // Make sure we don't need to mask flags after the above shift
2984 ConstantPoolCacheEntry::verify_tos_state_shift();
2985 // load return address
2986 {
2987 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2988 ExternalAddress table(table_addr);
2989 __ lea(rscratch1, table);
2990 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2991 }
2993 // push return address
2994 __ push(flags);
2996 // Restore the flags value saved in r13 above, and restore r13
2997 // (the bytecode pointer) for later null checks.
2998 if (save_flags) {
2999 __ movl(flags, r13);
3000 __ restore_bcp();
3001 }
3002 }
3005 void TemplateTable::invokevirtual_helper(Register index,
3006 Register recv,
3007 Register flags) {
3008 // Uses temporary registers rax, rdx
3009 assert_different_registers(index, recv, rax, rdx);
3010 assert(index == rbx, "");
3011 assert(recv == rcx, "");
3013 // Test for an invoke of a final method
3014 Label notFinal;
3015 __ movl(rax, flags);
3016 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3017 __ jcc(Assembler::zero, notFinal);
3019 const Register method = index; // method must be rbx
3020 assert(method == rbx,
3021 "Method* must be rbx for interpreter calling convention");
3023 // do the call - the index is actually the method to call
3024 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3026 // It's final, need a null check here!
3027 __ null_check(recv);
3029 // profile this call
3030 __ profile_final_call(rax);
3031 __ profile_arguments_type(rax, method, r13, true);
3033 __ jump_from_interpreted(method, rax);
3035 __ bind(notFinal);
3037 // get receiver klass
3038 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3039 __ load_klass(rax, recv);
3041 // profile this call
3042 __ profile_virtual_call(rax, r14, rdx);
3044 // get target Method* & entry point
3045 __ lookup_virtual_method(rax, index, method);
3046 __ profile_arguments_type(rdx, method, r13, true);
3047 __ jump_from_interpreted(method, rdx);
3048 }
3051 void TemplateTable::invokevirtual(int byte_no) {
3052 transition(vtos, vtos);
3053 assert(byte_no == f2_byte, "use this argument");
3054 prepare_invoke(byte_no,
3055 rbx, // method or vtable index
3056 noreg, // unused itable index
3057 rcx, rdx); // recv, flags
3059 // rbx: index
3060 // rcx: receiver
3061 // rdx: flags
3063 invokevirtual_helper(rbx, rcx, rdx);
3064 }
3067 void TemplateTable::invokespecial(int byte_no) {
3068 transition(vtos, vtos);
3069 assert(byte_no == f1_byte, "use this argument");
3070 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3071 rcx); // get receiver also for null check
3072 __ verify_oop(rcx);
3073 __ null_check(rcx);
3074 // do the call
3075 __ profile_call(rax);
3076 __ profile_arguments_type(rax, rbx, r13, false);
3077 __ jump_from_interpreted(rbx, rax);
3078 }
3081 void TemplateTable::invokestatic(int byte_no) {
3082 transition(vtos, vtos);
3083 assert(byte_no == f1_byte, "use this argument");
3084 prepare_invoke(byte_no, rbx); // get f1 Method*
3085 // do the call
3086 __ profile_call(rax);
3087 __ profile_arguments_type(rax, rbx, r13, false);
3088 __ jump_from_interpreted(rbx, rax);
3089 }
3091 void TemplateTable::fast_invokevfinal(int byte_no) {
3092 transition(vtos, vtos);
3093 assert(byte_no == f2_byte, "use this argument");
3094 __ stop("fast_invokevfinal not used on amd64");
3095 }
3097 void TemplateTable::invokeinterface(int byte_no) {
3098 transition(vtos, vtos);
3099 assert(byte_no == f1_byte, "use this argument");
3100 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3101 rcx, rdx); // recv, flags
3103 // rax: interface klass (from f1)
3104 // rbx: itable index (from f2)
3105 // rcx: receiver
3106 // rdx: flags
3108 // Special case of invokeinterface called for virtual method of
3109 // java.lang.Object. See cpCacheOop.cpp for details.
3110 // This code isn't produced by javac, but could be produced by
3111 // another compliant java compiler.
3112 Label notMethod;
3113 __ movl(r14, rdx);
3114 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3115 __ jcc(Assembler::zero, notMethod);
3117 invokevirtual_helper(rbx, rcx, rdx);
3118 __ bind(notMethod);
3120 // Get receiver klass into rdx - also a null check
3121 __ restore_locals(); // restore r14
3122 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3123 __ load_klass(rdx, rcx);
3125 // profile this call
3126 __ profile_virtual_call(rdx, r13, r14);
3128 Label no_such_interface, no_such_method;
3130 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3131 rdx, rax, rbx,
3132 // outputs: method, scan temp. reg
3133 rbx, r13,
3134 no_such_interface);
3136 // rbx: Method* to call
3137 // rcx: receiver
3138 // Check for abstract method error
3139 // Note: This should be done more efficiently via a throw_abstract_method_error
3140 // interpreter entry point and a conditional jump to it in case of a null
3141 // method.
3142 __ testptr(rbx, rbx);
3143 __ jcc(Assembler::zero, no_such_method);
3145 __ profile_arguments_type(rdx, rbx, r13, true);
3147 // do the call
3148 // rcx: receiver
3149 // rbx: Method*
3150 __ jump_from_interpreted(rbx, rdx);
3151 __ should_not_reach_here();
3153 // exception handling code follows...
3154 // note: must restore interpreter registers to canonical
3155 // state for exception handling to work correctly!
3157 __ bind(no_such_method);
3158 // throw exception
3159 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3160 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3161 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3162 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3163 // the call_VM checks for exception, so we should never return here.
3164 __ should_not_reach_here();
3166 __ bind(no_such_interface);
3167 // throw exception
3168 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3169 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3170 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3171 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3172 InterpreterRuntime::throw_IncompatibleClassChangeError));
3173 // the call_VM checks for exception, so we should never return here.
3174 __ should_not_reach_here();
3175 }
3178 void TemplateTable::invokehandle(int byte_no) {
3179 transition(vtos, vtos);
3180 assert(byte_no == f1_byte, "use this argument");
3181 const Register rbx_method = rbx;
3182 const Register rax_mtype = rax;
3183 const Register rcx_recv = rcx;
3184 const Register rdx_flags = rdx;
3186 if (!EnableInvokeDynamic) {
3187 // rewriter does not generate this bytecode
3188 __ should_not_reach_here();
3189 return;
3190 }
3192 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3193 __ verify_method_ptr(rbx_method);
3194 __ verify_oop(rcx_recv);
3195 __ null_check(rcx_recv);
3197 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3198 // rbx: MH.invokeExact_MT method (from f2)
3200 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3202 // FIXME: profile the LambdaForm also
3203 __ profile_final_call(rax);
3204 __ profile_arguments_type(rdx, rbx_method, r13, true);
3206 __ jump_from_interpreted(rbx_method, rdx);
3207 }
3210 void TemplateTable::invokedynamic(int byte_no) {
3211 transition(vtos, vtos);
3212 assert(byte_no == f1_byte, "use this argument");
3214 if (!EnableInvokeDynamic) {
3215 // We should not encounter this bytecode if !EnableInvokeDynamic.
3216 // The verifier will stop it. However, if we get past the verifier,
3217 // this will stop the thread in a reasonable way, without crashing the JVM.
3218 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3219 InterpreterRuntime::throw_IncompatibleClassChangeError));
3220 // the call_VM checks for exception, so we should never return here.
3221 __ should_not_reach_here();
3222 return;
3223 }
3225 const Register rbx_method = rbx;
3226 const Register rax_callsite = rax;
3228 prepare_invoke(byte_no, rbx_method, rax_callsite);
3230 // rax: CallSite object (from cpool->resolved_references[f1])
3231 // rbx: MH.linkToCallSite method (from f2)
3233 // Note: rax_callsite is already pushed by prepare_invoke
3235 // %%% should make a type profile for any invokedynamic that takes a ref argument
3236 // profile this call
3237 __ profile_call(r13);
3238 __ profile_arguments_type(rdx, rbx_method, r13, false);
3240 __ verify_oop(rax_callsite);
3242 __ jump_from_interpreted(rbx_method, rdx);
3243 }
3246 //-----------------------------------------------------------------------------
3247 // Allocation
3249 void TemplateTable::_new() {
3250 transition(vtos, atos);
3251 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3252 Label slow_case;
3253 Label done;
3254 Label initialize_header;
3255 Label initialize_object; // including clearing the fields
3256 Label allocate_shared;
3258 __ get_cpool_and_tags(rsi, rax);
3259 // Make sure the class we're about to instantiate has been resolved.
3260 // This is done before loading InstanceKlass to be consistent with the
3261 // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3262 const int tags_offset = Array<u1>::base_offset_in_bytes();
3263 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3264 JVM_CONSTANT_Class);
3265 __ jcc(Assembler::notEqual, slow_case);
3267 // get InstanceKlass
3268 __ movptr(rsi, Address(rsi, rdx,
3269 Address::times_8, sizeof(ConstantPool)));
3271 // make sure klass is initialized & doesn't have finalizer
3272 // make sure klass is fully initialized
3273 __ cmpb(Address(rsi,
3274 InstanceKlass::init_state_offset()),
3275 InstanceKlass::fully_initialized);
3276 __ jcc(Assembler::notEqual, slow_case);
3278 // get instance_size in InstanceKlass (scaled to a count of bytes)
3279 __ movl(rdx,
3280 Address(rsi,
3281 Klass::layout_helper_offset()));
3282 // test to see if it has a finalizer or is malformed in some way
3283 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3284 __ jcc(Assembler::notZero, slow_case);
3286 // Allocate the instance
3287 // 1) Try to allocate in the TLAB
3288 // 2) if that fails and the object is large, allocate in the shared Eden
3289 // 3) if the above fails (or is not applicable), go to a slow case
3290 // (creates a new TLAB, etc.)
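// In outline, the fast paths are (pseudocode; top/end are the TLAB or
// Eden bounds, rdx holds the instance size in bytes):
//
//   obj = top;
//   new_top = obj + size;
//   if (new_top <= end && publish(top = new_top)) initialize obj;
//   else fall through to the next strategy above;
//
// where 'publish' is a plain store for the thread-local TLAB and a
// locked cmpxchg (retried on failure) for the shared Eden.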
3292 const bool allow_shared_alloc =
3293 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3295 if (UseTLAB) {
3296 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3297 __ lea(rbx, Address(rax, rdx, Address::times_1));
3298 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3299 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3300 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3301 if (ZeroTLAB) {
3302 // the fields have been already cleared
3303 __ jmp(initialize_header);
3304 } else {
3305 // initialize both the header and fields
3306 __ jmp(initialize_object);
3307 }
3308 }
3310 // Allocation in the shared Eden, if allowed.
3311 //
3312 // rdx: instance size in bytes
3313 if (allow_shared_alloc) {
3314 __ bind(allocate_shared);
3316 ExternalAddress top((address)Universe::heap()->top_addr());
3317 ExternalAddress end((address)Universe::heap()->end_addr());
3319 const Register RtopAddr = rscratch1;
3320 const Register RendAddr = rscratch2;
3322 __ lea(RtopAddr, top);
3323 __ lea(RendAddr, end);
3324 __ movptr(rax, Address(RtopAddr, 0));
3326 // For retries, rax gets set by cmpxchgq
3327 Label retry;
3328 __ bind(retry);
3329 __ lea(rbx, Address(rax, rdx, Address::times_1));
3330 __ cmpptr(rbx, Address(RendAddr, 0));
3331 __ jcc(Assembler::above, slow_case);
3333 // Compare rax with the current top addr; if they are still equal, store
3334 // the new top addr (in rbx) through the top addr pointer. Sets ZF if they
3335 // were equal, clears it otherwise. Use lock prefix for atomicity on MPs.
3336 //
3337 // rax: object begin
3338 // rbx: object end
3339 // rdx: instance size in bytes
3340 if (os::is_MP()) {
3341 __ lock();
3342 }
3343 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3345 // if someone beat us to the allocation, try again; otherwise continue
3346 __ jcc(Assembler::notEqual, retry);
3348 __ incr_allocated_bytes(r15_thread, rdx, 0);
3349 }
3351 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3352 // The object is initialized before the header. If the object size is
3353 // zero, go directly to the header initialization.
3354 __ bind(initialize_object);
3355 __ decrementl(rdx, sizeof(oopDesc));
3356 __ jcc(Assembler::zero, initialize_header);
3358 // Initialize object fields
3359 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3360 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3361 {
3362 Label loop;
3363 __ bind(loop);
3364 __ movq(Address(rax, rdx, Address::times_8,
3365 sizeof(oopDesc) - oopSize),
3366 rcx);
3367 __ decrementl(rdx);
3368 __ jcc(Assembler::notZero, loop);
3369 }
3371 // initialize object header only.
3372 __ bind(initialize_header);
3373 if (UseBiasedLocking) {
3374 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3375 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3376 } else {
3377 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3378 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3379 }
3380 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3381 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3382 __ store_klass(rax, rsi); // store klass last
3384 {
3385 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3386 // Trigger dtrace event for fastpath
3387 __ push(atos); // save the return value
3388 __ call_VM_leaf(
3389 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3390 __ pop(atos); // restore the return value
3392 }
3393 __ jmp(done);
3394 }
3397 // slow case
3398 __ bind(slow_case);
3399 __ get_constant_pool(c_rarg1);
3400 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3401 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3402 __ verify_oop(rax);
3404 // continue
3405 __ bind(done);
3406 }
3408 void TemplateTable::newarray() {
3409 transition(itos, atos);
3410 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3411 __ movl(c_rarg2, rax);
3412 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3413 c_rarg1, c_rarg2);
3414 }
3416 void TemplateTable::anewarray() {
3417 transition(itos, atos);
3418 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3419 __ get_constant_pool(c_rarg1);
3420 __ movl(c_rarg3, rax);
3421 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3422 c_rarg1, c_rarg2, c_rarg3);
3423 }
3425 void TemplateTable::arraylength() {
3426 transition(atos, itos);
3427 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3428 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3429 }
3431 void TemplateTable::checkcast() {
3432 transition(atos, atos);
3433 Label done, is_null, ok_is_subtype, quicked, resolved;
3434 __ testptr(rax, rax); // object is in rax
3435 __ jcc(Assembler::zero, is_null);
3437 // Get cpool & tags index
3438 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3439 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3440 // See if bytecode has already been quicked
3441 __ cmpb(Address(rdx, rbx,
3442 Address::times_1,
3443 Array<u1>::base_offset_in_bytes()),
3444 JVM_CONSTANT_Class);
3445 __ jcc(Assembler::equal, quicked);
3446 __ push(atos); // save receiver for result, and for GC
3447 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3448 // vm_result_2 has metadata result
3449 __ get_vm_result_2(rax, r15_thread);
3450 __ pop_ptr(rdx); // restore receiver
3451 __ jmpb(resolved);
3453 // Get superklass in rax and subklass in rbx
3454 __ bind(quicked);
3455 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3456 __ movptr(rax, Address(rcx, rbx,
3457 Address::times_8, sizeof(ConstantPool)));
3459 __ bind(resolved);
3460 __ load_klass(rbx, rdx);
3462 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3463 // Superklass in rax. Subklass in rbx.
3464 __ gen_subtype_check(rbx, ok_is_subtype);
3466 // Come here on failure
3467 __ push_ptr(rdx);
3468 // object is at TOS
3469 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3471 // Come here on success
3472 __ bind(ok_is_subtype);
3473 __ mov(rax, rdx); // Restore object in rdx
3475 // Collect counts on whether this check-cast sees NULLs a lot or not.
3476 if (ProfileInterpreter) {
3477 __ jmp(done);
3478 __ bind(is_null);
3479 __ profile_null_seen(rcx);
3480 } else {
3481 __ bind(is_null); // same as 'done'
3482 }
3483 __ bind(done);
3484 }
3486 void TemplateTable::instanceof() {
3487 transition(atos, itos);
3488 Label done, is_null, ok_is_subtype, quicked, resolved;
3489 __ testptr(rax, rax);
3490 __ jcc(Assembler::zero, is_null);
3492 // Get cpool & tags index
3493 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3494 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3495 // See if bytecode has already been quicked
3496 __ cmpb(Address(rdx, rbx,
3497 Address::times_1,
3498 Array<u1>::base_offset_in_bytes()),
3499 JVM_CONSTANT_Class);
3500 __ jcc(Assembler::equal, quicked);
3502 __ push(atos); // save receiver for result, and for GC
3503 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3504 // vm_result_2 has metadata result
3505 __ get_vm_result_2(rax, r15_thread);
3506 __ pop_ptr(rdx); // restore receiver
3507 __ verify_oop(rdx);
3508 __ load_klass(rdx, rdx);
3509 __ jmpb(resolved);
3511 // Get superklass in rax and subklass in rdx
3512 __ bind(quicked);
3513 __ load_klass(rdx, rax);
3514 __ movptr(rax, Address(rcx, rbx,
3515 Address::times_8, sizeof(ConstantPool)));
3517 __ bind(resolved);
3519 // Generate subtype check. Blows rcx, rdi
3520 // Superklass in rax. Subklass in rdx.
3521 __ gen_subtype_check(rdx, ok_is_subtype);
3523 // Come here on failure
3524 __ xorl(rax, rax);
3525 __ jmpb(done);
3526 // Come here on success
3527 __ bind(ok_is_subtype);
3528 __ movl(rax, 1);
3530 // Collect counts on whether this test sees NULLs a lot or not.
3531 if (ProfileInterpreter) {
3532 __ jmp(done);
3533 __ bind(is_null);
3534 __ profile_null_seen(rcx);
3535 } else {
3536 __ bind(is_null); // same as 'done'
3537 }
3538 __ bind(done);
3539 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3540 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3541 }
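// Both checkcast and instanceof above follow the same shape (pseudocode):
//
//   if (obj == NULL) goto is_null;          // optionally profiled
//   k = quicked ? cpool[index]              // tag is already JVM_CONSTANT_Class
//               : quicken_io_cc();          // resolve via the VM
//   if (obj->klass() is a subtype of k) succeed;  // gen_subtype_check
//   else fail;                              // throw ClassCastException or push 0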
3543 //-----------------------------------------------------------------------------
3544 // Breakpoints
3545 void TemplateTable::_breakpoint() {
3546 // Note: We get here even if we are single stepping.
3547 // jbug insists on setting breakpoints at every bytecode
3548 // even if we are in single step mode.
3550 transition(vtos, vtos);
3552 // get the unpatched byte code
3553 __ get_method(c_rarg1);
3554 __ call_VM(noreg,
3555 CAST_FROM_FN_PTR(address,
3556 InterpreterRuntime::get_original_bytecode_at),
3557 c_rarg1, r13);
3558 __ mov(rbx, rax);
3560 // post the breakpoint event
3561 __ get_method(c_rarg1);
3562 __ call_VM(noreg,
3563 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3564 c_rarg1, r13);
3566 // complete the execution of original bytecode
3567 __ dispatch_only_normal(vtos);
3568 }
3570 //-----------------------------------------------------------------------------
3571 // Exceptions
3573 void TemplateTable::athrow() {
3574 transition(atos, vtos);
3575 __ null_check(rax);
3576 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3577 }
3579 //-----------------------------------------------------------------------------
3580 // Synchronization
3581 //
3582 // Note: monitorenter & exit are symmetric routines, which is reflected
3583 // in the assembly code structure as well
3584 //
3585 // Stack layout:
3586 //
3587 // [expressions ] <--- rsp = expression stack top
3588 // ..
3589 // [expressions ]
3590 // [monitor entry] <--- monitor block top = expression stack bot
3591 // ..
3592 // [monitor entry]
3593 // [frame data ] <--- monitor block bot
3594 // ...
3595 // [saved rbp ] <--- rbp
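// monitorenter's slot search below is (pseudocode):
//
//   free = NULL;
//   for (entry = monitor block top; entry != bot; entry++) {
//     if (entry->obj == NULL) free = entry;   // remember a free slot
//     if (entry->obj == lock_obj) break;      // same object: stop searching
//   }
//   if (free == NULL) grow the monitor block by one entry;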
3596 void TemplateTable::monitorenter() {
3597 transition(atos, vtos);
3599 // check for NULL object
3600 __ null_check(rax);
3602 const Address monitor_block_top(
3603 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3604 const Address monitor_block_bot(
3605 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3606 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3608 Label allocated;
3610 // initialize entry pointer
3611 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3613 // find a free slot in the monitor block (result in c_rarg1)
3614 {
3615 Label entry, loop, exit;
3616 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3617 // starting with top-most entry
3618 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3619 // of monitor block
3620 __ jmpb(entry);
3622 __ bind(loop);
3623 // check if current entry is used
3624 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3625 // if not used then remember entry in c_rarg1
3626 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3627 // check if current entry is for same object
3628 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3629 // if same object then stop searching
3630 __ jccb(Assembler::equal, exit);
3631 // otherwise advance to next entry
3632 __ addptr(c_rarg3, entry_size);
3633 __ bind(entry);
3634 // check if bottom reached
3635 __ cmpptr(c_rarg3, c_rarg2);
3636 // if not at bottom then check this entry
3637 __ jcc(Assembler::notEqual, loop);
3638 __ bind(exit);
3639 }
3641 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3642 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3644 // allocate one if there's no free slot
3645 {
3646 Label entry, loop;
3647 // 1. compute new pointers // rsp: old expression stack top
3648 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3649 __ subptr(rsp, entry_size); // move expression stack top
3650 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3651 __ mov(c_rarg3, rsp); // set start value for copy loop
3652 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3653 __ jmp(entry);
3654 // 2. move expression stack contents
3655 __ bind(loop);
3656 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3657 // word from old location
3658 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3659 __ addptr(c_rarg3, wordSize); // advance to next word
3660 __ bind(entry);
3661 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3662 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3663 // copy next word
3664 }
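// Conceptually (C++ sketch with stand-in names): the expression stack is slid
// down by one monitor entry to open a new slot just above the frame data:
//
//   rsp     -= entry_size;               // new expression stack top
//   new_bot  = old_bot - entry_size;     // new monitor block bottom
//   for (intptr_t* p = (intptr_t*) rsp; p != new_bot; p++)
//     *p = *(p + entry_size / wordSize); // copy each stack word down
//   // new_bot is the freshly allocated entry (c_rarg1 at 'allocated')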
3666 // call run-time routine
3667 // c_rarg1: points to monitor entry
3668 __ bind(allocated);
3670 // Increment bcp to point to the next bytecode, so exception
3671 // handling for asynchronous exceptions works correctly.
3672 // The object has already been popped from the stack, so the
3673 // expression stack looks correct.
3674 __ increment(r13);
3676 // store object
3677 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3678 __ lock_object(c_rarg1);
3680 // check to make sure this monitor doesn't cause stack overflow after locking
3681 __ save_bcp(); // in case of exception
3682 __ generate_stack_overflow_check(0);
3684 // The bcp has already been incremented. Just need to dispatch to
3685 // next instruction.
3686 __ dispatch_next(vtos);
3687 }
3690 void TemplateTable::monitorexit() {
3691 transition(atos, vtos);
3693 // check for NULL object
3694 __ null_check(rax);
3696 const Address monitor_block_top(
3697 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3698 const Address monitor_block_bot(
3699 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3700 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3702 Label found;
3704 // find matching slot
3705 {
3706 Label entry, loop;
3707 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3708 // starting with top-most entry
3709 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3710 // of monitor block
3711 __ jmpb(entry);
3713 __ bind(loop);
3714 // check if current entry is for same object
3715 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3716 // if same object then stop searching
3717 __ jcc(Assembler::equal, found);
3718 // otherwise advance to next entry
3719 __ addptr(c_rarg1, entry_size);
3720 __ bind(entry);
3721 // check if bottom reached
3722 __ cmpptr(c_rarg1, c_rarg2);
3723 // if not at bottom then check this entry
3724 __ jcc(Assembler::notEqual, loop);
3725 }
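// Roughly, in C++ (sketch only; stand-in names): scan the monitor block for
// the entry owning this object; running off the end means the unlock is not
// block-structured and control falls through to the error path below:
//
//   BasicObjectLock* cur = top;
//   while (cur != bot && cur->obj() != unlock_obj)
//     cur = next(cur);
//   // cur == bot  -->  throw IllegalMonitorStateException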
3727 // Error handling: unlocking was not block-structured
3728 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3729 InterpreterRuntime::throw_illegal_monitor_state_exception));
3730 __ should_not_reach_here();
3732 // call run-time routine
3733 // c_rarg1: points to monitor entry
3734 __ bind(found);
3735 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3736 __ unlock_object(c_rarg1);
3737 __ pop_ptr(rax); // discard object
3738 }
3741 // Wide instructions
3742 void TemplateTable::wide() {
3743 transition(vtos, vtos);
3744 __ load_unsigned_byte(rbx, at_bcp(1));
3745 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3746 __ jmp(Address(rscratch1, rbx, Address::times_8));
3747 // Note: the r13 increment step is part of the individual wide
3748 // bytecode implementations
3749 }
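// In effect (C++ sketch, illustration only), this indexes the table of wide
// entry points by the bytecode that follows the wide prefix:
//
//   unsigned op = *(bcp + 1);          // the bytecode being widened
//   Interpreter::_wentry_point[op]();  // its wide variant; it advances bcp itself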
3752 // Multi arrays
3753 void TemplateTable::multianewarray() {
3754 transition(vtos, atos);
3755 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3756 // last dim is on top of stack; we want address of first one:
3757 // first_addr = last_addr + (ndims - 1) * wordSize
3758 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3759 call_VM(rax,
3760 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3761 c_rarg1);
3762 __ load_unsigned_byte(rbx, at_bcp(3));
3763 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3764 }
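// The address arithmetic above, in C++ terms (sketch only): the dimension
// counts sit on the expression stack with the last dimension on top, so:
//
//   int       ndims     = *(bcp + 3);                    // u1 operand
//   intptr_t* first_dim = (intptr_t*) rsp + (ndims - 1); // == rsp + (ndims-1)*wordSize
//   oop       result    = InterpreterRuntime::multianewarray(first_dim);
//   rsp = rsp + ndims * wordSize;                        // pop the dimension words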
3765 #endif // !CC_INTERP