/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}
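
// Added note (illustrative, not from the original source): locals are
// addressed relative to the locals pointer in r14 with 8-byte slots, and
// the Register variants above expect a *negated* index (see
// locals_index() below), so e.g.
//
//   locals_index(rbx);              // rbx = -index
//   __ movl(rax, iaddress(rbx));    // rax = *(r14 + (-index) * 8)
//
// The exact displacement returned by local_offset_in_bytes() is an
// interpreter layout detail; the negative indexing direction is the
// point here. laddress() adds one slot because longs/doubles occupy two.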
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from
// esp(); it isn't for category 1 objects.
static inline Address at_tos   () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
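
// Added note (illustrative): the branch templates test the *inverted*
// condition so that the not-taken path jumps over the taken path -- e.g.
// if_icmplt uses j_not(less) == greaterEqual and falls through into
// branch() when the comparison really is less (see if_icmp() below).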
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          // G1 barrier needs uncompressed oop for region cross check.
          Register new_val = val;
          if (UseCompressedOops) {
            new_val = rbx;
            __ movptr(new_val, val);
          }
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   new_val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
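
// Rough shape of the G1 path above, as pseudocode (a sketch, not the
// authoritative barrier definition):
//
//   pre:   old = *addr; if (old != NULL) enqueue old (SATB buffer)
//   store: *addr = new_val          // possibly a compressed oop
//   post:  if (new_val != NULL && region(addr) != region(new_val))
//            dirty/enqueue the card for addr
//
// The post barrier needs the uncompressed new value for the cross-region
// check, which is why the compressed-oops case keeps a copy in rbx.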
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
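
// Example of the quickening this enables (added, illustrative): after the
// first execution of, say,
//
//   putfield #7          // resolves the field via resolve_get_put
//
// the byte at bcp is patched to the corresponding _fast_iputfield (etc.),
// so subsequent executions dispatch straight to the fast template. If a
// JVMTI breakpoint occupies the stream byte, the original bytecode lives
// in the breakpoint table and is patched there instead of via movb.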
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
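
// Worked example of the swap/shift idiom above (added, illustrative): for
// the big-endian operand bytes 0xFF 0xFE (the value -2), load_unsigned_short
// on little-endian amd64 yields 0x0000FEFF; bswapl gives 0xFFFE0000 and the
// arithmetic sarl by 16 leaves the sign-extended 0xFFFFFFFE == -2.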
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
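
// Added note (illustrative): shrl rather than sarl here because a wide
// local index is an unsigned 16-bit operand; contrast sipush above and
// wide_iinc below, whose operands are signed and therefore use sarl.
// The negptr matches the negative indexing of iaddress(Register) et al.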
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
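
// Example of the pair this template serves (added, illustrative): a loop
// body like "c = chars[i]" compiles to
//
//   iload  i
//   caload
//
// and iload() above rewrites the iload to fast_icaload, whose operand is
// the iload's local slot -- one dispatch instead of two per iteration.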
void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0, which doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed; otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These short bytecodes are the most profitable to rewrite.
  // (A summary table of the rewrite outcomes follows this method.)
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
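
// Rewrite outcomes for aload_0, summarized (added for reference):
//
//   next bytecode       aload_0 becomes
//   ----------------    -----------------------------------------
//   _getfield           unchanged (decided on a later execution)
//   _fast_igetfield     _fast_iaccess_0
//   _fast_agetfield     _fast_aaccess_0
//   _fast_fgetfield     _fast_faccess_0
//   anything else       _fast_aload_0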
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(rcx, rdx);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
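
// Worked example (added, illustrative): each pool below holds 4 jlongs
// (32 bytes), so rounding &pool[1] down to a 16-byte boundary always
// lands inside the pool with room for both operand words. A jlong array
// is 8-byte aligned, so &pool[1] is either already 16-byte aligned
// (operand fills pool[1..2]) or rounds down to pool itself (filling
// pool[0..1]); either way no neighboring data is clobbered.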
// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip  = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}
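
// Why this works (added note): xorps/xorpd against a mask whose only set
// bits are the IEEE-754 sign bits flips the sign of xmm0 without touching
// exponent or mantissa and without raising FP exceptions -- e.g.
// 0x3F800000 (1.0f) ^ 0x80000000 == 0xBF800000 (-1.0f).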
void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);          // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
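
// Result computation above, spelled out (added, illustrative): after
// cmpq(rdx, rax) with value1 in rdx and value2 in rax,
//
//   value1 <  value2 : rax stays -1 (the jccb is taken)
//   value1 == value2 : setb(notEqual) writes 0, movzbl -> 0
//   value1 >  value2 : setb(notEqual) writes 1, movzbl -> 1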
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}
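
// Unordered handling (added note): ucomiss/ucomisd set the parity flag
// when either operand is NaN, so the parity jumps above pin the result
// at -1 (unordered_result < 0, the fcmpl/dcmpl flavor) or +1 (fcmpg/dcmpg),
// matching the NaN semantics the bytecodes require.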
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // Load up edx with the branch displacement
  if (is_wide) {
    __ movl(rdx, at_bcp(1));
  } else {
    __ load_signed_short(rdx, at_bcp(1));
  }
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // check if MethodCounters exists
    Label has_counters;
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, has_counters);
    __ push(rdx);
    __ push(rcx);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
               rcx);
    __ pop(rcx);
    __ pop(rdx);
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ jcc(Assembler::zero, dispatch);
    __ bind(has_counters);

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter

      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, which value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);                  // test result
      __ jcc(Assembler::zero, dispatch);     // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax);                      // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                            // remove frame anchor
      __ pop(retaddr);                       // get return address
      __ mov(rsp, sender_sp);                // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}
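
// Operand layout assumed above (added, illustrative): after the
// tableswitch opcode the stream is padded to a 4-byte boundary and holds
//
//   default (4 bytes) | lo (4) | hi (4) | offsets[hi - lo + 1] (4 each)
//
// all big-endian, hence the bswapl calls. rbx ends up pointing at the
// aligned default word, so lo sits at +4, hi at +8, and the jump table
// at +12 (3 * BytesPerInt).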
1914 void TemplateTable::lookupswitch() {
1915 transition(itos, itos);
1916 __ stop("lookupswitch bytecode should have been rewritten");
1917 }
1919 void TemplateTable::fast_linearswitch() {
1920 transition(itos, vtos);
1921 Label loop_entry, loop, found, continue_execution;
1922 // bswap rax so we can avoid bswapping the table entries
1923 __ bswapl(rax);
1924 // align r13
1925 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1926 // this instruction (change offsets
1927 // below)
1928 __ andptr(rbx, -BytesPerInt);
1929 // set counter
1930 __ movl(rcx, Address(rbx, BytesPerInt));
1931 __ bswapl(rcx);
1932 __ jmpb(loop_entry);
1933 // table search
1934 __ bind(loop);
1935 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1936 __ jcc(Assembler::equal, found);
1937 __ bind(loop_entry);
1938 __ decrementl(rcx);
1939 __ jcc(Assembler::greaterEqual, loop);
1940 // default case
1941 __ profile_switch_default(rax);
1942 __ movl(rdx, Address(rbx, 0));
1943 __ jmp(continue_execution);
1944 // entry found -> get offset
1945 __ bind(found);
1946 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1947 __ profile_switch_case(rcx, rax, rbx);
1948 // continue execution
1949 __ bind(continue_execution);
1950 __ bswapl(rdx);
1951 __ movl2ptr(rdx, rdx);
1952 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1953 __ addptr(r13, rdx);
1954 __ dispatch_only(vtos);
1955 }
1957 void TemplateTable::fast_binaryswitch() {
1958 transition(itos, vtos);
1959 // Implementation using the following core algorithm:
1960 //
1961 // int binary_search(int key, LookupswitchPair* array, int n) {
1962 // // Binary search according to "Methodik des Programmierens" by
1963 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1964 // int i = 0;
1965 // int j = n;
1966 // while (i+1 < j) {
1967 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1968 // // with Q: for all i: 0 <= i < n: key < a[i]
1969 // // where a stands for the array and assuming that the (non-existing)
1970 // // element a[n] is infinitely big.
1971 // int h = (i + j) >> 1;
1972 // // i < h < j
1973 // if (key < array[h].fast_match()) {
1974 // j = h;
1975 // } else {
1976 // i = h;
1977 // }
1978 // }
1979 // // R: a[i] <= key < a[i+1] or Q
1980 // // (i.e., if key is within array, i is the correct index)
1981 // return i;
1982 // }
1984 // Register allocation
1985 const Register key = rax; // already set (tosca)
1986 const Register array = rbx;
1987 const Register i = rcx;
1988 const Register j = rdx;
1989 const Register h = rdi;
1990 const Register temp = rsi;
1992 // Find array start
1993 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1994 // get rid of this
1995 // instruction (change
1996 // offsets below)
1997 __ andptr(array, -BytesPerInt);
1999 // Initialize i & j
2000 __ xorl(i, i); // i = 0;
2001 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2003 // Convert j into native byte ordering
2004 __ bswapl(j);
2006 // And start
2007 Label entry;
2008 __ jmp(entry);
2010 // binary search loop
2011 {
2012 Label loop;
2013 __ bind(loop);
2014 // int h = (i + j) >> 1;
2015 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2016 __ sarl(h, 1); // h = (i + j) >> 1;
2017 // if (key < array[h].fast_match()) {
2018 // j = h;
2019 // } else {
2020 // i = h;
2021 // }
2022 // Convert array[h].match to native byte-ordering before compare
2023 __ movl(temp, Address(array, h, Address::times_8));
2024 __ bswapl(temp);
2025 __ cmpl(key, temp);
2026 // j = h if (key < array[h].fast_match())
2027 __ cmovl(Assembler::less, j, h);
2028 // i = h if (key >= array[h].fast_match())
2029 __ cmovl(Assembler::greaterEqual, i, h);
2030 // while (i+1 < j)
2031 __ bind(entry);
2032 __ leal(h, Address(i, 1)); // i+1
2033 __ cmpl(h, j); // i+1 < j
2034 __ jcc(Assembler::less, loop);
2035 }
2037 // end of binary search, result index is i (must check again!)
2038 Label default_case;
2039 // Convert array[i].match to native byte-ordering before compare
2040 __ movl(temp, Address(array, i, Address::times_8));
2041 __ bswapl(temp);
2042 __ cmpl(key, temp);
2043 __ jcc(Assembler::notEqual, default_case);
2045 // entry found -> j = offset
2046 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2047 __ profile_switch_case(i, key, array);
2048 __ bswapl(j);
2049 __ movl2ptr(j, j);
2050 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2051 __ addptr(r13, j);
2052 __ dispatch_only(vtos);
2054 // default case -> j = default offset
2055 __ bind(default_case);
2056 __ profile_switch_default(i);
2057 __ movl(j, Address(array, -2 * BytesPerInt));
2058 __ bswapl(j);
2059 __ movl2ptr(j, j);
2060 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2061 __ addptr(r13, j);
2062 __ dispatch_only(vtos);
2063 }
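// _return also covers the _return_register_finalizer variant: with
// RegisterFinalizersAtInit, the return in java.lang.Object.<init> is
// rewritten to that bytecode, so a receiver whose class overrides
// finalize() (the JVM_ACC_HAS_FINALIZER flag tested below) is
// registered with the runtime before the constructor completes.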
2066 void TemplateTable::_return(TosState state) {
2067 transition(state, state);
2068 assert(_desc->calls_vm(),
2069 "inconsistent calls_vm information"); // call in remove_activation
2071 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2072 assert(state == vtos, "only valid state");
2073 __ movptr(c_rarg1, aaddress(0));
2074 __ load_klass(rdi, c_rarg1);
2075 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2076 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2077 Label skip_register_finalizer;
2078 __ jcc(Assembler::zero, skip_register_finalizer);
2080 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2082 __ bind(skip_register_finalizer);
2083 }
2085 // Narrow result if state is itos but result type is smaller.
2086 // Need to narrow in the return bytecode rather than in generate_return_entry
2087 // since compiled code callers expect the result to already be narrowed.
2088 if (state == itos) {
2089 __ narrow(rax);
2090 }
2091 __ remove_activation(state, r13);
2093 __ jmp(r13);
2094 }
2096 // ----------------------------------------------------------------------------
2097 // Volatile variables demand their effects be made known to all CPUs
2098 // in order. Store buffers on most chips allow reads & writes to
2099 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2100 // without some kind of memory barrier (i.e., it's not sufficient that
2101 // the interpreter does not reorder volatile references, the hardware
2102 // also must not reorder them).
2103 //
2104 // According to the new Java Memory Model (JMM):
2105 // (1) All volatiles are serialized with respect to each other. ALSO reads &
2106 // writes act as acquire & release, so:
2107 // (2) A read cannot let unrelated NON-volatile memory refs that
2108 // happen after the read float up to before the read. It's OK for
2109 // non-volatile memory refs that happen before the volatile read to
2110 // float down below it.
2111 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2112 // memory refs that happen BEFORE the write float down to after the
2113 // write. It's OK for non-volatile memory refs that happen after the
2114 // volatile write to float up before it.
2115 //
2116 // We only put in barriers around volatile refs (they are expensive),
2117 // not _between_ memory refs (that would require us to track the
2118 // flavor of the previous memory refs). Requirements (2) and (3)
2119 // require some barriers before volatile stores and after volatile
2120 // loads. These nearly cover requirement (1) but miss the
2121 // volatile-store-volatile-load case. This final case is placed after
2122 // volatile-stores although it could just as well go before
2123 // volatile-loads.
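// A sketch of the resulting policy (not itself generated code):
//   volatile load:   load x;  LoadLoad|LoadStore barrier
//   volatile store:  store x; StoreLoad|StoreStore barrier
// On x86 the hardware only reorders a store followed by a load, so
// just the barrier after volatile stores must emit a real fence; the
// load-side barriers are no-ops here (see the commented-out
// volatile_barrier calls further below).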
2124 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2125 order_constraint) {
2126 // Helper function to insert an is-volatile test and memory barrier
2127 if (os::is_MP()) { // Not needed on single CPU
2128 __ membar(order_constraint);
2129 }
2130 }
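// resolve_cache_and_index: lazy resolution of a ConstantPoolCacheEntry.
// Roughly (illustrative pseudocode, not actual accessors):
//   if (entry->bytecode(byte_no) != bytecode())        // unresolved?
//     InterpreterRuntime::resolve_<kind>(bytecode());  // fill entry
//   // reload Rcache/index so they address the resolved entry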
2132 void TemplateTable::resolve_cache_and_index(int byte_no,
2133 Register Rcache,
2134 Register index,
2135 size_t index_size) {
2136 const Register temp = rbx;
2137 assert_different_registers(Rcache, index, temp);
2139 Label resolved;
2140 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2141 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2142 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2143 __ jcc(Assembler::equal, resolved);
2145 // resolve first time through
2146 address entry;
2147 switch (bytecode()) {
2148 case Bytecodes::_getstatic:
2149 case Bytecodes::_putstatic:
2150 case Bytecodes::_getfield:
2151 case Bytecodes::_putfield:
2152 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2153 break;
2154 case Bytecodes::_invokevirtual:
2155 case Bytecodes::_invokespecial:
2156 case Bytecodes::_invokestatic:
2157 case Bytecodes::_invokeinterface:
2158 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2159 break;
2160 case Bytecodes::_invokehandle:
2161 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2162 break;
2163 case Bytecodes::_invokedynamic:
2164 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2165 break;
2166 default:
2167 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2168 break;
2169 }
2170 __ movl(temp, (int) bytecode());
2171 __ call_VM(noreg, entry, temp);
2173 // Update registers with resolved info
2174 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2175 __ bind(resolved);
2176 }
2178 // The cache and index registers must be set before the call
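// A ConstantPoolCacheEntry is laid out roughly as
//   { intx indices; Metadata* f1; intx f2; intx flags; }
// For field accesses, f2 holds the field offset and flags holds the
// tos state plus attribute bits; for statics, f1 holds the holder
// Klass*, whose java mirror object contains the static fields.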
2179 void TemplateTable::load_field_cp_cache_entry(Register obj,
2180 Register cache,
2181 Register index,
2182 Register off,
2183 Register flags,
2184 bool is_static = false) {
2185 assert_different_registers(cache, index, flags, off);
2187 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2188 // Field offset
2189 __ movptr(off, Address(cache, index, Address::times_ptr,
2190 in_bytes(cp_base_offset +
2191 ConstantPoolCacheEntry::f2_offset())));
2192 // Flags
2193 __ movl(flags, Address(cache, index, Address::times_ptr,
2194 in_bytes(cp_base_offset +
2195 ConstantPoolCacheEntry::flags_offset())));
2197 // for statics, overwrite the obj register with the klass's java mirror
2198 if (is_static) {
2199 __ movptr(obj, Address(cache, index, Address::times_ptr,
2200 in_bytes(cp_base_offset +
2201 ConstantPoolCacheEntry::f1_offset())));
2202 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2203 __ movptr(obj, Address(obj, mirror_offset));
2204 }
2205 }
2207 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2208 Register method,
2209 Register itable_index,
2210 Register flags,
2211 bool is_invokevirtual,
2212 bool is_invokevfinal, /*unused*/
2213 bool is_invokedynamic) {
2214 // setup registers
2215 const Register cache = rcx;
2216 const Register index = rdx;
2217 assert_different_registers(method, flags);
2218 assert_different_registers(method, cache, index);
2219 assert_different_registers(itable_index, flags);
2220 assert_different_registers(itable_index, cache, index);
2221 // determine constant pool cache field offsets
2222 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2223 const int method_offset = in_bytes(
2224 ConstantPoolCache::base_offset() +
2225 ((byte_no == f2_byte)
2226 ? ConstantPoolCacheEntry::f2_offset()
2227 : ConstantPoolCacheEntry::f1_offset()));
2228 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2229 ConstantPoolCacheEntry::flags_offset());
2230 // access constant pool cache fields
2231 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2232 ConstantPoolCacheEntry::f2_offset());
2234 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2235 resolve_cache_and_index(byte_no, cache, index, index_size);
2236 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2238 if (itable_index != noreg) {
2239 // pick up itable or appendix index from f2 also:
2240 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2241 }
2242 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2243 }
2245 // Correct values of the cache and index registers are preserved.
2246 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2247 bool is_static, bool has_tos) {
2248 // do the JVMTI work here to avoid disturbing the register state below
2249 // We use c_rarg registers here because they are the same registers
2250 // used in the call to the VM below
2251 if (JvmtiExport::can_post_field_access()) {
2252 // Check to see if a field access watch has been set before we
2253 // take the time to call into the VM.
2254 Label L1;
2255 assert_different_registers(cache, index, rax);
2256 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2257 __ testl(rax, rax);
2258 __ jcc(Assembler::zero, L1);
2260 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2262 // cache entry pointer
2263 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2264 __ shll(c_rarg3, LogBytesPerWord);
2265 __ addptr(c_rarg2, c_rarg3);
2266 if (is_static) {
2267 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2268 } else {
2269 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2270 __ verify_oop(c_rarg1);
2271 }
2272 // c_rarg1: object pointer or NULL
2273 // c_rarg2: cache entry pointer
2274 // c_rarg3: jvalue object on the stack
2275 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2276 InterpreterRuntime::post_field_access),
2277 c_rarg1, c_rarg2, c_rarg3);
2278 __ get_cache_and_index_at_bcp(cache, index, 1);
2279 __ bind(L1);
2280 }
2281 }
2283 void TemplateTable::pop_and_check_object(Register r) {
2284 __ pop_ptr(r);
2285 __ null_check(r); // for field access must check obj.
2286 __ verify_oop(r);
2287 }
2289 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2290 transition(vtos, vtos);
2292 const Register cache = rcx;
2293 const Register index = rdx;
2294 const Register obj = c_rarg3;
2295 const Register off = rbx;
2296 const Register flags = rax;
2297 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2299 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2300 jvmti_post_field_access(cache, index, is_static, false);
2301 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2303 if (!is_static) {
2304 // obj is on the stack
2305 pop_and_check_object(obj);
2306 }
2308 const Address field(obj, off, Address::times_1);
2310 Label Done, notByte, notBool, notInt, notShort, notChar,
2311 notLong, notFloat, notObj, notDouble;
2313 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2314 // Since btos == 0, a notZero test after masking identifies non-btos cases
2315 assert(btos == 0, "change code, btos != 0");
2317 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2318 __ jcc(Assembler::notZero, notByte);
2319 // btos
2320 __ load_signed_byte(rax, field);
2321 __ push(btos);
2322 // Rewrite bytecode to be faster
2323 if (!is_static) {
2324 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2325 }
2326 __ jmp(Done);
2328 __ bind(notByte);
2329 __ cmpl(flags, ztos);
2330 __ jcc(Assembler::notEqual, notBool);
2332 // ztos (same code as btos)
2333 __ load_signed_byte(rax, field);
2334 __ push(ztos);
2335 // Rewrite bytecode to be faster
2336 if (!is_static) {
2337 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2338 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2339 }
2340 __ jmp(Done);
2342 __ bind(notBool);
2343 __ cmpl(flags, atos);
2344 __ jcc(Assembler::notEqual, notObj);
2345 // atos
2346 __ load_heap_oop(rax, field);
2347 __ push(atos);
2348 if (!is_static) {
2349 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2350 }
2351 __ jmp(Done);
2353 __ bind(notObj);
2354 __ cmpl(flags, itos);
2355 __ jcc(Assembler::notEqual, notInt);
2356 // itos
2357 __ movl(rax, field);
2358 __ push(itos);
2359 // Rewrite bytecode to be faster
2360 if (!is_static) {
2361 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2362 }
2363 __ jmp(Done);
2365 __ bind(notInt);
2366 __ cmpl(flags, ctos);
2367 __ jcc(Assembler::notEqual, notChar);
2368 // ctos
2369 __ load_unsigned_short(rax, field);
2370 __ push(ctos);
2371 // Rewrite bytecode to be faster
2372 if (!is_static) {
2373 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2374 }
2375 __ jmp(Done);
2377 __ bind(notChar);
2378 __ cmpl(flags, stos);
2379 __ jcc(Assembler::notEqual, notShort);
2380 // stos
2381 __ load_signed_short(rax, field);
2382 __ push(stos);
2383 // Rewrite bytecode to be faster
2384 if (!is_static) {
2385 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2386 }
2387 __ jmp(Done);
2389 __ bind(notShort);
2390 __ cmpl(flags, ltos);
2391 __ jcc(Assembler::notEqual, notLong);
2392 // ltos
2393 __ movq(rax, field);
2394 __ push(ltos);
2395 // Rewrite bytecode to be faster
2396 if (!is_static) {
2397 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2398 }
2399 __ jmp(Done);
2401 __ bind(notLong);
2402 __ cmpl(flags, ftos);
2403 __ jcc(Assembler::notEqual, notFloat);
2404 // ftos
2405 __ movflt(xmm0, field);
2406 __ push(ftos);
2407 // Rewrite bytecode to be faster
2408 if (!is_static) {
2409 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2410 }
2411 __ jmp(Done);
2413 __ bind(notFloat);
2414 #ifdef ASSERT
2415 __ cmpl(flags, dtos);
2416 __ jcc(Assembler::notEqual, notDouble);
2417 #endif
2418 // dtos
2419 __ movdbl(xmm0, field);
2420 __ push(dtos);
2421 // Rewrite bytecode to be faster
2422 if (!is_static) {
2423 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2424 }
2425 #ifdef ASSERT
2426 __ jmp(Done);
2428 __ bind(notDouble);
2429 __ stop("Bad state");
2430 #endif
2432 __ bind(Done);
2433 // [jk] not needed currently
2434 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2435 // Assembler::LoadStore));
2436 }
2439 void TemplateTable::getfield(int byte_no) {
2440 getfield_or_static(byte_no, false);
2441 }
2443 void TemplateTable::getstatic(int byte_no) {
2444 getfield_or_static(byte_no, true);
2445 }
2447 // The cache and index registers are expected to be set before the call.
2448 // The function may destroy various registers, just not the cache and index registers.
2449 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2450 transition(vtos, vtos);
2452 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2454 if (JvmtiExport::can_post_field_modification()) {
2455 // Check to see if a field modification watch has been set before
2456 // we take the time to call into the VM.
2457 Label L1;
2458 assert_different_registers(cache, index, rax);
2459 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2460 __ testl(rax, rax);
2461 __ jcc(Assembler::zero, L1);
2463 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2465 if (is_static) {
2466 // Life is simple. Null out the object pointer.
2467 __ xorl(c_rarg1, c_rarg1);
2468 } else {
2469 // Life is harder. The stack holds the value on top, followed by
2470 // the object. We don't know the size of the value, though; it
2471 // could be one or two words depending on its type. As a result,
2472 // we must find the type to determine where the object is.
2473 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2474 Address::times_8,
2475 in_bytes(cp_base_offset +
2476 ConstantPoolCacheEntry::flags_offset())));
2477 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2478 // Make sure we don't need to mask c_rarg3 after the above shift
2479 ConstantPoolCacheEntry::verify_tos_state_shift();
2480 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2481 __ cmpl(c_rarg3, ltos);
2482 __ cmovptr(Assembler::equal,
2483 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2484 __ cmpl(c_rarg3, dtos);
2485 __ cmovptr(Assembler::equal,
2486 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2487 }
2488 // cache entry pointer
2489 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2490 __ shll(rscratch1, LogBytesPerWord);
2491 __ addptr(c_rarg2, rscratch1);
2492 // object (tos)
2493 __ mov(c_rarg3, rsp);
2494 // c_rarg1: object pointer set up above (NULL if static)
2495 // c_rarg2: cache entry pointer
2496 // c_rarg3: jvalue object on the stack
2497 __ call_VM(noreg,
2498 CAST_FROM_FN_PTR(address,
2499 InterpreterRuntime::post_field_modification),
2500 c_rarg1, c_rarg2, c_rarg3);
2501 __ get_cache_and_index_at_bcp(cache, index, 1);
2502 __ bind(L1);
2503 }
2504 }
2506 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2507 transition(vtos, vtos);
2509 const Register cache = rcx;
2510 const Register index = rdx;
2511 const Register obj = rcx;
2512 const Register off = rbx;
2513 const Register flags = rax;
2514 const Register bc = c_rarg3;
2516 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2517 jvmti_post_field_mod(cache, index, is_static);
2518 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2520 // [jk] not needed currently
2521 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2522 // Assembler::StoreStore));
2524 Label notVolatile, Done;
2525 __ movl(rdx, flags);
2526 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2527 __ andl(rdx, 0x1);
2529 // field address
2530 const Address field(obj, off, Address::times_1);
2532 Label notByte, notBool, notInt, notShort, notChar,
2533 notLong, notFloat, notObj, notDouble;
2535 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2537 assert(btos == 0, "change code, btos != 0");
2538 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2539 __ jcc(Assembler::notZero, notByte);
2541 // btos
2542 {
2543 __ pop(btos);
2544 if (!is_static) pop_and_check_object(obj);
2545 __ movb(field, rax);
2546 if (!is_static) {
2547 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2548 }
2549 __ jmp(Done);
2550 }
2552 __ bind(notByte);
2553 __ cmpl(flags, ztos);
2554 __ jcc(Assembler::notEqual, notBool);
2556 // ztos
2557 {
2558 __ pop(ztos);
2559 if (!is_static) pop_and_check_object(obj);
2560 __ andl(rax, 0x1);
2561 __ movb(field, rax);
2562 if (!is_static) {
2563 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
2564 }
2565 __ jmp(Done);
2566 }
2568 __ bind(notBool);
2569 __ cmpl(flags, atos);
2570 __ jcc(Assembler::notEqual, notObj);
2572 // atos
2573 {
2574 __ pop(atos);
2575 if (!is_static) pop_and_check_object(obj);
2576 // Store into the field
2577 do_oop_store(_masm, field, rax, _bs->kind(), false);
2578 if (!is_static) {
2579 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2580 }
2581 __ jmp(Done);
2582 }
2584 __ bind(notObj);
2585 __ cmpl(flags, itos);
2586 __ jcc(Assembler::notEqual, notInt);
2588 // itos
2589 {
2590 __ pop(itos);
2591 if (!is_static) pop_and_check_object(obj);
2592 __ movl(field, rax);
2593 if (!is_static) {
2594 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2595 }
2596 __ jmp(Done);
2597 }
2599 __ bind(notInt);
2600 __ cmpl(flags, ctos);
2601 __ jcc(Assembler::notEqual, notChar);
2603 // ctos
2604 {
2605 __ pop(ctos);
2606 if (!is_static) pop_and_check_object(obj);
2607 __ movw(field, rax);
2608 if (!is_static) {
2609 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2610 }
2611 __ jmp(Done);
2612 }
2614 __ bind(notChar);
2615 __ cmpl(flags, stos);
2616 __ jcc(Assembler::notEqual, notShort);
2618 // stos
2619 {
2620 __ pop(stos);
2621 if (!is_static) pop_and_check_object(obj);
2622 __ movw(field, rax);
2623 if (!is_static) {
2624 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2625 }
2626 __ jmp(Done);
2627 }
2629 __ bind(notShort);
2630 __ cmpl(flags, ltos);
2631 __ jcc(Assembler::notEqual, notLong);
2633 // ltos
2634 {
2635 __ pop(ltos);
2636 if (!is_static) pop_and_check_object(obj);
2637 __ movq(field, rax);
2638 if (!is_static) {
2639 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2640 }
2641 __ jmp(Done);
2642 }
2644 __ bind(notLong);
2645 __ cmpl(flags, ftos);
2646 __ jcc(Assembler::notEqual, notFloat);
2648 // ftos
2649 {
2650 __ pop(ftos);
2651 if (!is_static) pop_and_check_object(obj);
2652 __ movflt(field, xmm0);
2653 if (!is_static) {
2654 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2655 }
2656 __ jmp(Done);
2657 }
2659 __ bind(notFloat);
2660 #ifdef ASSERT
2661 __ cmpl(flags, dtos);
2662 __ jcc(Assembler::notEqual, notDouble);
2663 #endif
2665 // dtos
2666 {
2667 __ pop(dtos);
2668 if (!is_static) pop_and_check_object(obj);
2669 __ movdbl(field, xmm0);
2670 if (!is_static) {
2671 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2672 }
2673 }
2675 #ifdef ASSERT
2676 __ jmp(Done);
2678 __ bind(notDouble);
2679 __ stop("Bad state");
2680 #endif
2682 __ bind(Done);
2684 // Check for volatile store
2685 __ testl(rdx, rdx);
2686 __ jcc(Assembler::zero, notVolatile);
2687 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2688 Assembler::StoreStore));
2689 __ bind(notVolatile);
2690 }
2692 void TemplateTable::putfield(int byte_no) {
2693 putfield_or_static(byte_no, false);
2694 }
2696 void TemplateTable::putstatic(int byte_no) {
2697 putfield_or_static(byte_no, true);
2698 }
2700 void TemplateTable::jvmti_post_fast_field_mod() {
2701 if (JvmtiExport::can_post_field_modification()) {
2702 // Check to see if a field modification watch has been set before
2703 // we take the time to call into the VM.
2704 Label L2;
2705 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2706 __ testl(c_rarg3, c_rarg3);
2707 __ jcc(Assembler::zero, L2);
2708 __ pop_ptr(rbx); // copy the object pointer from tos
2709 __ verify_oop(rbx);
2710 __ push_ptr(rbx); // put the object pointer back on tos
2711 // Save tos values before call_VM() clobbers them. Since we have
2712 // to do it for every data type, we use the saved values as the
2713 // jvalue object.
2714 switch (bytecode()) { // load values into the jvalue object
2715 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2716 case Bytecodes::_fast_bputfield: // fall through
2717 case Bytecodes::_fast_zputfield: // fall through
2718 case Bytecodes::_fast_sputfield: // fall through
2719 case Bytecodes::_fast_cputfield: // fall through
2720 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2721 case Bytecodes::_fast_dputfield: __ push_d(); break;
2722 case Bytecodes::_fast_fputfield: __ push_f(); break;
2723 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2725 default:
2726 ShouldNotReachHere();
2727 }
2728 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2729 // access constant pool cache entry
2730 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2731 __ verify_oop(rbx);
2732 // rbx: object pointer copied above
2733 // c_rarg2: cache entry pointer
2734 // c_rarg3: jvalue object on the stack
2735 __ call_VM(noreg,
2736 CAST_FROM_FN_PTR(address,
2737 InterpreterRuntime::post_field_modification),
2738 rbx, c_rarg2, c_rarg3);
2740 switch (bytecode()) { // restore tos values
2741 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2742 case Bytecodes::_fast_bputfield: // fall through
2743 case Bytecodes::_fast_zputfield: // fall through
2744 case Bytecodes::_fast_sputfield: // fall through
2745 case Bytecodes::_fast_cputfield: // fall through
2746 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2747 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2748 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2749 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2750 }
2751 __ bind(L2);
2752 }
2753 }
2755 void TemplateTable::fast_storefield(TosState state) {
2756 transition(state, vtos);
2758 ByteSize base = ConstantPoolCache::base_offset();
2760 jvmti_post_fast_field_mod();
2762 // access constant pool cache
2763 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2765 // test for volatile with rdx
2766 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2767 in_bytes(base +
2768 ConstantPoolCacheEntry::flags_offset())));
2770 // replace index with field offset from cache entry
2771 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2772 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2774 // [jk] not needed currently
2775 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2776 // Assembler::StoreStore));
2778 Label notVolatile;
2779 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2780 __ andl(rdx, 0x1);
2782 // Get object from stack
2783 pop_and_check_object(rcx);
2785 // field address
2786 const Address field(rcx, rbx, Address::times_1);
2788 // access field
2789 switch (bytecode()) {
2790 case Bytecodes::_fast_aputfield:
2791 do_oop_store(_masm, field, rax, _bs->kind(), false);
2792 break;
2793 case Bytecodes::_fast_lputfield:
2794 __ movq(field, rax);
2795 break;
2796 case Bytecodes::_fast_iputfield:
2797 __ movl(field, rax);
2798 break;
2799 case Bytecodes::_fast_zputfield:
2800 __ andl(rax, 0x1); // boolean is true if LSB is 1
2801 // fall through to bputfield
2802 case Bytecodes::_fast_bputfield:
2803 __ movb(field, rax);
2804 break;
2805 case Bytecodes::_fast_sputfield:
2806 // fall through
2807 case Bytecodes::_fast_cputfield:
2808 __ movw(field, rax);
2809 break;
2810 case Bytecodes::_fast_fputfield:
2811 __ movflt(field, xmm0);
2812 break;
2813 case Bytecodes::_fast_dputfield:
2814 __ movdbl(field, xmm0);
2815 break;
2816 default:
2817 ShouldNotReachHere();
2818 }
2820 // Check for volatile store
2821 __ testl(rdx, rdx);
2822 __ jcc(Assembler::zero, notVolatile);
2823 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2824 Assembler::StoreStore));
2825 __ bind(notVolatile);
2826 }
2829 void TemplateTable::fast_accessfield(TosState state) {
2830 transition(atos, state);
2832 // Do the JVMTI work here to avoid disturbing the register state below
2833 if (JvmtiExport::can_post_field_access()) {
2834 // Check to see if a field access watch has been set before we
2835 // take the time to call into the VM.
2836 Label L1;
2837 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2838 __ testl(rcx, rcx);
2839 __ jcc(Assembler::zero, L1);
2840 // access constant pool cache entry
2841 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2842 __ verify_oop(rax);
2843 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2844 __ mov(c_rarg1, rax);
2845 // c_rarg1: object pointer copied above
2846 // c_rarg2: cache entry pointer
2847 __ call_VM(noreg,
2848 CAST_FROM_FN_PTR(address,
2849 InterpreterRuntime::post_field_access),
2850 c_rarg1, c_rarg2);
2851 __ pop_ptr(rax); // restore object pointer
2852 __ bind(L1);
2853 }
2855 // access constant pool cache
2856 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2857 // replace index with field offset from cache entry
2858 // [jk] not needed currently
2859 // if (os::is_MP()) {
2860 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2861 // in_bytes(ConstantPoolCache::base_offset() +
2862 // ConstantPoolCacheEntry::flags_offset())));
2863 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2864 // __ andl(rdx, 0x1);
2865 // }
2866 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2867 in_bytes(ConstantPoolCache::base_offset() +
2868 ConstantPoolCacheEntry::f2_offset())));
2870 // rax: object
2871 __ verify_oop(rax);
2872 __ null_check(rax);
2873 Address field(rax, rbx, Address::times_1);
2875 // access field
2876 switch (bytecode()) {
2877 case Bytecodes::_fast_agetfield:
2878 __ load_heap_oop(rax, field);
2879 __ verify_oop(rax);
2880 break;
2881 case Bytecodes::_fast_lgetfield:
2882 __ movq(rax, field);
2883 break;
2884 case Bytecodes::_fast_igetfield:
2885 __ movl(rax, field);
2886 break;
2887 case Bytecodes::_fast_bgetfield:
2888 __ movsbl(rax, field);
2889 break;
2890 case Bytecodes::_fast_sgetfield:
2891 __ load_signed_short(rax, field);
2892 break;
2893 case Bytecodes::_fast_cgetfield:
2894 __ load_unsigned_short(rax, field);
2895 break;
2896 case Bytecodes::_fast_fgetfield:
2897 __ movflt(xmm0, field);
2898 break;
2899 case Bytecodes::_fast_dgetfield:
2900 __ movdbl(xmm0, field);
2901 break;
2902 default:
2903 ShouldNotReachHere();
2904 }
2905 // [jk] not needed currently
2906 // if (os::is_MP()) {
2907 // Label notVolatile;
2908 // __ testl(rdx, rdx);
2909 // __ jcc(Assembler::zero, notVolatile);
2910 // __ membar(Assembler::LoadLoad);
2911 // __ bind(notVolatile);
2912 //};
2913 }
2915 void TemplateTable::fast_xaccess(TosState state) {
2916 transition(vtos, state);
2918 // get receiver
2919 __ movptr(rax, aaddress(0));
2920 // access constant pool cache
2921 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2922 __ movptr(rbx,
2923 Address(rcx, rdx, Address::times_8,
2924 in_bytes(ConstantPoolCache::base_offset() +
2925 ConstantPoolCacheEntry::f2_offset())));
2926 // make sure the exception is reported in the correct bcp range (getfield
2927 // is the next instruction)
2928 __ increment(r13);
2929 __ null_check(rax);
2930 switch (state) {
2931 case itos:
2932 __ movl(rax, Address(rax, rbx, Address::times_1));
2933 break;
2934 case atos:
2935 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2936 __ verify_oop(rax);
2937 break;
2938 case ftos:
2939 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2940 break;
2941 default:
2942 ShouldNotReachHere();
2943 }
2945 // [jk] not needed currently
2946 // if (os::is_MP()) {
2947 // Label notVolatile;
2948 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2949 // in_bytes(ConstantPoolCache::base_offset() +
2950 // ConstantPoolCacheEntry::flags_offset())));
2951 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2952 // __ testl(rdx, 0x1);
2953 // __ jcc(Assembler::zero, notVolatile);
2954 // __ membar(Assembler::LoadLoad);
2955 // __ bind(notVolatile);
2956 // }
2958 __ decrement(r13);
2959 }
2963 //-----------------------------------------------------------------------------
2964 // Calls
2966 void TemplateTable::count_calls(Register method, Register temp) {
2967 // implemented elsewhere
2968 ShouldNotReachHere();
2969 }
2971 void TemplateTable::prepare_invoke(int byte_no,
2972 Register method, // linked method (or i-klass)
2973 Register index, // itable index, MethodType, etc.
2974 Register recv, // if caller wants to see it
2975 Register flags // if caller wants to test it
2976 ) {
2977 // determine flags
2978 const Bytecodes::Code code = bytecode();
2979 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2980 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2981 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2982 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2983 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2984 const bool load_receiver = (recv != noreg);
2985 const bool save_flags = (flags != noreg);
2986 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2987 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2988 assert(flags == noreg || flags == rdx, "");
2989 assert(recv == noreg || recv == rcx, "");
2991 // setup registers & access constant pool cache
2992 if (recv == noreg) recv = rcx;
2993 if (flags == noreg) flags = rdx;
2994 assert_different_registers(method, index, recv, flags);
2996 // save 'interpreter return address'
2997 __ save_bcp();
2999 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3001 // maybe push appendix to arguments (just before return address)
3002 if (is_invokedynamic || is_invokehandle) {
3003 Label L_no_push;
3004 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3005 __ jcc(Assembler::zero, L_no_push);
3006 // Push the appendix as a trailing parameter.
3007 // This must be done before we get the receiver,
3008 // since the parameter_size includes it.
3009 __ push(rbx);
3010 __ mov(rbx, index);
3011 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3012 __ load_resolved_reference_at_index(index, rbx);
3013 __ pop(rbx);
3014 __ push(index); // push appendix (MethodType, CallSite, etc.)
3015 __ bind(L_no_push);
3016 }
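// Conceptually: a resolved invokedynamic/invokehandle entry may carry
// an "appendix" (a CallSite or MethodType from the resolved-references
// array, at the index held in f2). It was pushed above as a hidden
// trailing argument; since parameter_size counts it, this must happen
// before the receiver is located below.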
3018 // load receiver if needed (after appendix is pushed so parameter size is correct)
3019 // Note: no return address pushed yet
3020 if (load_receiver) {
3021 __ movl(recv, flags);
3022 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3023 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3024 const int receiver_is_at_end = -1; // back off one slot to get receiver
3025 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3026 __ movptr(recv, recv_addr);
3027 __ verify_oop(recv);
3028 }
3030 if (save_flags) {
3031 __ movl(r13, flags);
3032 }
3034 // compute return type
3035 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3036 // Make sure we don't need to mask flags after the above shift
3037 ConstantPoolCacheEntry::verify_tos_state_shift();
3038 // load return address
3039 {
3040 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3041 ExternalAddress table(table_addr);
3042 __ lea(rscratch1, table);
3043 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3044 }
3046 // push return address
3047 __ push(flags);
3049 // Restore the flags value saved above, and restore r13 (the
3050 // bytecode pointer) for later null checks
3051 if (save_flags) {
3052 __ movl(flags, r13);
3053 __ restore_bcp();
3054 }
3055 }
3058 void TemplateTable::invokevirtual_helper(Register index,
3059 Register recv,
3060 Register flags) {
3061 // Uses temporary registers rax, rdx
3062 assert_different_registers(index, recv, rax, rdx);
3063 assert(index == rbx, "");
3064 assert(recv == rcx, "");
3066 // Test for an invoke of a final method
3067 Label notFinal;
3068 __ movl(rax, flags);
3069 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3070 __ jcc(Assembler::zero, notFinal);
3072 const Register method = index; // method must be rbx
3073 assert(method == rbx,
3074 "Method* must be rbx for interpreter calling convention");
3076 // do the call - the index is actually the method to call
3077 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3079 // It's final, need a null check here!
3080 __ null_check(recv);
3082 // profile this call
3083 __ profile_final_call(rax);
3084 __ profile_arguments_type(rax, method, r13, true);
3086 __ jump_from_interpreted(method, rax);
3088 __ bind(notFinal);
3090 // get receiver klass
3091 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3092 __ load_klass(rax, recv);
3094 // profile this call
3095 __ profile_virtual_call(rax, r14, rdx);
3097 // get target Method* & entry point
3098 __ lookup_virtual_method(rax, index, method);
3099 __ profile_arguments_type(rdx, method, r13, true);
3100 __ jump_from_interpreted(method, rdx);
3101 }
3104 void TemplateTable::invokevirtual(int byte_no) {
3105 transition(vtos, vtos);
3106 assert(byte_no == f2_byte, "use this argument");
3107 prepare_invoke(byte_no,
3108 rbx, // method or vtable index
3109 noreg, // unused itable index
3110 rcx, rdx); // recv, flags
3112 // rbx: index
3113 // rcx: receiver
3114 // rdx: flags
3116 invokevirtual_helper(rbx, rcx, rdx);
3117 }
3120 void TemplateTable::invokespecial(int byte_no) {
3121 transition(vtos, vtos);
3122 assert(byte_no == f1_byte, "use this argument");
3123 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3124 rcx); // get receiver also for null check
3125 __ verify_oop(rcx);
3126 __ null_check(rcx);
3127 // do the call
3128 __ profile_call(rax);
3129 __ profile_arguments_type(rax, rbx, r13, false);
3130 __ jump_from_interpreted(rbx, rax);
3131 }
3134 void TemplateTable::invokestatic(int byte_no) {
3135 transition(vtos, vtos);
3136 assert(byte_no == f1_byte, "use this argument");
3137 prepare_invoke(byte_no, rbx); // get f1 Method*
3138 // do the call
3139 __ profile_call(rax);
3140 __ profile_arguments_type(rax, rbx, r13, false);
3141 __ jump_from_interpreted(rbx, rax);
3142 }
3144 void TemplateTable::fast_invokevfinal(int byte_no) {
3145 transition(vtos, vtos);
3146 assert(byte_no == f2_byte, "use this argument");
3147 __ stop("fast_invokevfinal not used on amd64");
3148 }
3150 void TemplateTable::invokeinterface(int byte_no) {
3151 transition(vtos, vtos);
3152 assert(byte_no == f1_byte, "use this argument");
3153 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3154 rcx, rdx); // recv, flags
3156 // rax: interface klass (from f1)
3157 // rbx: itable index (from f2)
3158 // rcx: receiver
3159 // rdx: flags
3161 // Special case of invokeinterface called for a virtual method of
3162 // java.lang.Object. See cpCacheOop.cpp for details.
3163 // This code isn't produced by javac, but could be produced by
3164 // another compliant Java compiler.
3165 Label notMethod;
3166 __ movl(r14, rdx);
3167 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3168 __ jcc(Assembler::zero, notMethod);
3170 invokevirtual_helper(rbx, rcx, rdx);
3171 __ bind(notMethod);
3173 // Get receiver klass into rdx - also a null check
3174 __ restore_locals(); // restore r14
3175 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3176 __ load_klass(rdx, rcx);
3178 // profile this call
3179 __ profile_virtual_call(rdx, r13, r14);
3181 Label no_such_interface, no_such_method;
3183 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3184 rdx, rax, rbx,
3185 // outputs: method, scan temp. reg
3186 rbx, r13,
3187 no_such_interface);
3189 // rbx: Method* to call
3190 // rcx: receiver
3191 // Check for abstract method error
3192 // Note: This should be done more efficiently via a throw_abstract_method_error
3193 // interpreter entry point and a conditional jump to it in case of a null
3194 // method.
3195 __ testptr(rbx, rbx);
3196 __ jcc(Assembler::zero, no_such_method);
3198 __ profile_arguments_type(rdx, rbx, r13, true);
3200 // do the call
3201 // rcx: receiver
3202 // rbx: Method*
3203 __ jump_from_interpreted(rbx, rdx);
3204 __ should_not_reach_here();
3206 // exception handling code follows...
3207 // note: must restore interpreter registers to canonical
3208 // state for exception handling to work correctly!
3210 __ bind(no_such_method);
3211 // throw exception
3212 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3213 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3214 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3215 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3216 // the call_VM checks for exception, so we should never return here.
3217 __ should_not_reach_here();
3219 __ bind(no_such_interface);
3220 // throw exception
3221 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3222 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3223 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3224 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3225 InterpreterRuntime::throw_IncompatibleClassChangeError));
3226 // the call_VM checks for exception, so we should never return here.
3227 __ should_not_reach_here();
3228 }
3231 void TemplateTable::invokehandle(int byte_no) {
3232 transition(vtos, vtos);
3233 assert(byte_no == f1_byte, "use this argument");
3234 const Register rbx_method = rbx;
3235 const Register rax_mtype = rax;
3236 const Register rcx_recv = rcx;
3237 const Register rdx_flags = rdx;
3239 if (!EnableInvokeDynamic) {
3240 // rewriter does not generate this bytecode
3241 __ should_not_reach_here();
3242 return;
3243 }
3245 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3246 __ verify_method_ptr(rbx_method);
3247 __ verify_oop(rcx_recv);
3248 __ null_check(rcx_recv);
3250 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3251 // rbx: MH.invokeExact_MT method (from f2)
3253 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3255 // FIXME: profile the LambdaForm also
3256 __ profile_final_call(rax);
3257 __ profile_arguments_type(rdx, rbx_method, r13, true);
3259 __ jump_from_interpreted(rbx_method, rdx);
3260 }
3263 void TemplateTable::invokedynamic(int byte_no) {
3264 transition(vtos, vtos);
3265 assert(byte_no == f1_byte, "use this argument");
3267 if (!EnableInvokeDynamic) {
3268 // We should not encounter this bytecode if !EnableInvokeDynamic.
3269 // The verifier will stop it. However, if we get past the verifier,
3270 // this will stop the thread in a reasonable way, without crashing the JVM.
3271 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3272 InterpreterRuntime::throw_IncompatibleClassChangeError));
3273 // the call_VM checks for exception, so we should never return here.
3274 __ should_not_reach_here();
3275 return;
3276 }
3278 const Register rbx_method = rbx;
3279 const Register rax_callsite = rax;
3281 prepare_invoke(byte_no, rbx_method, rax_callsite);
3283 // rax: CallSite object (from cpool->resolved_references[f1])
3284 // rbx: MH.linkToCallSite method (from f2)
3286 // Note: rax_callsite is already pushed by prepare_invoke
3288 // %%% should make a type profile for any invokedynamic that takes a ref argument
3289 // profile this call
3290 __ profile_call(r13);
3291 __ profile_arguments_type(rdx, rbx_method, r13, false);
3293 __ verify_oop(rax_callsite);
3295 __ jump_from_interpreted(rbx_method, rdx);
3296 }
3299 //-----------------------------------------------------------------------------
3300 // Allocation
3302 void TemplateTable::_new() {
3303 transition(vtos, atos);
3304 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3305 Label slow_case;
3306 Label done;
3307 Label initialize_header;
3308 Label initialize_object; // including clearing the fields
3309 Label allocate_shared;
3311 __ get_cpool_and_tags(rsi, rax);
3312 // Make sure the class we're about to instantiate has been resolved.
3313 // This is done before loading the InstanceKlass to be consistent with
3314 // the order in which the ConstantPool is updated (see ConstantPool::klass_at_put)
3315 const int tags_offset = Array<u1>::base_offset_in_bytes();
3316 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3317 JVM_CONSTANT_Class);
3318 __ jcc(Assembler::notEqual, slow_case);
3320 // get InstanceKlass
3321 __ movptr(rsi, Address(rsi, rdx,
3322 Address::times_8, sizeof(ConstantPool)));
3324 // make sure the klass is fully initialized and doesn't have a finalizer
3326 __ cmpb(Address(rsi,
3327 InstanceKlass::init_state_offset()),
3328 InstanceKlass::fully_initialized);
3329 __ jcc(Assembler::notEqual, slow_case);
3331 // get instance_size in InstanceKlass (scaled to a count of bytes)
3332 __ movl(rdx,
3333 Address(rsi,
3334 Klass::layout_helper_offset()));
3335 // test to see if it has a finalizer or is malformed in some way
3336 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3337 __ jcc(Assembler::notZero, slow_case);
3339 // Allocate the instance
3340 // 1) Try to allocate in the TLAB
3341 // 2) if fail and the object is large allocate in the shared Eden
3342 // 3) if the above fails (or is not applicable), go to a slow case
3343 // (creates a new TLAB, etc.)
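// A C-like sketch of the fast path generated below (illustrative
// names; safepoint and GC details omitted):
//   if (UseTLAB && tlab_top + size <= tlab_end) {
//     obj = tlab_top; tlab_top += size;            // bump pointer
//   } else if (allow_shared_alloc) {
//     do { obj = eden_top;
//          if (obj + size > eden_end) goto slow_case;
//     } while (!CAS(&eden_top, obj, obj + size));  // lock cmpxchg
//   } else goto slow_case;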
3345 const bool allow_shared_alloc =
3346 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3348 if (UseTLAB) {
3349 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3350 __ lea(rbx, Address(rax, rdx, Address::times_1));
3351 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3352 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3353 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3354 if (ZeroTLAB) {
3355 // the fields have been already cleared
3356 __ jmp(initialize_header);
3357 } else {
3358 // initialize both the header and fields
3359 __ jmp(initialize_object);
3360 }
3361 }
3363 // Allocation in the shared Eden, if allowed.
3364 //
3365 // rdx: instance size in bytes
3366 if (allow_shared_alloc) {
3367 __ bind(allocate_shared);
3369 ExternalAddress top((address)Universe::heap()->top_addr());
3370 ExternalAddress end((address)Universe::heap()->end_addr());
3372 const Register RtopAddr = rscratch1;
3373 const Register RendAddr = rscratch2;
3375 __ lea(RtopAddr, top);
3376 __ lea(RendAddr, end);
3377 __ movptr(rax, Address(RtopAddr, 0));
3379 // For retries rax gets set by cmpxchgq
3380 Label retry;
3381 __ bind(retry);
3382 __ lea(rbx, Address(rax, rdx, Address::times_1));
3383 __ cmpptr(rbx, Address(RendAddr, 0));
3384 __ jcc(Assembler::above, slow_case);
3386 // Compare rax with the current top; if they are still equal, store the
3387 // new top (rbx) through the top-address pointer. Sets ZF if rax was
3388 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3389 //
3390 // rax: object begin
3391 // rbx: object end
3392 // rdx: instance size in bytes
3393 if (os::is_MP()) {
3394 __ lock();
3395 }
3396 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3398 // if someone beat us on the allocation, try again, otherwise continue
3399 __ jcc(Assembler::notEqual, retry);
3401 __ incr_allocated_bytes(r15_thread, rdx, 0);
3402 }
3404 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3405 // The object fields are initialized before the header. If there are no
3406 // fields to clear, go directly to the header initialization.
3407 __ bind(initialize_object);
3408 __ decrementl(rdx, sizeof(oopDesc));
3409 __ jcc(Assembler::zero, initialize_header);
3411 // Initialize object fields
3412 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3413 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3414 {
3415 Label loop;
3416 __ bind(loop);
3417 __ movq(Address(rax, rdx, Address::times_8,
3418 sizeof(oopDesc) - oopSize),
3419 rcx);
3420 __ decrementl(rdx);
3421 __ jcc(Assembler::notZero, loop);
3422 }
3424 // initialize object header only.
3425 __ bind(initialize_header);
3426 if (UseBiasedLocking) {
3427 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3428 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3429 } else {
3430 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3431 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3432 }
3433 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3434 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3435 __ store_klass(rax, rsi); // store klass last
3437 {
3438 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3439 // Trigger dtrace event for fastpath
3440 __ push(atos); // save the return value
3441 __ call_VM_leaf(
3442 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3443 __ pop(atos); // restore the return value
3445 }
3446 __ jmp(done);
3447 }
3450 // slow case
3451 __ bind(slow_case);
3452 __ get_constant_pool(c_rarg1);
3453 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3454 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3455 __ verify_oop(rax);
3457 // continue
3458 __ bind(done);
3459 }
3461 void TemplateTable::newarray() {
3462 transition(itos, atos);
3463 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3464 __ movl(c_rarg2, rax);
3465 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3466 c_rarg1, c_rarg2);
3467 }
3469 void TemplateTable::anewarray() {
3470 transition(itos, atos);
3471 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3472 __ get_constant_pool(c_rarg1);
3473 __ movl(c_rarg3, rax);
3474 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3475 c_rarg1, c_rarg2, c_rarg3);
3476 }
3478 void TemplateTable::arraylength() {
3479 transition(atos, itos);
3480 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3481 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3482 }
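// checkcast/instanceof share one resolution scheme: if the constant
// pool tag is already JVM_CONSTANT_Class, the bytecode has been
// "quickened" and the Klass* is read straight from the constant pool;
// otherwise InterpreterRuntime::quicken_io_cc resolves it. A sketch
// (illustrative names, not actual accessors):
//   Klass* k = tag_is_class(i) ? cpool->klass_at(i) : quicken_io_cc();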
3484 void TemplateTable::checkcast() {
3485 transition(atos, atos);
3486 Label done, is_null, ok_is_subtype, quicked, resolved;
3487 __ testptr(rax, rax); // object is in rax
3488 __ jcc(Assembler::zero, is_null);
3490 // Get cpool & tags index
3491 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3492 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3493 // See if the bytecode has already been quickened
3494 __ cmpb(Address(rdx, rbx,
3495 Address::times_1,
3496 Array<u1>::base_offset_in_bytes()),
3497 JVM_CONSTANT_Class);
3498 __ jcc(Assembler::equal, quicked);
3499 __ push(atos); // save receiver for result, and for GC
3500 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3501 // vm_result_2 has metadata result
3502 __ get_vm_result_2(rax, r15_thread);
3503 __ pop_ptr(rdx); // restore receiver
3504 __ jmpb(resolved);
3506 // Get superklass in rax and subklass in rbx
3507 __ bind(quicked);
3508 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3509 __ movptr(rax, Address(rcx, rbx,
3510 Address::times_8, sizeof(ConstantPool)));
3512 __ bind(resolved);
3513 __ load_klass(rbx, rdx);
3515 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3516 // Superklass in rax. Subklass in rbx.
3517 __ gen_subtype_check(rbx, ok_is_subtype);
3519 // Come here on failure
3520 __ push_ptr(rdx);
3521 // object is at TOS
3522 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3524 // Come here on success
3525 __ bind(ok_is_subtype);
3526 __ mov(rax, rdx); // Restore object in rdx
3528 // Collect counts on whether this check-cast sees NULLs a lot or not.
3529 if (ProfileInterpreter) {
3530 __ jmp(done);
3531 __ bind(is_null);
3532 __ profile_null_seen(rcx);
3533 } else {
3534 __ bind(is_null); // same as 'done'
3535 }
3536 __ bind(done);
3537 }
3539 void TemplateTable::instanceof() {
3540 transition(atos, itos);
3541 Label done, is_null, ok_is_subtype, quicked, resolved;
3542 __ testptr(rax, rax);
3543 __ jcc(Assembler::zero, is_null);
3545 // Get cpool & tags index
3546 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3547 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3548 // See if the bytecode has already been quickened
3549 __ cmpb(Address(rdx, rbx,
3550 Address::times_1,
3551 Array<u1>::base_offset_in_bytes()),
3552 JVM_CONSTANT_Class);
3553 __ jcc(Assembler::equal, quicked);
3555 __ push(atos); // save receiver for result, and for GC
3556 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3557 // vm_result_2 has metadata result
3558 __ get_vm_result_2(rax, r15_thread);
3559 __ pop_ptr(rdx); // restore receiver
3560 __ verify_oop(rdx);
3561 __ load_klass(rdx, rdx);
3562 __ jmpb(resolved);
3564 // Get superklass in rax and subklass in rdx
3565 __ bind(quicked);
3566 __ load_klass(rdx, rax);
3567 __ movptr(rax, Address(rcx, rbx,
3568 Address::times_8, sizeof(ConstantPool)));
3570 __ bind(resolved);
3572 // Generate subtype check. Blows rcx, rdi
3573 // Superklass in rax. Subklass in rdx.
3574 __ gen_subtype_check(rdx, ok_is_subtype);
3576 // Come here on failure
3577 __ xorl(rax, rax);
3578 __ jmpb(done);
3579 // Come here on success
3580 __ bind(ok_is_subtype);
3581 __ movl(rax, 1);
3583 // Collect counts on whether this test sees NULLs a lot or not.
3584 if (ProfileInterpreter) {
3585 __ jmp(done);
3586 __ bind(is_null);
3587 __ profile_null_seen(rcx);
3588 } else {
3589 __ bind(is_null); // same as 'done'
3590 }
3591 __ bind(done);
3592 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3593 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3594 }
3596 //-----------------------------------------------------------------------------
3597 // Breakpoints
3598 void TemplateTable::_breakpoint() {
3599 // Note: We get here even if we are single stepping.
3600 // jbug insists on setting breakpoints at every bytecode
3601 // even if we are in single-step mode.
3603 transition(vtos, vtos);
3605 // get the unpatched byte code
3606 __ get_method(c_rarg1);
3607 __ call_VM(noreg,
3608 CAST_FROM_FN_PTR(address,
3609 InterpreterRuntime::get_original_bytecode_at),
3610 c_rarg1, r13);
3611 __ mov(rbx, rax);
3613 // post the breakpoint event
3614 __ get_method(c_rarg1);
3615 __ call_VM(noreg,
3616 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3617 c_rarg1, r13);
3619 // complete the execution of original bytecode
3620 __ dispatch_only_normal(vtos);
3621 }
3623 //-----------------------------------------------------------------------------
3624 // Exceptions
3626 void TemplateTable::athrow() {
3627 transition(atos, vtos);
3628 __ null_check(rax);
3629 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3630 }
3632 //-----------------------------------------------------------------------------
3633 // Synchronization
3634 //
3635 // Note: monitorenter & exit are symmetric routines, which is reflected
3636 // in the assembly code structure as well
3637 //
3638 // Stack layout:
3639 //
3640 // [expressions ] <--- rsp = expression stack top
3641 // ..
3642 // [expressions ]
3643 // [monitor entry] <--- monitor block top = expression stack bot
3644 // ..
3645 // [monitor entry]
3646 // [frame data ] <--- monitor block bot
3647 // ...
3648 // [saved rbp ] <--- rbp
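// C-like sketch of the free-slot search in monitorenter below (names
// are illustrative):
//   BasicObjectLock* free_slot = NULL;
//   for (e = monitor_block_top; e != monitor_block_bot; e++) {
//     if (e->obj() == NULL)    free_slot = e;  // remember unused slot
//     if (e->obj() == lock_obj) break;         // stop at same object
//   }
//   if (free_slot == NULL) /* grow the monitor block by one entry */;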
3649 void TemplateTable::monitorenter() {
3650 transition(atos, vtos);
3652 // check for NULL object
3653 __ null_check(rax);
3655 const Address monitor_block_top(
3656 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3657 const Address monitor_block_bot(
3658 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3659 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3661 Label allocated;
3663 // initialize entry pointer
3664 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3666 // find a free slot in the monitor block (result in c_rarg1)
3667 {
3668 Label entry, loop, exit;
3669 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3670 // starting with top-most entry
3671 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3672 // of monitor block
3673 __ jmpb(entry);
3675 __ bind(loop);
3676 // check if current entry is used
3677 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3678 // if not used then remember entry in c_rarg1
3679 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3680 // check if current entry is for same object
3681 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3682 // if same object then stop searching
3683 __ jccb(Assembler::equal, exit);
3684 // otherwise advance to next entry
3685 __ addptr(c_rarg3, entry_size);
3686 __ bind(entry);
3687 // check if bottom reached
3688 __ cmpptr(c_rarg3, c_rarg2);
3689 // if not at bottom then check this entry
3690 __ jcc(Assembler::notEqual, loop);
3691 __ bind(exit);
3692 }
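// A C-level sketch of the search above, with hypothetical names (free slot
// tracked in c_rarg1, cursor in c_rarg3, limit in c_rarg2):
//   BasicObjectLock* free = NULL;
//   for (BasicObjectLock* e = top; e != limit; e = next_entry(e)) {
//     if (e->obj() == NULL) free = e;   // remember most recent free slot
//     if (e->obj() == obj)  break;      // stop at an existing entry for obj
//   }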
3694 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3695 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3697 // allocate one if there's no free slot
3698 {
3699 Label entry, loop;
3700 // 1. compute new pointers // rsp: old expression stack top
3701 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3702 __ subptr(rsp, entry_size); // move expression stack top
3703 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3704 __ mov(c_rarg3, rsp); // set start value for copy loop
3705 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3706 __ jmp(entry);
3707 // 2. move expression stack contents
3708 __ bind(loop);
3709 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3710 // word from old location
3711 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3712 __ addptr(c_rarg3, wordSize); // advance to next word
3713 __ bind(entry);
3714 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3715 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3716 // copy next word
3717 }
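// Illustrative pseudocode for the shuffle above (hypothetical names): the
// expression stack is slid down by one monitor entry to open a new slot:
//   rsp     -= entry_size;              // new expression stack top
//   new_bot  = old_bot - entry_size;    // monitor block grows by one entry
//   for (p = rsp; p != new_bot; p += wordSize)
//     *p = *(p + entry_size);           // copy each stack word to new home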
3719 // call run-time routine
3720 // c_rarg1: points to monitor entry
3721 __ bind(allocated);
3723 // Increment bcp to point to the next bytecode, so exception
3724 // handling for async exceptions works correctly.
3725 // The object has already been popped from the stack, so the
3726 // expression stack looks correct.
3727 __ increment(r13);
3729 // store object
3730 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3731 __ lock_object(c_rarg1);
3733 // check to make sure this monitor doesn't cause stack overflow after locking
3734 __ save_bcp(); // in case of exception
3735 __ generate_stack_overflow_check(0);
3737 // The bcp has already been incremented. Just need to dispatch to
3738 // next instruction.
3739 __ dispatch_next(vtos);
3740 }
3743 void TemplateTable::monitorexit() {
3744 transition(atos, vtos);
3746 // check for NULL object
3747 __ null_check(rax);
3749 const Address monitor_block_top(
3750 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3751 const Address monitor_block_bot(
3752 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3753 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3755 Label found;
3757 // find matching slot
3758 {
3759 Label entry, loop;
3760 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3761 // starting with top-most entry
3762 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3763 // of monitor block
3764 __ jmpb(entry);
3766 __ bind(loop);
3767 // check if current entry is for same object
3768 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3769 // if same object then stop searching
3770 __ jcc(Assembler::equal, found);
3771 // otherwise advance to next entry
3772 __ addptr(c_rarg1, entry_size);
3773 __ bind(entry);
3774 // check if bottom reached
3775 __ cmpptr(c_rarg1, c_rarg2);
3776 // if not at bottom then check this entry
3777 __ jcc(Assembler::notEqual, loop);
3778 }
3780 // Error handling: no matching monitor entry was found, so the
3781 // unlocking was not block-structured
3781 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3782 InterpreterRuntime::throw_illegal_monitor_state_exception));
3783 __ should_not_reach_here();
3785 // call run-time routine
3786 // c_rarg1: points to monitor entry
3787 __ bind(found);
3788 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3789 __ unlock_object(c_rarg1);
3790 __ pop_ptr(rax); // discard object
3791 }
3794 // Wide instructions
3795 void TemplateTable::wide() {
3796 transition(vtos, vtos);
3797 __ load_unsigned_byte(rbx, at_bcp(1));
3798 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3799 __ jmp(Address(rscratch1, rbx, Address::times_8));
3800 // Note: the r13 increment step is part of the individual wide
3801 // bytecode implementations
3802 }
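// Example (per the JVM specification, not specific to this file): for
// "wide iload" the bytecode stream is
//   wide(0xc4) iload(0x15) indexbyte1 indexbyte2
// so the byte loaded from bcp+1 above selects the iload slot in
// Interpreter::_wentry_point, whose code then reads the 16-bit local index.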
3805 // Multi arrays
3806 void TemplateTable::multianewarray() {
3807 transition(vtos, atos);
3808 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3809 // last dim is on top of stack; we want address of first one:
3810 // first_addr = last_addr + (ndims - 1) * wordSize
3811 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3812 call_VM(rax,
3813 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3814 c_rarg1);
3815 __ load_unsigned_byte(rbx, at_bcp(3));
3816 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3817 }
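// Worked example (illustrative): for "new int[2][3][4]" javac emits a
// multianewarray with 3 dimensions. On entry the dimension words sit on the
// expression stack with the last one (4) on top at rsp, so with rax = 3 the
// first lea computes c_rarg1 = rsp + (3 - 1) * wordSize, the address of the
// first dimension; the final lea then pops all three dimension words.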
3818 #endif // !CC_INTERP