Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}
// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp().
// It isn't for category 1 values.
static inline Address at_tos() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

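// Note: j_not inverts the sense of a comparison so that the conditional
// branch templates below can keep the taken path on the straight line:
// if_icmp(equal), for example, emits a compare followed by
// jcc(notEqual, not_taken) and falls through into the taken branch.
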
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
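        // G1's snapshot-at-the-beginning marking needs the value being
        // overwritten: the pre-barrier below loads the old oop at (rdx)
        // and enqueues it before the new value is stored.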
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8 /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }

      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();

  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done); // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}

// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
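  // The two operand bytes are big-endian in the bytecode stream: the
  // little-endian word load leaves them reversed in the low half of rax,
  // bswapl puts them in native order in the high half, and the arithmetic
  // shift brings the value back down, sign-extended.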
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
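  // Local slot n lives at r14 - n*wordSize (locals grow toward lower
  // addresses), so the index is negated here before iaddress() scales it
  // by the word size.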
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2. We only want to rewrite
    // the last two iloads in a pair. Matching _fast_iload below means the
    // next bytecode is neither a plain iload nor a caload, so the current
    // pair can be fused into fast_iload2.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
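  // A single unsigned compare covers both bounds: a negative index, viewed
  // unsigned, is above any legal array length, so aboveEqual catches
  // index < 0 as well as index >= length.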
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecode pairs need only a small amount of code, which makes
  // them the most profitable to rewrite.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx); // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register. Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
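  // The second operand is popped into rdx and combined into tosca (rax).
  // Subtraction is not commutative, so it first moves the top of stack
  // aside and pops the left operand; x86 variable shifts take their count
  // in cl, so the shift cases move the count into rcx first.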
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

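// Note: xorps/xorpd with these masks flips only the IEEE sign bit, which
// matches Java's fneg/dneg semantics exactly, including for NaN and signed
// zero. The pools are twice the needed size so that double_quadword can
// round down to a 16-byte boundary while staying inside the buffer.
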
void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;
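  // Note: on NaN or out-of-range input, cvttss2si/cvttsd2si return the
  // x86 "integer indefinite" value (0x80000000, or 0x8000000000000000 for
  // the quadword forms). The compares below detect that value and fall
  // into the runtime stubs, which implement Java's saturating semantics.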

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
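  // Materialize the -1/0/1 result: rax is preset to -1 for the less-than
  // case; otherwise setb(notEqual) yields 0 for equal and 1 for greater,
  // and movzbl widens the byte result.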
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
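  // ucomiss/ucomisd set the parity flag when the comparison is unordered
  // (at least one operand is NaN); the parity jumps below steer that case
  // to the result the bytecode prescribes (-1 for fcmpl/dcmpl, +1 for
  // fcmpg/dcmpg).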
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = Method::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = Method::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;
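  // A normal branch carries a signed 16-bit displacement, a wide one
  // (goto_w, jsr_w) a signed 32-bit one. After the byte swap the 16-bit
  // displacement sits in the upper half of rdx, so the arithmetic shift
  // below both repositions and sign-extends it.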

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in Method*
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));            // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
      __ addl(rax, Address(rcx, be_offset));             // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, which value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
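  // Per the JVM spec, up to three padding bytes follow the tableswitch
  // opcode so that the default, lo and hi words are 4-byte aligned;
  // rounding bcp+1 up to the next multiple of BytesPerInt (via the
  // lea/andptr below) finds the start of the jump table.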
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
1930 // // where a stands for the array and assuming that the (non-existing)
1931 // // element a[n] is infinitely big.
1932 // int h = (i + j) >> 1;
1933 // // i < h < j
1934 // if (key < array[h].fast_match()) {
1935 // j = h;
1936 // } else {
1937 // i = h;
1938 // }
1939 // }
1940 // // R: a[i] <= key < a[i+1] or Q
1941 // // (i.e., if key is within array, i is the correct index)
1942 // return i;
1943 // }
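// Note: each LookupswitchPair is two 4-byte big-endian words
// (match, offset), which is why the loads below scale the index by
// Address::times_8 and fetch the offset at +BytesPerInt; npairs sits at
// array[-BytesPerInt] and the default offset at array[-2 * BytesPerInt].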
1945 // Register allocation
1946 const Register key = rax; // already set (tosca)
1947 const Register array = rbx;
1948 const Register i = rcx;
1949 const Register j = rdx;
1950 const Register h = rdi;
1951 const Register temp = rsi;
1953 // Find array start
1954 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1955 // get rid of this
1956 // instruction (change
1957 // offsets below)
1958 __ andptr(array, -BytesPerInt);
1960 // Initialize i & j
1961 __ xorl(i, i); // i = 0;
1962 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1964 // Convert j into native byte ordering
1965 __ bswapl(j);
1967 // And start
1968 Label entry;
1969 __ jmp(entry);
1971 // binary search loop
1972 {
1973 Label loop;
1974 __ bind(loop);
1975 // int h = (i + j) >> 1;
1976 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1977 __ sarl(h, 1); // h = (i + j) >> 1;
1978 // if (key < array[h].fast_match()) {
1979 // j = h;
1980 // } else {
1981 // i = h;
1982 // }
1983 // Convert array[h].match to native byte-ordering before compare
1984 __ movl(temp, Address(array, h, Address::times_8));
1985 __ bswapl(temp);
1986 __ cmpl(key, temp);
1987 // j = h if (key < array[h].fast_match())
1988 __ cmovl(Assembler::less, j, h);
1989 // i = h if (key >= array[h].fast_match())
1990 __ cmovl(Assembler::greaterEqual, i, h);
1991 // while (i+1 < j)
1992 __ bind(entry);
1993 __ leal(h, Address(i, 1)); // i+1
1994 __ cmpl(h, j); // i+1 < j
1995 __ jcc(Assembler::less, loop);
1996 }
1998 // end of binary search, result index is i (must check again!)
1999 Label default_case;
2000 // Convert array[i].match to native byte-ordering before compare
2001 __ movl(temp, Address(array, i, Address::times_8));
2002 __ bswapl(temp);
2003 __ cmpl(key, temp);
2004 __ jcc(Assembler::notEqual, default_case);
2006 // entry found -> j = offset
2007 __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2008 __ profile_switch_case(i, key, array);
2009 __ bswapl(j);
2010 __ movl2ptr(j, j);
2011 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2012 __ addptr(r13, j);
2013 __ dispatch_only(vtos);
2015 // default case -> j = default offset
2016 __ bind(default_case);
2017 __ profile_switch_default(i);
2018 __ movl(j, Address(array, -2 * BytesPerInt));
2019 __ bswapl(j);
2020 __ movl2ptr(j, j);
2021 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2022 __ addptr(r13, j);
2023 __ dispatch_only(vtos);
2024 }
2027 void TemplateTable::_return(TosState state) {
2028 transition(state, state);
2029 assert(_desc->calls_vm(),
2030 "inconsistent calls_vm information"); // call in remove_activation
2032 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2033 assert(state == vtos, "only valid state");
2034 __ movptr(c_rarg1, aaddress(0));
2035 __ load_klass(rdi, c_rarg1);
2036 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2037 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2038 Label skip_register_finalizer;
2039 __ jcc(Assembler::zero, skip_register_finalizer);
2041 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2043 __ bind(skip_register_finalizer);
2044 }
2046 __ remove_activation(state, r13);
2047 __ jmp(r13);
2048 }
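// Note: _return_register_finalizer is the rewritten return of
// Object.<init> for classes that declare a finalizer, e.g. (Java,
// for illustration):
//
// class Resource {
//   protected void finalize() { ... }  // sets JVM_ACC_HAS_FINALIZER
// }
//
// so the receiver in local 0 is registered with the collector exactly
// once, when its Object constructor returns.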
2050 // ----------------------------------------------------------------------------
2051 // Volatile variables demand their effects be made known to all CPUs
2052 // in order. Store buffers on most chips allow reads & writes to
2053 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2054 // without some kind of memory barrier (i.e., it's not sufficient that
2055 // the interpreter does not reorder volatile references, the hardware
2056 // also must not reorder them).
2057 //
2058 // According to the new Java Memory Model (JMM):
2059 // (1) All volatiles are serialized wrt each other. ALSO reads &
2060 // writes act as acquire & release, so:
2061 // (2) A read cannot let unrelated NON-volatile memory refs that
2062 // happen after the read float up to before the read. It's OK for
2063 // non-volatile memory refs that happen before the volatile read to
2064 // float down below it.
2065 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2066 // memory refs that happen BEFORE the write float down to after the
2067 // write. It's OK for non-volatile memory refs that happen after the
2068 // volatile write to float up before it.
2069 //
2070 // We only put in barriers around volatile refs (they are expensive),
2071 // not _between_ memory refs (that would require us to track the
2072 // flavor of the previous memory refs). Requirements (2) and (3)
2073 // require some barriers before volatile stores and after volatile
2074 // loads. These nearly cover requirement (1) but miss the
2075 // volatile-store-volatile-load case. The barrier for this final case
2076 // is placed after volatile-stores, although it could just as well go
2077 // before volatile-loads.
2078 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2079 order_constraint) {
2080 // Helper function to insert a memory barrier; callers perform the is-volatile test
2081 if (os::is_MP()) { // Not needed on single CPU
2082 __ membar(order_constraint);
2083 }
2084 }
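// The policy above as pseudo-code (illustrative; on x86 only the
// store->load ordering needs a real instruction, which is why the
// put-field paths below emit membar(StoreLoad | StoreStore) after
// volatile stores and no barrier after volatile loads):
//
// volatile_store(a, v): membar(StoreStore);           // requirement (3)
//                       *a = v;
//                       membar(StoreLoad);            // store->load case
// volatile_load(a):     v = *a;
//                       membar(LoadLoad | LoadStore); // requirement (2)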
2086 void TemplateTable::resolve_cache_and_index(int byte_no,
2087 Register Rcache,
2088 Register index,
2089 size_t index_size) {
2090 const Register temp = rbx;
2091 assert_different_registers(Rcache, index, temp);
2093 Label resolved;
2094 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2095 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2096 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2097 __ jcc(Assembler::equal, resolved);
2099 // resolve first time through
2100 address entry;
2101 switch (bytecode()) {
2102 case Bytecodes::_getstatic:
2103 case Bytecodes::_putstatic:
2104 case Bytecodes::_getfield:
2105 case Bytecodes::_putfield:
2106 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2107 break;
2108 case Bytecodes::_invokevirtual:
2109 case Bytecodes::_invokespecial:
2110 case Bytecodes::_invokestatic:
2111 case Bytecodes::_invokeinterface:
2112 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2113 break;
2114 case Bytecodes::_invokehandle:
2115 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2116 break;
2117 case Bytecodes::_invokedynamic:
2118 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2119 break;
2120 default:
2121 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2122 break;
2123 }
2124 __ movl(temp, (int) bytecode());
2125 __ call_VM(noreg, entry, temp);
2127 // Update registers with resolved info
2128 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2129 __ bind(resolved);
2130 }
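// In outline, the fast path above is (pseudo-C, illustrative):
//
//   if (cache_entry_bytecode(byte_no) == bytecode()) goto resolved;
//   call_VM(resolve_get_put / resolve_invoke / ...);  // link the entry
//   reload cache and index;                           // now resolved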
2132 // The cache and index registers must be set before call
2133 void TemplateTable::load_field_cp_cache_entry(Register obj,
2134 Register cache,
2135 Register index,
2136 Register off,
2137 Register flags,
2138 bool is_static = false) {
2139 assert_different_registers(cache, index, flags, off);
2141 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2142 // Field offset
2143 __ movptr(off, Address(cache, index, Address::times_ptr,
2144 in_bytes(cp_base_offset +
2145 ConstantPoolCacheEntry::f2_offset())));
2146 // Flags
2147 __ movl(flags, Address(cache, index, Address::times_ptr,
2148 in_bytes(cp_base_offset +
2149 ConstantPoolCacheEntry::flags_offset())));
2151 // klass overwrite register
2152 if (is_static) {
2153 __ movptr(obj, Address(cache, index, Address::times_ptr,
2154 in_bytes(cp_base_offset +
2155 ConstantPoolCacheEntry::f1_offset())));
2156 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2157 __ movptr(obj, Address(obj, mirror_offset));
2158 }
2159 }
2161 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2162 Register method,
2163 Register itable_index,
2164 Register flags,
2165 bool is_invokevirtual,
2166 bool is_invokevfinal, /*unused*/
2167 bool is_invokedynamic) {
2168 // setup registers
2169 const Register cache = rcx;
2170 const Register index = rdx;
2171 assert_different_registers(method, flags);
2172 assert_different_registers(method, cache, index);
2173 assert_different_registers(itable_index, flags);
2174 assert_different_registers(itable_index, cache, index);
2175 // determine constant pool cache field offsets
2176 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2177 const int method_offset = in_bytes(
2178 ConstantPoolCache::base_offset() +
2179 ((byte_no == f2_byte)
2180 ? ConstantPoolCacheEntry::f2_offset()
2181 : ConstantPoolCacheEntry::f1_offset()));
2182 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2183 ConstantPoolCacheEntry::flags_offset());
2184 // access constant pool cache fields
2185 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2186 ConstantPoolCacheEntry::f2_offset());
2188 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2189 resolve_cache_and_index(byte_no, cache, index, index_size);
2190 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2192 if (itable_index != noreg) {
2193 // pick up itable or appendix index from f2 also:
2194 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2195 }
2196 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2197 }
2199 // Correct values of the cache and index registers are preserved.
2200 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2201 bool is_static, bool has_tos) {
2202 // do the JVMTI work here to avoid disturbing the register state below
2203 // We use c_rarg registers here because they are the registers used in
2204 // the call to the VM
2205 if (JvmtiExport::can_post_field_access()) {
2206 // Check to see if a field access watch has been set before we
2207 // take the time to call into the VM.
2208 Label L1;
2209 assert_different_registers(cache, index, rax);
2210 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2211 __ testl(rax, rax);
2212 __ jcc(Assembler::zero, L1);
2214 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2216 // cache entry pointer
2217 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2218 __ shll(c_rarg3, LogBytesPerWord);
2219 __ addptr(c_rarg2, c_rarg3);
2220 if (is_static) {
2221 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2222 } else {
2223 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2224 __ verify_oop(c_rarg1);
2225 }
2226 // c_rarg1: object pointer or NULL
2227 // c_rarg2: cache entry pointer
2228 // c_rarg3: jvalue object on the stack
2229 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2230 InterpreterRuntime::post_field_access),
2231 c_rarg1, c_rarg2, c_rarg3);
2232 __ get_cache_and_index_at_bcp(cache, index, 1);
2233 __ bind(L1);
2234 }
2235 }
2237 void TemplateTable::pop_and_check_object(Register r) {
2238 __ pop_ptr(r);
2239 __ null_check(r); // for field access must check obj.
2240 __ verify_oop(r);
2241 }
2243 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2244 transition(vtos, vtos);
2246 const Register cache = rcx;
2247 const Register index = rdx;
2248 const Register obj = c_rarg3;
2249 const Register off = rbx;
2250 const Register flags = rax;
2251 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2253 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2254 jvmti_post_field_access(cache, index, is_static, false);
2255 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2257 if (!is_static) {
2258 // obj is on the stack
2259 pop_and_check_object(obj);
2260 }
2262 const Address field(obj, off, Address::times_1);
2264 Label Done, notByte, notInt, notShort, notChar,
2265 notLong, notFloat, notObj, notDouble;
2267 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2268 // Make sure we don't need to mask edx after the above shift
2269 assert(btos == 0, "change code, btos != 0");
2271 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2272 __ jcc(Assembler::notZero, notByte);
2273 // btos
2274 __ load_signed_byte(rax, field);
2275 __ push(btos);
2276 // Rewrite bytecode to be faster
2277 if (!is_static) {
2278 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2279 }
2280 __ jmp(Done);
2282 __ bind(notByte);
2283 __ cmpl(flags, atos);
2284 __ jcc(Assembler::notEqual, notObj);
2285 // atos
2286 __ load_heap_oop(rax, field);
2287 __ push(atos);
2288 if (!is_static) {
2289 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2290 }
2291 __ jmp(Done);
2293 __ bind(notObj);
2294 __ cmpl(flags, itos);
2295 __ jcc(Assembler::notEqual, notInt);
2296 // itos
2297 __ movl(rax, field);
2298 __ push(itos);
2299 // Rewrite bytecode to be faster
2300 if (!is_static) {
2301 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2302 }
2303 __ jmp(Done);
2305 __ bind(notInt);
2306 __ cmpl(flags, ctos);
2307 __ jcc(Assembler::notEqual, notChar);
2308 // ctos
2309 __ load_unsigned_short(rax, field);
2310 __ push(ctos);
2311 // Rewrite bytecode to be faster
2312 if (!is_static) {
2313 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2314 }
2315 __ jmp(Done);
2317 __ bind(notChar);
2318 __ cmpl(flags, stos);
2319 __ jcc(Assembler::notEqual, notShort);
2320 // stos
2321 __ load_signed_short(rax, field);
2322 __ push(stos);
2323 // Rewrite bytecode to be faster
2324 if (!is_static) {
2325 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2326 }
2327 __ jmp(Done);
2329 __ bind(notShort);
2330 __ cmpl(flags, ltos);
2331 __ jcc(Assembler::notEqual, notLong);
2332 // ltos
2333 __ movq(rax, field);
2334 __ push(ltos);
2335 // Rewrite bytecode to be faster
2336 if (!is_static) {
2337 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2338 }
2339 __ jmp(Done);
2341 __ bind(notLong);
2342 __ cmpl(flags, ftos);
2343 __ jcc(Assembler::notEqual, notFloat);
2344 // ftos
2345 __ movflt(xmm0, field);
2346 __ push(ftos);
2347 // Rewrite bytecode to be faster
2348 if (!is_static) {
2349 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2350 }
2351 __ jmp(Done);
2353 __ bind(notFloat);
2354 #ifdef ASSERT
2355 __ cmpl(flags, dtos);
2356 __ jcc(Assembler::notEqual, notDouble);
2357 #endif
2358 // dtos
2359 __ movdbl(xmm0, field);
2360 __ push(dtos);
2361 // Rewrite bytecode to be faster
2362 if (!is_static) {
2363 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2364 }
2365 #ifdef ASSERT
2366 __ jmp(Done);
2368 __ bind(notDouble);
2369 __ stop("Bad state");
2370 #endif
2372 __ bind(Done);
2373 // [jk] not needed currently
2374 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2375 // Assembler::LoadStore));
2376 }
2379 void TemplateTable::getfield(int byte_no) {
2380 getfield_or_static(byte_no, false);
2381 }
2383 void TemplateTable::getstatic(int byte_no) {
2384 getfield_or_static(byte_no, true);
2385 }
2387 // The registers cache and index are expected to be set before the call.
2388 // The function may destroy various registers, just not the cache and index registers.
2389 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2390 transition(vtos, vtos);
2392 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2394 if (JvmtiExport::can_post_field_modification()) {
2395 // Check to see if a field modification watch has been set before
2396 // we take the time to call into the VM.
2397 Label L1;
2398 assert_different_registers(cache, index, rax);
2399 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2400 __ testl(rax, rax);
2401 __ jcc(Assembler::zero, L1);
2403 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2405 if (is_static) {
2406 // Life is simple. Null out the object pointer.
2407 __ xorl(c_rarg1, c_rarg1);
2408 } else {
2409 // Life is harder. The stack holds the value on top, followed by
2410 // the object. We don't know the size of the value, though; it
2411 // could be one or two words depending on its type. As a result,
2412 // we must find the type to determine where the object is.
2413 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2414 Address::times_8,
2415 in_bytes(cp_base_offset +
2416 ConstantPoolCacheEntry::flags_offset())));
2417 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2418 // Make sure we don't need to mask rcx after the above shift
2419 ConstantPoolCacheEntry::verify_tos_state_shift();
2420 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2421 __ cmpl(c_rarg3, ltos);
2422 __ cmovptr(Assembler::equal,
2423 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2424 __ cmpl(c_rarg3, dtos);
2425 __ cmovptr(Assembler::equal,
2426 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2427 }
2428 // cache entry pointer
2429 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2430 __ shll(rscratch1, LogBytesPerWord);
2431 __ addptr(c_rarg2, rscratch1);
2432 // object (tos)
2433 __ mov(c_rarg3, rsp);
2434 // c_rarg1: object pointer set up above (NULL if static)
2435 // c_rarg2: cache entry pointer
2436 // c_rarg3: jvalue object on the stack
2437 __ call_VM(noreg,
2438 CAST_FROM_FN_PTR(address,
2439 InterpreterRuntime::post_field_modification),
2440 c_rarg1, c_rarg2, c_rarg3);
2441 __ get_cache_and_index_at_bcp(cache, index, 1);
2442 __ bind(L1);
2443 }
2444 }
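// Expression stack at this point for the non-static case (top at rsp,
// illustrative):
//
//   one-word value: [ value           ] <- rsp, start of jvalue
//                   [ object          ] <- at_tos_p1(), goes in c_rarg1
//
//   two-word value: [ value, 2 slots  ] <- rsp, start of jvalue
//                   [ object          ] <- at_tos_p2(), goes in c_rarg1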
2446 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2447 transition(vtos, vtos);
2449 const Register cache = rcx;
2450 const Register index = rdx;
2451 const Register obj = rcx;
2452 const Register off = rbx;
2453 const Register flags = rax;
2454 const Register bc = c_rarg3;
2456 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2457 jvmti_post_field_mod(cache, index, is_static);
2458 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2460 // [jk] not needed currently
2461 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2462 // Assembler::StoreStore));
2464 Label notVolatile, Done;
2465 __ movl(rdx, flags);
2466 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2467 __ andl(rdx, 0x1);
2469 // field address
2470 const Address field(obj, off, Address::times_1);
2472 Label notByte, notInt, notShort, notChar,
2473 notLong, notFloat, notObj, notDouble;
2475 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2477 assert(btos == 0, "change code, btos != 0");
2478 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2479 __ jcc(Assembler::notZero, notByte);
2481 // btos
2482 {
2483 __ pop(btos);
2484 if (!is_static) pop_and_check_object(obj);
2485 __ movb(field, rax);
2486 if (!is_static) {
2487 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2488 }
2489 __ jmp(Done);
2490 }
2492 __ bind(notByte);
2493 __ cmpl(flags, atos);
2494 __ jcc(Assembler::notEqual, notObj);
2496 // atos
2497 {
2498 __ pop(atos);
2499 if (!is_static) pop_and_check_object(obj);
2500 // Store into the field
2501 do_oop_store(_masm, field, rax, _bs->kind(), false);
2502 if (!is_static) {
2503 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2504 }
2505 __ jmp(Done);
2506 }
2508 __ bind(notObj);
2509 __ cmpl(flags, itos);
2510 __ jcc(Assembler::notEqual, notInt);
2512 // itos
2513 {
2514 __ pop(itos);
2515 if (!is_static) pop_and_check_object(obj);
2516 __ movl(field, rax);
2517 if (!is_static) {
2518 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2519 }
2520 __ jmp(Done);
2521 }
2523 __ bind(notInt);
2524 __ cmpl(flags, ctos);
2525 __ jcc(Assembler::notEqual, notChar);
2527 // ctos
2528 {
2529 __ pop(ctos);
2530 if (!is_static) pop_and_check_object(obj);
2531 __ movw(field, rax);
2532 if (!is_static) {
2533 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2534 }
2535 __ jmp(Done);
2536 }
2538 __ bind(notChar);
2539 __ cmpl(flags, stos);
2540 __ jcc(Assembler::notEqual, notShort);
2542 // stos
2543 {
2544 __ pop(stos);
2545 if (!is_static) pop_and_check_object(obj);
2546 __ movw(field, rax);
2547 if (!is_static) {
2548 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2549 }
2550 __ jmp(Done);
2551 }
2553 __ bind(notShort);
2554 __ cmpl(flags, ltos);
2555 __ jcc(Assembler::notEqual, notLong);
2557 // ltos
2558 {
2559 __ pop(ltos);
2560 if (!is_static) pop_and_check_object(obj);
2561 __ movq(field, rax);
2562 if (!is_static) {
2563 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2564 }
2565 __ jmp(Done);
2566 }
2568 __ bind(notLong);
2569 __ cmpl(flags, ftos);
2570 __ jcc(Assembler::notEqual, notFloat);
2572 // ftos
2573 {
2574 __ pop(ftos);
2575 if (!is_static) pop_and_check_object(obj);
2576 __ movflt(field, xmm0);
2577 if (!is_static) {
2578 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2579 }
2580 __ jmp(Done);
2581 }
2583 __ bind(notFloat);
2584 #ifdef ASSERT
2585 __ cmpl(flags, dtos);
2586 __ jcc(Assembler::notEqual, notDouble);
2587 #endif
2589 // dtos
2590 {
2591 __ pop(dtos);
2592 if (!is_static) pop_and_check_object(obj);
2593 __ movdbl(field, xmm0);
2594 if (!is_static) {
2595 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2596 }
2597 }
2599 #ifdef ASSERT
2600 __ jmp(Done);
2602 __ bind(notDouble);
2603 __ stop("Bad state");
2604 #endif
2606 __ bind(Done);
2608 // Check for volatile store
2609 __ testl(rdx, rdx);
2610 __ jcc(Assembler::zero, notVolatile);
2611 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2612 Assembler::StoreStore));
2613 __ bind(notVolatile);
2614 }
2616 void TemplateTable::putfield(int byte_no) {
2617 putfield_or_static(byte_no, false);
2618 }
2620 void TemplateTable::putstatic(int byte_no) {
2621 putfield_or_static(byte_no, true);
2622 }
2624 void TemplateTable::jvmti_post_fast_field_mod() {
2625 if (JvmtiExport::can_post_field_modification()) {
2626 // Check to see if a field modification watch has been set before
2627 // we take the time to call into the VM.
2628 Label L2;
2629 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2630 __ testl(c_rarg3, c_rarg3);
2631 __ jcc(Assembler::zero, L2);
2632 __ pop_ptr(rbx); // copy the object pointer from tos
2633 __ verify_oop(rbx);
2634 __ push_ptr(rbx); // put the object pointer back on tos
2635 // Save tos values before call_VM() clobbers them. Since we have
2636 // to do it for every data type, we use the saved values as the
2637 // jvalue object.
2638 switch (bytecode()) { // load values into the jvalue object
2639 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2640 case Bytecodes::_fast_bputfield: // fall through
2641 case Bytecodes::_fast_sputfield: // fall through
2642 case Bytecodes::_fast_cputfield: // fall through
2643 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2644 case Bytecodes::_fast_dputfield: __ push_d(); break;
2645 case Bytecodes::_fast_fputfield: __ push_f(); break;
2646 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2648 default:
2649 ShouldNotReachHere();
2650 }
2651 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2652 // access constant pool cache entry
2653 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2654 __ verify_oop(rbx);
2655 // rbx: object pointer copied above
2656 // c_rarg2: cache entry pointer
2657 // c_rarg3: jvalue object on the stack
2658 __ call_VM(noreg,
2659 CAST_FROM_FN_PTR(address,
2660 InterpreterRuntime::post_field_modification),
2661 rbx, c_rarg2, c_rarg3);
2663 switch (bytecode()) { // restore tos values
2664 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2665 case Bytecodes::_fast_bputfield: // fall through
2666 case Bytecodes::_fast_sputfield: // fall through
2667 case Bytecodes::_fast_cputfield: // fall through
2668 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2669 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2670 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2671 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2672 }
2673 __ bind(L2);
2674 }
2675 }
2677 void TemplateTable::fast_storefield(TosState state) {
2678 transition(state, vtos);
2680 ByteSize base = ConstantPoolCache::base_offset();
2682 jvmti_post_fast_field_mod();
2684 // access constant pool cache
2685 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2687 // test for volatile with rdx
2688 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2689 in_bytes(base +
2690 ConstantPoolCacheEntry::flags_offset())));
2692 // replace index with field offset from cache entry
2693 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2694 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2696 // [jk] not needed currently
2697 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2698 // Assembler::StoreStore));
2700 Label notVolatile;
2701 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2702 __ andl(rdx, 0x1);
2704 // Get object from stack
2705 pop_and_check_object(rcx);
2707 // field address
2708 const Address field(rcx, rbx, Address::times_1);
2710 // access field
2711 switch (bytecode()) {
2712 case Bytecodes::_fast_aputfield:
2713 do_oop_store(_masm, field, rax, _bs->kind(), false);
2714 break;
2715 case Bytecodes::_fast_lputfield:
2716 __ movq(field, rax);
2717 break;
2718 case Bytecodes::_fast_iputfield:
2719 __ movl(field, rax);
2720 break;
2721 case Bytecodes::_fast_bputfield:
2722 __ movb(field, rax);
2723 break;
2724 case Bytecodes::_fast_sputfield:
2725 // fall through
2726 case Bytecodes::_fast_cputfield:
2727 __ movw(field, rax);
2728 break;
2729 case Bytecodes::_fast_fputfield:
2730 __ movflt(field, xmm0);
2731 break;
2732 case Bytecodes::_fast_dputfield:
2733 __ movdbl(field, xmm0);
2734 break;
2735 default:
2736 ShouldNotReachHere();
2737 }
2739 // Check for volatile store
2740 __ testl(rdx, rdx);
2741 __ jcc(Assembler::zero, notVolatile);
2742 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2743 Assembler::StoreStore));
2744 __ bind(notVolatile);
2745 }
2748 void TemplateTable::fast_accessfield(TosState state) {
2749 transition(atos, state);
2751 // Do the JVMTI work here to avoid disturbing the register state below
2752 if (JvmtiExport::can_post_field_access()) {
2753 // Check to see if a field access watch has been set before we
2754 // take the time to call into the VM.
2755 Label L1;
2756 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2757 __ testl(rcx, rcx);
2758 __ jcc(Assembler::zero, L1);
2759 // access constant pool cache entry
2760 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2761 __ verify_oop(rax);
2762 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2763 __ mov(c_rarg1, rax);
2764 // c_rarg1: object pointer copied above
2765 // c_rarg2: cache entry pointer
2766 __ call_VM(noreg,
2767 CAST_FROM_FN_PTR(address,
2768 InterpreterRuntime::post_field_access),
2769 c_rarg1, c_rarg2);
2770 __ pop_ptr(rax); // restore object pointer
2771 __ bind(L1);
2772 }
2774 // access constant pool cache
2775 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2776 // replace index with field offset from cache entry
2777 // [jk] not needed currently
2778 // if (os::is_MP()) {
2779 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2780 // in_bytes(ConstantPoolCache::base_offset() +
2781 // ConstantPoolCacheEntry::flags_offset())));
2782 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2783 // __ andl(rdx, 0x1);
2784 // }
2785 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2786 in_bytes(ConstantPoolCache::base_offset() +
2787 ConstantPoolCacheEntry::f2_offset())));
2789 // rax: object
2790 __ verify_oop(rax);
2791 __ null_check(rax);
2792 Address field(rax, rbx, Address::times_1);
2794 // access field
2795 switch (bytecode()) {
2796 case Bytecodes::_fast_agetfield:
2797 __ load_heap_oop(rax, field);
2798 __ verify_oop(rax);
2799 break;
2800 case Bytecodes::_fast_lgetfield:
2801 __ movq(rax, field);
2802 break;
2803 case Bytecodes::_fast_igetfield:
2804 __ movl(rax, field);
2805 break;
2806 case Bytecodes::_fast_bgetfield:
2807 __ movsbl(rax, field);
2808 break;
2809 case Bytecodes::_fast_sgetfield:
2810 __ load_signed_short(rax, field);
2811 break;
2812 case Bytecodes::_fast_cgetfield:
2813 __ load_unsigned_short(rax, field);
2814 break;
2815 case Bytecodes::_fast_fgetfield:
2816 __ movflt(xmm0, field);
2817 break;
2818 case Bytecodes::_fast_dgetfield:
2819 __ movdbl(xmm0, field);
2820 break;
2821 default:
2822 ShouldNotReachHere();
2823 }
2824 // [jk] not needed currently
2825 // if (os::is_MP()) {
2826 // Label notVolatile;
2827 // __ testl(rdx, rdx);
2828 // __ jcc(Assembler::zero, notVolatile);
2829 // __ membar(Assembler::LoadLoad);
2830 // __ bind(notVolatile);
2831 //};
2832 }
2834 void TemplateTable::fast_xaccess(TosState state) {
2835 transition(vtos, state);
2837 // get receiver
2838 __ movptr(rax, aaddress(0));
2839 // access constant pool cache
2840 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2841 __ movptr(rbx,
2842 Address(rcx, rdx, Address::times_8,
2843 in_bytes(ConstantPoolCache::base_offset() +
2844 ConstantPoolCacheEntry::f2_offset())));
2845 // make sure exception is reported in correct bcp range (getfield is
2846 // next instruction)
2847 __ increment(r13);
2848 __ null_check(rax);
2849 switch (state) {
2850 case itos:
2851 __ movl(rax, Address(rax, rbx, Address::times_1));
2852 break;
2853 case atos:
2854 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2855 __ verify_oop(rax);
2856 break;
2857 case ftos:
2858 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2859 break;
2860 default:
2861 ShouldNotReachHere();
2862 }
2864 // [jk] not needed currently
2865 // if (os::is_MP()) {
2866 // Label notVolatile;
2867 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2868 // in_bytes(ConstantPoolCache::base_offset() +
2869 // ConstantPoolCacheEntry::flags_offset())));
2870 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2871 // __ testl(rdx, 0x1);
2872 // __ jcc(Assembler::zero, notVolatile);
2873 // __ membar(Assembler::LoadLoad);
2874 // __ bind(notVolatile);
2875 // }
2877 __ decrement(r13);
2878 }
2882 //-----------------------------------------------------------------------------
2883 // Calls
2885 void TemplateTable::count_calls(Register method, Register temp) {
2886 // implemented elsewhere
2887 ShouldNotReachHere();
2888 }
2890 void TemplateTable::prepare_invoke(int byte_no,
2891 Register method, // linked method (or i-klass)
2892 Register index, // itable index, MethodType, etc.
2893 Register recv, // if caller wants to see it
2894 Register flags // if caller wants to test it
2895 ) {
2896 // determine flags
2897 const Bytecodes::Code code = bytecode();
2898 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2899 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2900 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2901 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2902 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2903 const bool load_receiver = (recv != noreg);
2904 const bool save_flags = (flags != noreg);
2905 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2906 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2907 assert(flags == noreg || flags == rdx, "");
2908 assert(recv == noreg || recv == rcx, "");
2910 // setup registers & access constant pool cache
2911 if (recv == noreg) recv = rcx;
2912 if (flags == noreg) flags = rdx;
2913 assert_different_registers(method, index, recv, flags);
2915 // save 'interpreter return address'
2916 __ save_bcp();
2918 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2920 // maybe push appendix to arguments (just before return address)
2921 if (is_invokedynamic || is_invokehandle) {
2922 Label L_no_push;
2923 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2924 __ jcc(Assembler::zero, L_no_push);
2925 // Push the appendix as a trailing parameter.
2926 // This must be done before we get the receiver,
2927 // since the parameter_size includes it.
2928 __ push(rbx);
2929 __ mov(rbx, index);
2930 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2931 __ load_resolved_reference_at_index(index, rbx);
2932 __ pop(rbx);
2933 __ push(index); // push appendix (MethodType, CallSite, etc.)
2934 __ bind(L_no_push);
2935 }
2937 // load receiver if needed (after appendix is pushed so parameter size is correct)
2938 // Note: no return address pushed yet
2939 if (load_receiver) {
2940 __ movl(recv, flags);
2941 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2942 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2943 const int receiver_is_at_end = -1; // back off one slot to get receiver
2944 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2945 __ movptr(recv, recv_addr);
2946 __ verify_oop(recv);
2947 }
2949 if (save_flags) {
2950 __ movl(r13, flags);
2951 }
2953 // compute return type
2954 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2955 // Make sure we don't need to mask flags after the above shift
2956 ConstantPoolCacheEntry::verify_tos_state_shift();
2957 // load return address
2958 {
2959 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2960 (address)Interpreter::return_5_addrs_by_index_table() :
2961 (address)Interpreter::return_3_addrs_by_index_table();
2962 ExternalAddress table(table_addr);
2963 __ lea(rscratch1, table);
2964 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2965 }
2967 // push return address
2968 __ push(flags);
2970 // Restore flags value from r13, and restore r13 (the bytecode
2971 // pointer) for later null checks
2972 if (save_flags) {
2973 __ movl(flags, r13);
2974 __ restore_bcp();
2975 }
2976 }
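// After the appendix push (invokedynamic/invokehandle), the outgoing
// arguments are laid out as below (top at rsp; a sketch, assuming the
// appendix is counted in parameter_size as noted above):
//
//   [ appendix         ] <- rsp
//   [ arg n-1          ]
//   ...
//   [ arg 0 / receiver ]
//
// which is why the receiver can only be located after the push: its
// slot index depends on the full parameter_size.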
2979 void TemplateTable::invokevirtual_helper(Register index,
2980 Register recv,
2981 Register flags) {
2982 // Uses temporary registers rax, rdx
2983 assert_different_registers(index, recv, rax, rdx);
2984 assert(index == rbx, "");
2985 assert(recv == rcx, "");
2987 // Test for an invoke of a final method
2988 Label notFinal;
2989 __ movl(rax, flags);
2990 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2991 __ jcc(Assembler::zero, notFinal);
2993 const Register method = index; // method must be rbx
2994 assert(method == rbx,
2995 "Method* must be rbx for interpreter calling convention");
2997 // do the call - the index is actually the method to call
2998 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3000 // It's final, need a null check here!
3001 __ null_check(recv);
3003 // profile this call
3004 __ profile_final_call(rax);
3006 __ jump_from_interpreted(method, rax);
3008 __ bind(notFinal);
3010 // get receiver klass
3011 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3012 __ load_klass(rax, recv);
3014 // profile this call
3015 __ profile_virtual_call(rax, r14, rdx);
3017 // get target Method* & entry point
3018 __ lookup_virtual_method(rax, index, method);
3019 __ jump_from_interpreted(method, rdx);
3020 }
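// The notFinal path above is classic vtable dispatch (pseudo-C,
// illustrative):
//
//   klass  = recv->klass();            // with implicit null check
//   method = klass->vtable()[index];   // lookup_virtual_method
//   jump(method's interpreter entry);  // jump_from_interpreted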
3023 void TemplateTable::invokevirtual(int byte_no) {
3024 transition(vtos, vtos);
3025 assert(byte_no == f2_byte, "use this argument");
3026 prepare_invoke(byte_no,
3027 rbx, // method or vtable index
3028 noreg, // unused itable index
3029 rcx, rdx); // recv, flags
3031 // rbx: index
3032 // rcx: receiver
3033 // rdx: flags
3035 invokevirtual_helper(rbx, rcx, rdx);
3036 }
3039 void TemplateTable::invokespecial(int byte_no) {
3040 transition(vtos, vtos);
3041 assert(byte_no == f1_byte, "use this argument");
3042 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3043 rcx); // get receiver also for null check
3044 __ verify_oop(rcx);
3045 __ null_check(rcx);
3046 // do the call
3047 __ profile_call(rax);
3048 __ jump_from_interpreted(rbx, rax);
3049 }
3052 void TemplateTable::invokestatic(int byte_no) {
3053 transition(vtos, vtos);
3054 assert(byte_no == f1_byte, "use this argument");
3055 prepare_invoke(byte_no, rbx); // get f1 Method*
3056 // do the call
3057 __ profile_call(rax);
3058 __ jump_from_interpreted(rbx, rax);
3059 }
3061 void TemplateTable::fast_invokevfinal(int byte_no) {
3062 transition(vtos, vtos);
3063 assert(byte_no == f2_byte, "use this argument");
3064 __ stop("fast_invokevfinal not used on amd64");
3065 }
3067 void TemplateTable::invokeinterface(int byte_no) {
3068 transition(vtos, vtos);
3069 assert(byte_no == f1_byte, "use this argument");
3070 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3071 rcx, rdx); // recv, flags
3073 // rax: interface klass (from f1)
3074 // rbx: itable index (from f2)
3075 // rcx: receiver
3076 // rdx: flags
3078 // Special case of invokeinterface called for virtual method of
3079 // java.lang.Object. See cpCache.cpp for details.
3080 // This code isn't produced by javac, but could be produced by
3081 // another compliant java compiler.
3082 Label notMethod;
3083 __ movl(r14, rdx);
3084 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3085 __ jcc(Assembler::zero, notMethod);
3087 invokevirtual_helper(rbx, rcx, rdx);
3088 __ bind(notMethod);
3090 // Get receiver klass into rdx - also a null check
3091 __ restore_locals(); // restore r14
3092 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3093 __ load_klass(rdx, rcx);
3095 // profile this call
3096 __ profile_virtual_call(rdx, r13, r14);
3098 Label no_such_interface, no_such_method;
3100 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3101 rdx, rax, rbx,
3102 // outputs: method, scan temp. reg
3103 rbx, r13,
3104 no_such_interface);
3106 // rbx: Method* to call
3107 // rcx: receiver
3108 // Check for abstract method error
3109 // Note: This should be done more efficiently via a throw_abstract_method_error
3110 // interpreter entry point and a conditional jump to it in case of a null
3111 // method.
3112 __ testptr(rbx, rbx);
3113 __ jcc(Assembler::zero, no_such_method);
3115 // do the call
3116 // rcx: receiver
3117 // rbx,: Method*
3118 __ jump_from_interpreted(rbx, rdx);
3119 __ should_not_reach_here();
3121 // exception handling code follows...
3122 // note: must restore interpreter registers to canonical
3123 // state for exception handling to work correctly!
3125 __ bind(no_such_method);
3126 // throw exception
3127 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3128 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3129 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3130 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3131 // the call_VM checks for exception, so we should never return here.
3132 __ should_not_reach_here();
3134 __ bind(no_such_interface);
3135 // throw exception
3136 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3137 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3138 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3139 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3140 InterpreterRuntime::throw_IncompatibleClassChangeError));
3141 // the call_VM checks for exception, so we should never return here.
3142 __ should_not_reach_here();
3143 }
3146 void TemplateTable::invokehandle(int byte_no) {
3147 transition(vtos, vtos);
3148 assert(byte_no == f1_byte, "use this argument");
3149 const Register rbx_method = rbx;
3150 const Register rax_mtype = rax;
3151 const Register rcx_recv = rcx;
3152 const Register rdx_flags = rdx;
3154 if (!EnableInvokeDynamic) {
3155 // rewriter does not generate this bytecode
3156 __ should_not_reach_here();
3157 return;
3158 }
3160 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3161 __ verify_method_ptr(rbx_method);
3162 __ verify_oop(rcx_recv);
3163 __ null_check(rcx_recv);
3165 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3166 // rbx: MH.invokeExact_MT method (from f2)
3168 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3170 // FIXME: profile the LambdaForm also
3171 __ profile_final_call(rax);
3173 __ jump_from_interpreted(rbx_method, rdx);
3174 }
3177 void TemplateTable::invokedynamic(int byte_no) {
3178 transition(vtos, vtos);
3179 assert(byte_no == f1_byte, "use this argument");
3181 if (!EnableInvokeDynamic) {
3182 // We should not encounter this bytecode if !EnableInvokeDynamic.
3183 // The verifier will stop it. However, if we get past the verifier,
3184 // this will stop the thread in a reasonable way, without crashing the JVM.
3185 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3186 InterpreterRuntime::throw_IncompatibleClassChangeError));
3187 // the call_VM checks for exception, so we should never return here.
3188 __ should_not_reach_here();
3189 return;
3190 }
3192 const Register rbx_method = rbx;
3193 const Register rax_callsite = rax;
3195 prepare_invoke(byte_no, rbx_method, rax_callsite);
3197 // rax: CallSite object (from cpool->resolved_references[f1])
3198 // rbx: MH.linkToCallSite method (from f2)
3200 // Note: rax_callsite is already pushed by prepare_invoke
3202 // %%% should make a type profile for any invokedynamic that takes a ref argument
3203 // profile this call
3204 __ profile_call(r13);
3206 __ verify_oop(rax_callsite);
3208 __ jump_from_interpreted(rbx_method, rdx);
3209 }
3212 //-----------------------------------------------------------------------------
3213 // Allocation
3215 void TemplateTable::_new() {
3216 transition(vtos, atos);
3217 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3218 Label slow_case;
3219 Label done;
3220 Label initialize_header;
3221 Label initialize_object; // including clearing the fields
3222 Label allocate_shared;
3224 __ get_cpool_and_tags(rsi, rax);
3225 // Make sure the class we're about to instantiate has been resolved.
3226 // This is done before loading InstanceKlass to be consistent with the order
3227 // how Constant Pool is updated (see ConstantPool::klass_at_put)
3228 const int tags_offset = Array<u1>::base_offset_in_bytes();
3229 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3230 JVM_CONSTANT_Class);
3231 __ jcc(Assembler::notEqual, slow_case);
3233 // get InstanceKlass
3234 __ movptr(rsi, Address(rsi, rdx,
3235 Address::times_8, sizeof(ConstantPool)));
3237 // make sure klass is initialized & doesn't have finalizer
3238 // make sure klass is fully initialized
3239 __ cmpb(Address(rsi,
3240 InstanceKlass::init_state_offset()),
3241 InstanceKlass::fully_initialized);
3242 __ jcc(Assembler::notEqual, slow_case);
3244 // get instance_size in InstanceKlass (scaled to a count of bytes)
3245 __ movl(rdx,
3246 Address(rsi,
3247 Klass::layout_helper_offset()));
3248 // test to see if it has a finalizer or is malformed in some way
3249 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3250 __ jcc(Assembler::notZero, slow_case);
3252 // Allocate the instance
3253 // 1) Try to allocate in the TLAB
3254 // 2) if fail and the object is large allocate in the shared Eden
3255 // 3) if the above fails (or is not applicable), go to a slow case
3256 // (creates a new TLAB, etc.)
3258 const bool allow_shared_alloc =
3259 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3261 if (UseTLAB) {
3262 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3263 __ lea(rbx, Address(rax, rdx, Address::times_1));
3264 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3265 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3266 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3267 if (ZeroTLAB) {
3268 // the fields have been already cleared
3269 __ jmp(initialize_header);
3270 } else {
3271 // initialize both the header and fields
3272 __ jmp(initialize_object);
3273 }
3274 }
3276 // Allocation in the shared Eden, if allowed.
3277 //
3278 // rdx: instance size in bytes
3279 if (allow_shared_alloc) {
3280 __ bind(allocate_shared);
3282 ExternalAddress top((address)Universe::heap()->top_addr());
3283 ExternalAddress end((address)Universe::heap()->end_addr());
3285 const Register RtopAddr = rscratch1;
3286 const Register RendAddr = rscratch2;
3288 __ lea(RtopAddr, top);
3289 __ lea(RendAddr, end);
3290 __ movptr(rax, Address(RtopAddr, 0));
3292 // For retries rax gets set by cmpxchgq
3293 Label retry;
3294 __ bind(retry);
3295 __ lea(rbx, Address(rax, rdx, Address::times_1));
3296 __ cmpptr(rbx, Address(RendAddr, 0));
3297 __ jcc(Assembler::above, slow_case);
3299 // Compare rax with the current top addr, and if still equal, store the
3300 // new top addr (in rbx) through the top addr pointer. Sets ZF if they
3301 // were equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3302 //
3303 // rax: object begin
3304 // rbx: object end
3305 // rdx: instance size in bytes
3306 if (os::is_MP()) {
3307 __ lock();
3308 }
3309 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3311 // if someone beat us on the allocation, try again, otherwise continue
3312 __ jcc(Assembler::notEqual, retry);
3314 __ incr_allocated_bytes(r15_thread, rdx, 0);
3315 }
3317 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3318 // The object is initialized before the header. If the object size is
3319 // zero, go directly to the header initialization.
3320 __ bind(initialize_object);
3321 __ decrementl(rdx, sizeof(oopDesc));
3322 __ jcc(Assembler::zero, initialize_header);
3324 // Initialize object fields
3325 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3326 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3327 {
3328 Label loop;
3329 __ bind(loop);
3330 __ movq(Address(rax, rdx, Address::times_8,
3331 sizeof(oopDesc) - oopSize),
3332 rcx);
3333 __ decrementl(rdx);
3334 __ jcc(Assembler::notZero, loop);
3335 }
3337 // initialize object header only.
3338 __ bind(initialize_header);
3339 if (UseBiasedLocking) {
3340 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3341 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3342 } else {
3343 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3344 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3345 }
3346 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3347 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3348 __ store_klass(rax, rsi); // store klass last
3350 {
3351 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3352 // Trigger dtrace event for fastpath
3353 __ push(atos); // save the return value
3354 __ call_VM_leaf(
3355 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3356 __ pop(atos); // restore the return value
3358 }
3359 __ jmp(done);
3360 }
3363 // slow case
3364 __ bind(slow_case);
3365 __ get_constant_pool(c_rarg1);
3366 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3367 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3368 __ verify_oop(rax);
3370 // continue
3371 __ bind(done);
3372 }
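// The shared-eden path above is compare-and-swap bump allocation;
// a C-style sketch (illustrative only):
//
//   HeapWord* obj;
//   do {                                             // retry
//     obj = *top_addr;
//     if (obj + size > *end_addr) goto slow_case;
//   } while (!cas(top_addr, obj, obj + size));       // lock cmpxchg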
3374 void TemplateTable::newarray() {
3375 transition(itos, atos);
3376 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3377 __ movl(c_rarg2, rax);
3378 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3379 c_rarg1, c_rarg2);
3380 }
3382 void TemplateTable::anewarray() {
3383 transition(itos, atos);
3384 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3385 __ get_constant_pool(c_rarg1);
3386 __ movl(c_rarg3, rax);
3387 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3388 c_rarg1, c_rarg2, c_rarg3);
3389 }
3391 void TemplateTable::arraylength() {
3392 transition(atos, itos);
3393 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3394 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3395 }
3397 void TemplateTable::checkcast() {
3398 transition(atos, atos);
3399 Label done, is_null, ok_is_subtype, quicked, resolved;
3400 __ testptr(rax, rax); // object is in rax
3401 __ jcc(Assembler::zero, is_null);
3403 // Get cpool & tags index
3404 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3405 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3406 // See if bytecode has already been quicked
3407 __ cmpb(Address(rdx, rbx,
3408 Address::times_1,
3409 Array<u1>::base_offset_in_bytes()),
3410 JVM_CONSTANT_Class);
3411 __ jcc(Assembler::equal, quicked);
3412 __ push(atos); // save receiver for result, and for GC
3413 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3414 // vm_result_2 has metadata result
3415 __ get_vm_result_2(rax, r15_thread);
3416 __ pop_ptr(rdx); // restore receiver
3417 __ jmpb(resolved);
3419 // Get superklass in rax and subklass in rbx
3420 __ bind(quicked);
3421 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3422 __ movptr(rax, Address(rcx, rbx,
3423 Address::times_8, sizeof(ConstantPool)));
3425 __ bind(resolved);
3426 __ load_klass(rbx, rdx);
3428 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3429 // Superklass in rax. Subklass in rbx.
3430 __ gen_subtype_check(rbx, ok_is_subtype);
3432 // Come here on failure
3433 __ push_ptr(rdx);
3434 // object is at TOS
3435 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3437 // Come here on success
3438 __ bind(ok_is_subtype);
3439 __ mov(rax, rdx); // Restore object in rdx
3441 // Collect counts on whether this check-cast sees NULLs a lot or not.
3442 if (ProfileInterpreter) {
3443 __ jmp(done);
3444 __ bind(is_null);
3445 __ profile_null_seen(rcx);
3446 } else {
3447 __ bind(is_null); // same as 'done'
3448 }
3449 __ bind(done);
3450 }
3452 void TemplateTable::instanceof() {
3453 transition(atos, itos);
3454 Label done, is_null, ok_is_subtype, quicked, resolved;
3455 __ testptr(rax, rax);
3456 __ jcc(Assembler::zero, is_null);
3458 // Get cpool & tags index
3459 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3460 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3461 // See if bytecode has already been quicked
3462 __ cmpb(Address(rdx, rbx,
3463 Address::times_1,
3464 Array<u1>::base_offset_in_bytes()),
3465 JVM_CONSTANT_Class);
3466 __ jcc(Assembler::equal, quicked);
3468 __ push(atos); // save receiver for result, and for GC
3469 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3470 // vm_result_2 has metadata result
3471 __ get_vm_result_2(rax, r15_thread);
3472 __ pop_ptr(rdx); // restore receiver
3473 __ verify_oop(rdx);
3474 __ load_klass(rdx, rdx);
3475 __ jmpb(resolved);
3477 // Get superklass in rax and subklass in rdx
3478 __ bind(quicked);
3479 __ load_klass(rdx, rax);
3480 __ movptr(rax, Address(rcx, rbx,
3481 Address::times_8, sizeof(ConstantPool)));
3483 __ bind(resolved);
3485 // Generate subtype check. Blows rcx, rdi
3486 // Superklass in rax. Subklass in rdx.
3487 __ gen_subtype_check(rdx, ok_is_subtype);
3489 // Come here on failure
3490 __ xorl(rax, rax);
3491 __ jmpb(done);
3492 // Come here on success
3493 __ bind(ok_is_subtype);
3494 __ movl(rax, 1);
3496 // Collect counts on whether this test sees NULLs a lot or not.
3497 if (ProfileInterpreter) {
3498 __ jmp(done);
3499 __ bind(is_null);
3500 __ profile_null_seen(rcx);
3501 } else {
3502 __ bind(is_null); // same as 'done'
3503 }
3504 __ bind(done);
3505 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3506 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3507 }
3509 //-----------------------------------------------------------------------------
3510 // Breakpoints
3511 void TemplateTable::_breakpoint() {
3512 // Note: We get here even if we are single stepping.
3513 // jbug insists on setting breakpoints at every bytecode
3514 // even if we are in single step mode.
3516 transition(vtos, vtos);
3518 // get the unpatched byte code
3519 __ get_method(c_rarg1);
3520 __ call_VM(noreg,
3521 CAST_FROM_FN_PTR(address,
3522 InterpreterRuntime::get_original_bytecode_at),
3523 c_rarg1, r13);
3524 __ mov(rbx, rax);
3526 // post the breakpoint event
3527 __ get_method(c_rarg1);
3528 __ call_VM(noreg,
3529 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3530 c_rarg1, r13);
3532 // complete the execution of original bytecode
3533 __ dispatch_only_normal(vtos);
3534 }
3536 //-----------------------------------------------------------------------------
3537 // Exceptions
3539 void TemplateTable::athrow() {
3540 transition(atos, vtos);
3541 __ null_check(rax);
3542 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3543 }
3545 //-----------------------------------------------------------------------------
3546 // Synchronization
3547 //
3548 // Note: monitorenter & exit are symmetric routines, which is reflected
3549 // in the assembly code structure as well
3550 //
3551 // Stack layout:
3552 //
3553 // [expressions ] <--- rsp = expression stack top
3554 // ..
3555 // [expressions ]
3556 // [monitor entry] <--- monitor block top = expression stack bot
3557 // ..
3558 // [monitor entry]
3559 // [frame data ] <--- monitor block bot
3560 // ...
3561 // [saved rbp ] <--- rbp
3562 void TemplateTable::monitorenter() {
3563 transition(atos, vtos);
3565 // check for NULL object
3566 __ null_check(rax);
3568 const Address monitor_block_top(
3569 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3570 const Address monitor_block_bot(
3571 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3572 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3574 Label allocated;
3576 // initialize entry pointer
3577 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3579 // find a free slot in the monitor block (result in c_rarg1)
3580 {
3581 Label entry, loop, exit;
3582 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3583 // starting with top-most entry
3584 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3585 // of monitor block
3586 __ jmpb(entry);
3588 __ bind(loop);
3589 // check if current entry is used
3590 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3591 // if not used then remember entry in c_rarg1
3592 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3593 // check if current entry is for same object
3594 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3595 // if same object then stop searching
3596 __ jccb(Assembler::equal, exit);
3597 // otherwise advance to next entry
3598 __ addptr(c_rarg3, entry_size);
3599 __ bind(entry);
3600 // check if bottom reached
3601 __ cmpptr(c_rarg3, c_rarg2);
3602 // if not at bottom then check this entry
3603 __ jcc(Assembler::notEqual, loop);
3604 __ bind(exit);
3605 }
3607 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3608 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3610 // allocate one if there's no free slot
3611 {
3612 Label entry, loop;
3613 // 1. compute new pointers // rsp: old expression stack top
3614 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3615 __ subptr(rsp, entry_size); // move expression stack top
3616 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3617 __ mov(c_rarg3, rsp); // set start value for copy loop
3618 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3619 __ jmp(entry);
3620 // 2. move expression stack contents
3621 __ bind(loop);
3622 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3623 // word from old location
3624 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3625 __ addptr(c_rarg3, wordSize); // advance to next word
3626 __ bind(entry);
3627 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3628 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3629 // copy next word
3630 }
3632 // call run-time routine
3633 // c_rarg1: points to monitor entry
3634 __ bind(allocated);
3636 // Increment bcp to point to the next bytecode, so exception
3637 // handling for async. exceptions work correctly.
3638 // The object has already been popped from the stack, so the
3639 // expression stack looks correct.
3640 __ increment(r13);
3642 // store object
3643 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3644 __ lock_object(c_rarg1);
3646 // check to make sure this monitor doesn't cause stack overflow after locking
3647 __ save_bcp(); // in case of exception
3648 __ generate_stack_overflow_check(0);
3650 // The bcp has already been incremented. Just need to dispatch to
3651 // next instruction.
3652 __ dispatch_next(vtos);
3653 }
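// The free-slot search above, in outline (pseudo-C, illustrative):
//
//   free = NULL;
//   for (e = monitor_block_top; e != monitor_block_bot; e = next(e)) {
//     if (e->obj == NULL)     free = e;  // remember a free slot
//     if (e->obj == lock_obj) break;     // stop at entry for same object
//   }
//   if (free == NULL) grow the block by one entry, shifting the
//   expression stack down, as done in steps 1./2. above;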
3656 void TemplateTable::monitorexit() {
3657 transition(atos, vtos);
3659 // check for NULL object
3660 __ null_check(rax);
3662 const Address monitor_block_top(
3663 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3664 const Address monitor_block_bot(
3665 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3666 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3668 Label found;
3670 // find matching slot
3671 {
3672 Label entry, loop;
3673 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3674 // starting with top-most entry
3675 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3676 // of monitor block
3677 __ jmpb(entry);
3679 __ bind(loop);
3680 // check if current entry is for same object
3681 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3682 // if same object then stop searching
3683 __ jcc(Assembler::equal, found);
3684 // otherwise advance to next entry
3685 __ addptr(c_rarg1, entry_size);
3686 __ bind(entry);
3687 // check if bottom reached
3688 __ cmpptr(c_rarg1, c_rarg2);
3689 // if not at bottom then check this entry
3690 __ jcc(Assembler::notEqual, loop);
3691 }
3693 // error handling. Unlocking was not block-structured
3694 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3695 InterpreterRuntime::throw_illegal_monitor_state_exception));
3696 __ should_not_reach_here();
3698 // call run-time routine
3699 // c_rarg1: points to monitor entry
3700 __ bind(found);
3701 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3702 __ unlock_object(c_rarg1);
3703 __ pop_ptr(rax); // discard object
3704 }
3707 // Wide instructions
3708 void TemplateTable::wide() {
3709 transition(vtos, vtos);
3710 __ load_unsigned_byte(rbx, at_bcp(1));
3711 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3712 __ jmp(Address(rscratch1, rbx, Address::times_8));
3713 // Note: the r13 increment step is part of the individual wide
3714 // bytecode implementations
3715 }
3718 // Multi arrays
3719 void TemplateTable::multianewarray() {
3720 transition(vtos, atos);
3721 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3722 // last dim is on top of stack; we want address of first one:
3723 // first_addr = last_addr + (ndims - 1) * wordSize
3724 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3725 call_VM(rax,
3726 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3727 c_rarg1);
3728 __ load_unsigned_byte(rbx, at_bcp(3));
3729 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3730 }
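// Worked example for the address arithmetic above (ndims == 3): the
// three dimension words sit on the expression stack with the last one
// at rsp, so the lea computes
//   first_addr = rsp + 3 * wordSize - wordSize = rsp + 2 * wordSize,
// i.e. the slot of the first dimension; the final lea then pops all
// ndims slots at once.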
3731 #endif // !CC_INTERP