/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_64.cpp.incl"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}
// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}
// At the top of the Java expression stack, which may be different from rsp().
// It isn't different for category 1 values.
static inline Address at_tos () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}
// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg, store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, val, r8, rbx);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {
  if (!RewriteBytecodes) {
    return;
  }
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
  __ jcc(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. If the next bytecode is _fast_iload, it
    // is an already-rewritten iload (neither a plain iload nor a caload),
    // so the current and next bytecodes form an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}
void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}
void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0, which doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  // An example of this delayed rewrite is sketched below.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes, being short, are the most profitable to rewrite.
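  //
  // Illustrative sketch (an assumed execution trace, not generated code):
  // for a body
  //   aload_0; getfield #5; ...
  // the first dispatch of aload_0 sees a plain _getfield and leaves the
  // stream alone. Once that getfield has itself been rewritten to, say,
  // _fast_igetfield, a later dispatch of aload_0 rewrites the leading
  // bytecode to _fast_iaccess_0, so the pair dispatches once instead of
  // twice from then on.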
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);
    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);
    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}
void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}
void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx); // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         sizeof(oopDesc) +
                         objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array + index*oopSize + 12 into a single register. Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}
void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}
void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}
void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}
void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}
void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}
// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
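
// Usage sketch: each pool below reserves four jlongs (32 bytes), so rounding
// &pool[1] down to a 16-byte boundary always lands inside the pool.
// double_quadword stores the two mask halves at that aligned address and
// returns it, ready to serve as a memory operand for xorps/xorpd in
// fneg()/dneg() below.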
// Buffers for the 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];
void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}
void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  // the index and the constant -> fix this
}
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}
void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // increment counter
    __ movl(rax, Address(rcx, be_offset)); // load backedge counter
    __ incrementl(rax, InvocationCounter::count_increment); // increment
                                                            // counter
    __ movl(Address(rcx, be_offset), rax); // store counter

    __ movl(rax, Address(rcx, inv_offset));            // load invocation counter
    __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
    __ addl(rax, Address(rcx, be_offset));             // add both counters

    if (ProfileInterpreter) {
      // Test to see if we should create a method data oop
      __ cmp32(rax,
               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, dispatch);

      // if no method data exists, go to profile method
      __ test_method_data_pointer(rax, profile_method);

      if (UseOnStackReplacement) {
        // check for overflow against ebx which is the MDO taken count
        __ cmp32(rbx,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::below, dispatch);

        // When ProfileInterpreter is on, the backedge_count comes
        // from the methodDataOop, which value does not get reset on
        // the call to frequency_counter_overflow(). To avoid
        // excessive calls to the overflow routine while the method is
        // being compiled, add a second test to make sure the overflow
        // function is called only once every overflow_frequency.
        const int overflow_frequency = 1024;
        __ andl(rbx, overflow_frequency - 1);
        __ jcc(Assembler::zero, backedge_counter_overflow);
      }
    } else {
      if (UseOnStackReplacement) {
        // check for overflow against eax, which is the sum of the
        // counters
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::profile_method), r13);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx,
                             in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ jmp(dispatch);
    }
    if (UseOnStackReplacement) {
      // backedge counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}
void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}
void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }
  // Register allocation
  const Register key   = rax; // already set (tosca)
  const Register array = rbx;
  const Register i     = rcx;
  const Register j     = rdx;
  const Register h     = rdi;
  const Register temp  = rsi;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ cmovl(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmovl(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);
}
1959 void TemplateTable::_return(TosState state) {
1960 transition(state, state);
1961 assert(_desc->calls_vm(),
1962 "inconsistent calls_vm information"); // call in remove_activation
1964 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1965 assert(state == vtos, "only valid state");
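// Local 0 is the receiver of the constructor we are returning from; if
// its class has a finalizer, register the object with the runtime before
// the activation is removed.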
1966 __ movptr(c_rarg1, aaddress(0));
1967 __ load_klass(rdi, c_rarg1);
1968 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1969 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
1970 Label skip_register_finalizer;
1971 __ jcc(Assembler::zero, skip_register_finalizer);
1973 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
1975 __ bind(skip_register_finalizer);
1976 }
1978 __ remove_activation(state, r13);
1979 __ jmp(r13);
1980 }
1982 // ----------------------------------------------------------------------------
1983 // Volatile variables demand their effects be made known to all CPUs
1984 // in order. Store buffers on most chips allow reads & writes to
1985 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
1986 // without some kind of memory barrier (i.e., it's not sufficient that
1987 // the interpreter does not reorder volatile references; the hardware
1988 // must not reorder them either).
1989 //
1990 // According to the new Java Memory Model (JMM):
1991 // (1) All volatiles are serialized with respect to each other. Also,
1992 // reads & writes act as acquire & release, so:
1993 // (2) A read cannot let unrelated NON-volatile memory refs that
1994 // happen after the read float up to before the read. It's OK for
1995 // non-volatile memory refs that happen before the volatile read to
1996 // float down below it.
1997 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
1998 // memory refs that happen BEFORE the write float down to after the
1999 // write. It's OK for non-volatile memory refs that happen after the
2000 // volatile write to float up before it.
2001 //
2002 // We only put in barriers around volatile refs (they are expensive),
2003 // not _between_ memory refs (that would require us to track the
2004 // flavor of the previous memory refs). Requirements (2) and (3)
2005 // require some barriers before volatile stores and after volatile
2006 // loads. These nearly cover requirement (1) but miss the
2007 // volatile-store-volatile-load case. This final case is placed after
2008 // volatile-stores although it could just as well go before
2009 // volatile-loads.
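// Illustrative sketch (not emitted code) of the resulting policy on this
// port: volatile loads need no explicit barrier (see the commented-out
// LoadLoad|LoadStore barriers in the accessors below), while a volatile
// store is followed by
//
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
//
// as done in putfield_or_static and fast_storefield below.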
2010 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2011 order_constraint) {
2012 // Helper function to emit a memory barrier for volatile accesses
2013 if (os::is_MP()) { // Not needed on single CPU
2014 __ membar(order_constraint);
2015 }
2016 }
2018 void TemplateTable::resolve_cache_and_index(int byte_no,
2019 Register result,
2020 Register Rcache,
2021 Register index,
2022 size_t index_size) {
2023 const Register temp = rbx;
2024 assert_different_registers(result, Rcache, index, temp);
2026 Label resolved;
2027 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2028 if (byte_no == f1_oop) {
2029 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2030 // This kind of CP cache entry does not need to match the flags byte, because
2031 // there is a 1-1 relation between bytecode type and CP entry type.
2032 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2033 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2034 __ testptr(result, result);
2035 __ jcc(Assembler::notEqual, resolved);
2036 } else {
2037 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2038 assert(result == noreg, ""); //else change code for setting result
2039 const int shift_count = (1 + byte_no) * BitsPerByte;
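// The cp cache 'indices' field packs the original constant pool index in
// its low 16 bits and the (up to two) resolved bytecodes above it,
// roughly [ b2 | b1 | cp index ]; shifting by (1 + byte_no) * BitsPerByte
// and masking 0xFF extracts the bytecode recorded for this slot, which is
// non-zero only once the entry has been resolved.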
2040 __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2041 __ shrl(temp, shift_count);
2042 // have we resolved this bytecode?
2043 __ andl(temp, 0xFF);
2044 __ cmpl(temp, (int) bytecode());
2045 __ jcc(Assembler::equal, resolved);
2046 }
2048 // resolve first time through
2049 address entry;
2050 switch (bytecode()) {
2051 case Bytecodes::_getstatic:
2052 case Bytecodes::_putstatic:
2053 case Bytecodes::_getfield:
2054 case Bytecodes::_putfield:
2055 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2056 break;
2057 case Bytecodes::_invokevirtual:
2058 case Bytecodes::_invokespecial:
2059 case Bytecodes::_invokestatic:
2060 case Bytecodes::_invokeinterface:
2061 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2062 break;
2063 case Bytecodes::_invokedynamic:
2064 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2065 break;
2066 default:
2067 ShouldNotReachHere();
2068 break;
2069 }
2070 __ movl(temp, (int) bytecode());
2071 __ call_VM(noreg, entry, temp);
2073 // Update registers with resolved info
2074 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2075 if (result != noreg)
2076 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2077 __ bind(resolved);
2078 }
2080 // The Rcache and index registers must be set before the call
2081 void TemplateTable::load_field_cp_cache_entry(Register obj,
2082 Register cache,
2083 Register index,
2084 Register off,
2085 Register flags,
2086 bool is_static = false) {
2087 assert_different_registers(cache, index, flags, off);
2089 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2090 // Field offset
2091 __ movptr(off, Address(cache, index, Address::times_8,
2092 in_bytes(cp_base_offset +
2093 ConstantPoolCacheEntry::f2_offset())));
2094 // Flags
2095 __ movl(flags, Address(cache, index, Address::times_8,
2096 in_bytes(cp_base_offset +
2097 ConstantPoolCacheEntry::flags_offset())));
2099 // For static fields, overwrite obj with the klass holding the field (f1)
2100 if (is_static) {
2101 __ movptr(obj, Address(cache, index, Address::times_8,
2102 in_bytes(cp_base_offset +
2103 ConstantPoolCacheEntry::f1_offset())));
2104 }
2105 }
2107 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2108 Register method,
2109 Register itable_index,
2110 Register flags,
2111 bool is_invokevirtual,
2112 bool is_invokevfinal, /*unused*/
2113 bool is_invokedynamic) {
2114 // setup registers
2115 const Register cache = rcx;
2116 const Register index = rdx;
2117 assert_different_registers(method, flags);
2118 assert_different_registers(method, cache, index);
2119 assert_different_registers(itable_index, flags);
2120 assert_different_registers(itable_index, cache, index);
2121 // determine constant pool cache field offsets
2122 const int method_offset = in_bytes(
2123 constantPoolCacheOopDesc::base_offset() +
2124 (is_invokevirtual
2125 ? ConstantPoolCacheEntry::f2_offset()
2126 : ConstantPoolCacheEntry::f1_offset()));
2127 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2128 ConstantPoolCacheEntry::flags_offset());
2129 // access constant pool cache fields
2130 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2131 ConstantPoolCacheEntry::f2_offset());
2133 if (byte_no == f1_oop) {
2134 // Resolved f1_oop goes directly into 'method' register.
2135 assert(is_invokedynamic, "");
2136 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2137 } else {
2138 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2139 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2140 }
2141 if (itable_index != noreg) {
2142 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2143 }
2144 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2145 }
2148 // The cache and index registers are expected to be set before the call.
2149 // Their correct values are preserved across it.
2150 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2151 bool is_static, bool has_tos) {
2152 // Do the JVMTI work here to avoid disturbing the register state below.
2153 // We use the c_rarg registers here because they are the argument
2154 // registers for the VM call below.
2155 if (JvmtiExport::can_post_field_access()) {
2156 // Check to see if a field access watch has been set before we
2157 // take the time to call into the VM.
2158 Label L1;
2159 assert_different_registers(cache, index, rax);
2160 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2161 __ testl(rax, rax);
2162 __ jcc(Assembler::zero, L1);
2164 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2166 // cache entry pointer
2167 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
2168 __ shll(c_rarg3, LogBytesPerWord);
2169 __ addptr(c_rarg2, c_rarg3);
2170 if (is_static) {
2171 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2172 } else {
2173 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2174 __ verify_oop(c_rarg1);
2175 }
2176 // c_rarg1: object pointer or NULL
2177 // c_rarg2: cache entry pointer
2178 // c_rarg3: jvalue object on the stack
2179 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2180 InterpreterRuntime::post_field_access),
2181 c_rarg1, c_rarg2, c_rarg3);
2182 __ get_cache_and_index_at_bcp(cache, index, 1);
2183 __ bind(L1);
2184 }
2185 }
2187 void TemplateTable::pop_and_check_object(Register r) {
2188 __ pop_ptr(r);
2189 __ null_check(r); // for field access must check obj.
2190 __ verify_oop(r);
2191 }
2193 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2194 transition(vtos, vtos);
2196 const Register cache = rcx;
2197 const Register index = rdx;
2198 const Register obj = c_rarg3;
2199 const Register off = rbx;
2200 const Register flags = rax;
2201 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2203 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2204 jvmti_post_field_access(cache, index, is_static, false);
2205 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2207 if (!is_static) {
2208 // obj is on the stack
2209 pop_and_check_object(obj);
2210 }
2212 const Address field(obj, off, Address::times_1);
2214 Label Done, notByte, notInt, notShort, notChar,
2215 notLong, notFloat, notObj, notDouble;
2217 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2218 assert(btos == 0, "change code, btos != 0");
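// The tos state values compared against below follow the TosState enum
// order (btos, ctos, stos, itos, ltos, ftos, dtos, atos, ...), with
// btos == 0 as asserted above.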
2220 __ andl(flags, 0x0F);
2221 __ jcc(Assembler::notZero, notByte);
2222 // btos
2223 __ load_signed_byte(rax, field);
2224 __ push(btos);
2225 // Rewrite bytecode to be faster
2226 if (!is_static) {
2227 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2228 }
2229 __ jmp(Done);
2231 __ bind(notByte);
2232 __ cmpl(flags, atos);
2233 __ jcc(Assembler::notEqual, notObj);
2234 // atos
2235 __ load_heap_oop(rax, field);
2236 __ push(atos);
2237 if (!is_static) {
2238 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2239 }
2240 __ jmp(Done);
2242 __ bind(notObj);
2243 __ cmpl(flags, itos);
2244 __ jcc(Assembler::notEqual, notInt);
2245 // itos
2246 __ movl(rax, field);
2247 __ push(itos);
2248 // Rewrite bytecode to be faster
2249 if (!is_static) {
2250 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2251 }
2252 __ jmp(Done);
2254 __ bind(notInt);
2255 __ cmpl(flags, ctos);
2256 __ jcc(Assembler::notEqual, notChar);
2257 // ctos
2258 __ load_unsigned_short(rax, field);
2259 __ push(ctos);
2260 // Rewrite bytecode to be faster
2261 if (!is_static) {
2262 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2263 }
2264 __ jmp(Done);
2266 __ bind(notChar);
2267 __ cmpl(flags, stos);
2268 __ jcc(Assembler::notEqual, notShort);
2269 // stos
2270 __ load_signed_short(rax, field);
2271 __ push(stos);
2272 // Rewrite bytecode to be faster
2273 if (!is_static) {
2274 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2275 }
2276 __ jmp(Done);
2278 __ bind(notShort);
2279 __ cmpl(flags, ltos);
2280 __ jcc(Assembler::notEqual, notLong);
2281 // ltos
2282 __ movq(rax, field);
2283 __ push(ltos);
2284 // Rewrite bytecode to be faster
2285 if (!is_static) {
2286 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2287 }
2288 __ jmp(Done);
2290 __ bind(notLong);
2291 __ cmpl(flags, ftos);
2292 __ jcc(Assembler::notEqual, notFloat);
2293 // ftos
2294 __ movflt(xmm0, field);
2295 __ push(ftos);
2296 // Rewrite bytecode to be faster
2297 if (!is_static) {
2298 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2299 }
2300 __ jmp(Done);
2302 __ bind(notFloat);
2303 #ifdef ASSERT
2304 __ cmpl(flags, dtos);
2305 __ jcc(Assembler::notEqual, notDouble);
2306 #endif
2307 // dtos
2308 __ movdbl(xmm0, field);
2309 __ push(dtos);
2310 // Rewrite bytecode to be faster
2311 if (!is_static) {
2312 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2313 }
2314 #ifdef ASSERT
2315 __ jmp(Done);
2317 __ bind(notDouble);
2318 __ stop("Bad state");
2319 #endif
2321 __ bind(Done);
2322 // [jk] not needed currently
2323 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2324 // Assembler::LoadStore));
2325 }
2328 void TemplateTable::getfield(int byte_no) {
2329 getfield_or_static(byte_no, false);
2330 }
2332 void TemplateTable::getstatic(int byte_no) {
2333 getfield_or_static(byte_no, true);
2334 }
2336 // The cache and index registers are expected to be set before the call.
2337 // The function may destroy various registers, just not the cache and index registers.
2338 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2339 transition(vtos, vtos);
2341 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2343 if (JvmtiExport::can_post_field_modification()) {
2344 // Check to see if a field modification watch has been set before
2345 // we take the time to call into the VM.
2346 Label L1;
2347 assert_different_registers(cache, index, rax);
2348 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2349 __ testl(rax, rax);
2350 __ jcc(Assembler::zero, L1);
2352 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2354 if (is_static) {
2355 // Life is simple. Null out the object pointer.
2356 __ xorl(c_rarg1, c_rarg1);
2357 } else {
2358 // Life is harder. The stack holds the value on top, followed by
2359 // the object. We don't know the size of the value, though; it
2360 // could be one or two words depending on its type. As a result,
2361 // we must find the type to determine where the object is.
2362 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2363 Address::times_8,
2364 in_bytes(cp_base_offset +
2365 ConstantPoolCacheEntry::flags_offset())));
2366 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
2367 // Make sure we don't need to mask rcx for tosBits after the
2368 // above shift
2369 ConstantPoolCacheEntry::verify_tosBits();
2370 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2371 __ cmpl(c_rarg3, ltos);
2372 __ cmovptr(Assembler::equal,
2373 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2374 __ cmpl(c_rarg3, dtos);
2375 __ cmovptr(Assembler::equal,
2376 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2377 }
2378 // cache entry pointer
2379 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2380 __ shll(rscratch1, LogBytesPerWord);
2381 __ addptr(c_rarg2, rscratch1);
2382 // object (tos)
2383 __ mov(c_rarg3, rsp);
2384 // c_rarg1: object pointer set up above (NULL if static)
2385 // c_rarg2: cache entry pointer
2386 // c_rarg3: jvalue object on the stack
2387 __ call_VM(noreg,
2388 CAST_FROM_FN_PTR(address,
2389 InterpreterRuntime::post_field_modification),
2390 c_rarg1, c_rarg2, c_rarg3);
2391 __ get_cache_and_index_at_bcp(cache, index, 1);
2392 __ bind(L1);
2393 }
2394 }
2396 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2397 transition(vtos, vtos);
2399 const Register cache = rcx;
2400 const Register index = rdx;
2401 const Register obj = rcx;
2402 const Register off = rbx;
2403 const Register flags = rax;
2404 const Register bc = c_rarg3;
2406 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2407 jvmti_post_field_mod(cache, index, is_static);
2408 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2410 // [jk] not needed currently
2411 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2412 // Assembler::StoreStore));
2414 Label notVolatile, Done;
2415 __ movl(rdx, flags);
2416 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2417 __ andl(rdx, 0x1);
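// rdx now holds the field's volatile bit (0 or 1); it is tested after
// the store below to decide whether a memory barrier is needed.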
2419 // field address
2420 const Address field(obj, off, Address::times_1);
2422 Label notByte, notInt, notShort, notChar,
2423 notLong, notFloat, notObj, notDouble;
2425 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2427 assert(btos == 0, "change code, btos != 0");
2428 __ andl(flags, 0x0f);
2429 __ jcc(Assembler::notZero, notByte);
2430 // btos
2431 __ pop(btos);
2432 if (!is_static) pop_and_check_object(obj);
2433 __ movb(field, rax);
2434 if (!is_static) {
2435 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
2436 }
2437 __ jmp(Done);
2439 __ bind(notByte);
2440 __ cmpl(flags, atos);
2441 __ jcc(Assembler::notEqual, notObj);
2442 // atos
2443 __ pop(atos);
2444 if (!is_static) pop_and_check_object(obj);
2446 // Store into the field
2447 do_oop_store(_masm, field, rax, _bs->kind(), false);
2449 if (!is_static) {
2450 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
2451 }
2452 __ jmp(Done);
2454 __ bind(notObj);
2455 __ cmpl(flags, itos);
2456 __ jcc(Assembler::notEqual, notInt);
2457 // itos
2458 __ pop(itos);
2459 if (!is_static) pop_and_check_object(obj);
2460 __ movl(field, rax);
2461 if (!is_static) {
2462 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
2463 }
2464 __ jmp(Done);
2466 __ bind(notInt);
2467 __ cmpl(flags, ctos);
2468 __ jcc(Assembler::notEqual, notChar);
2469 // ctos
2470 __ pop(ctos);
2471 if (!is_static) pop_and_check_object(obj);
2472 __ movw(field, rax);
2473 if (!is_static) {
2474 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
2475 }
2476 __ jmp(Done);
2478 __ bind(notChar);
2479 __ cmpl(flags, stos);
2480 __ jcc(Assembler::notEqual, notShort);
2481 // stos
2482 __ pop(stos);
2483 if (!is_static) pop_and_check_object(obj);
2484 __ movw(field, rax);
2485 if (!is_static) {
2486 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
2487 }
2488 __ jmp(Done);
2490 __ bind(notShort);
2491 __ cmpl(flags, ltos);
2492 __ jcc(Assembler::notEqual, notLong);
2493 // ltos
2494 __ pop(ltos);
2495 if (!is_static) pop_and_check_object(obj);
2496 __ movq(field, rax);
2497 if (!is_static) {
2498 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
2499 }
2500 __ jmp(Done);
2502 __ bind(notLong);
2503 __ cmpl(flags, ftos);
2504 __ jcc(Assembler::notEqual, notFloat);
2505 // ftos
2506 __ pop(ftos);
2507 if (!is_static) pop_and_check_object(obj);
2508 __ movflt(field, xmm0);
2509 if (!is_static) {
2510 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
2511 }
2512 __ jmp(Done);
2514 __ bind(notFloat);
2515 #ifdef ASSERT
2516 __ cmpl(flags, dtos);
2517 __ jcc(Assembler::notEqual, notDouble);
2518 #endif
2519 // dtos
2520 __ pop(dtos);
2521 if (!is_static) pop_and_check_object(obj);
2522 __ movdbl(field, xmm0);
2523 if (!is_static) {
2524 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
2525 }
2527 #ifdef ASSERT
2528 __ jmp(Done);
2530 __ bind(notDouble);
2531 __ stop("Bad state");
2532 #endif
2534 __ bind(Done);
2535 // Check for volatile store
2536 __ testl(rdx, rdx);
2537 __ jcc(Assembler::zero, notVolatile);
2538 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2539 Assembler::StoreStore));
2541 __ bind(notVolatile);
2542 }
2544 void TemplateTable::putfield(int byte_no) {
2545 putfield_or_static(byte_no, false);
2546 }
2548 void TemplateTable::putstatic(int byte_no) {
2549 putfield_or_static(byte_no, true);
2550 }
2552 void TemplateTable::jvmti_post_fast_field_mod() {
2553 if (JvmtiExport::can_post_field_modification()) {
2554 // Check to see if a field modification watch has been set before
2555 // we take the time to call into the VM.
2556 Label L2;
2557 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2558 __ testl(c_rarg3, c_rarg3);
2559 __ jcc(Assembler::zero, L2);
2560 __ pop_ptr(rbx); // copy the object pointer from tos
2561 __ verify_oop(rbx);
2562 __ push_ptr(rbx); // put the object pointer back on tos
2563 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2564 __ mov(c_rarg3, rsp);
2565 const Address field(c_rarg3, 0);
2567 switch (bytecode()) { // load values into the jvalue object
2568 case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
2569 case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
2570 case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
2571 case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
2572 case Bytecodes::_fast_sputfield: // fall through
2573 case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
2574 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
2575 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
2576 default:
2577 ShouldNotReachHere();
2578 }
2580 // Save rax because call_VM() will clobber it, then use it for
2581 // JVMTI purposes
2582 __ push(rax);
2583 // access constant pool cache entry
2584 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2585 __ verify_oop(rbx);
2586 // rbx: object pointer copied above
2587 // c_rarg2: cache entry pointer
2588 // c_rarg3: jvalue object on the stack
2589 __ call_VM(noreg,
2590 CAST_FROM_FN_PTR(address,
2591 InterpreterRuntime::post_field_modification),
2592 rbx, c_rarg2, c_rarg3);
2593 __ pop(rax); // restore lower value
2594 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2595 __ bind(L2);
2596 }
2597 }
2599 void TemplateTable::fast_storefield(TosState state) {
2600 transition(state, vtos);
2602 ByteSize base = constantPoolCacheOopDesc::base_offset();
2604 jvmti_post_fast_field_mod();
2606 // access constant pool cache
2607 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2609 // test for volatile with rdx
2610 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2611 in_bytes(base +
2612 ConstantPoolCacheEntry::flags_offset())));
2614 // replace index with field offset from cache entry
2615 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2616 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2618 // [jk] not needed currently
2619 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2620 // Assembler::StoreStore));
2622 Label notVolatile;
2623 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2624 __ andl(rdx, 0x1);
2626 // Get object from stack
2627 pop_and_check_object(rcx);
2629 // field address
2630 const Address field(rcx, rbx, Address::times_1);
2632 // access field
2633 switch (bytecode()) {
2634 case Bytecodes::_fast_aputfield:
2635 do_oop_store(_masm, field, rax, _bs->kind(), false);
2636 break;
2637 case Bytecodes::_fast_lputfield:
2638 __ movq(field, rax);
2639 break;
2640 case Bytecodes::_fast_iputfield:
2641 __ movl(field, rax);
2642 break;
2643 case Bytecodes::_fast_bputfield:
2644 __ movb(field, rax);
2645 break;
2646 case Bytecodes::_fast_sputfield:
2647 // fall through
2648 case Bytecodes::_fast_cputfield:
2649 __ movw(field, rax);
2650 break;
2651 case Bytecodes::_fast_fputfield:
2652 __ movflt(field, xmm0);
2653 break;
2654 case Bytecodes::_fast_dputfield:
2655 __ movdbl(field, xmm0);
2656 break;
2657 default:
2658 ShouldNotReachHere();
2659 }
2661 // Check for volatile store
2662 __ testl(rdx, rdx);
2663 __ jcc(Assembler::zero, notVolatile);
2664 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2665 Assembler::StoreStore));
2666 __ bind(notVolatile);
2667 }
2670 void TemplateTable::fast_accessfield(TosState state) {
2671 transition(atos, state);
2673 // Do the JVMTI work here to avoid disturbing the register state below
2674 if (JvmtiExport::can_post_field_access()) {
2675 // Check to see if a field access watch has been set before we
2676 // take the time to call into the VM.
2677 Label L1;
2678 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2679 __ testl(rcx, rcx);
2680 __ jcc(Assembler::zero, L1);
2681 // access constant pool cache entry
2682 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2683 __ verify_oop(rax);
2684 __ mov(r12, rax); // save object pointer before call_VM() clobbers it
2685 __ mov(c_rarg1, rax);
2686 // c_rarg1: object pointer copied above
2687 // c_rarg2: cache entry pointer
2688 __ call_VM(noreg,
2689 CAST_FROM_FN_PTR(address,
2690 InterpreterRuntime::post_field_access),
2691 c_rarg1, c_rarg2);
2692 __ mov(rax, r12); // restore object pointer
2693 __ reinit_heapbase();
2694 __ bind(L1);
2695 }
2697 // access constant pool cache
2698 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2699 // replace index with field offset from cache entry
2700 // [jk] not needed currently
2701 // if (os::is_MP()) {
2702 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2703 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2704 // ConstantPoolCacheEntry::flags_offset())));
2705 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2706 // __ andl(rdx, 0x1);
2707 // }
2708 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2709 in_bytes(constantPoolCacheOopDesc::base_offset() +
2710 ConstantPoolCacheEntry::f2_offset())));
2712 // rax: object
2713 __ verify_oop(rax);
2714 __ null_check(rax);
2715 Address field(rax, rbx, Address::times_1);
2717 // access field
2718 switch (bytecode()) {
2719 case Bytecodes::_fast_agetfield:
2720 __ load_heap_oop(rax, field);
2721 __ verify_oop(rax);
2722 break;
2723 case Bytecodes::_fast_lgetfield:
2724 __ movq(rax, field);
2725 break;
2726 case Bytecodes::_fast_igetfield:
2727 __ movl(rax, field);
2728 break;
2729 case Bytecodes::_fast_bgetfield:
2730 __ movsbl(rax, field);
2731 break;
2732 case Bytecodes::_fast_sgetfield:
2733 __ load_signed_short(rax, field);
2734 break;
2735 case Bytecodes::_fast_cgetfield:
2736 __ load_unsigned_short(rax, field);
2737 break;
2738 case Bytecodes::_fast_fgetfield:
2739 __ movflt(xmm0, field);
2740 break;
2741 case Bytecodes::_fast_dgetfield:
2742 __ movdbl(xmm0, field);
2743 break;
2744 default:
2745 ShouldNotReachHere();
2746 }
2747 // [jk] not needed currently
2748 // if (os::is_MP()) {
2749 // Label notVolatile;
2750 // __ testl(rdx, rdx);
2751 // __ jcc(Assembler::zero, notVolatile);
2752 // __ membar(Assembler::LoadLoad);
2753 // __ bind(notVolatile);
2754 //};
2755 }
2757 void TemplateTable::fast_xaccess(TosState state) {
2758 transition(vtos, state);
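// _fast_iaccess_0 / _fast_aaccess_0 / _fast_faccess_0 are the rewritten
// forms of an aload_0 immediately followed by a fast getfield, so the
// getfield's cp cache index sits at bcp + 2 (hence the '2' passed below).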
2760 // get receiver
2761 __ movptr(rax, aaddress(0));
2762 // access constant pool cache
2763 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2764 __ movptr(rbx,
2765 Address(rcx, rdx, Address::times_8,
2766 in_bytes(constantPoolCacheOopDesc::base_offset() +
2767 ConstantPoolCacheEntry::f2_offset())));
2768 // make sure exception is reported in correct bcp range (getfield is
2769 // next instruction)
2770 __ increment(r13);
2771 __ null_check(rax);
2772 switch (state) {
2773 case itos:
2774 __ movl(rax, Address(rax, rbx, Address::times_1));
2775 break;
2776 case atos:
2777 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2778 __ verify_oop(rax);
2779 break;
2780 case ftos:
2781 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2782 break;
2783 default:
2784 ShouldNotReachHere();
2785 }
2787 // [jk] not needed currently
2788 // if (os::is_MP()) {
2789 // Label notVolatile;
2790 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2791 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2792 // ConstantPoolCacheEntry::flags_offset())));
2793 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2794 // __ testl(rdx, 0x1);
2795 // __ jcc(Assembler::zero, notVolatile);
2796 // __ membar(Assembler::LoadLoad);
2797 // __ bind(notVolatile);
2798 // }
2800 __ decrement(r13);
2801 }
2805 //-----------------------------------------------------------------------------
2806 // Calls
2808 void TemplateTable::count_calls(Register method, Register temp) {
2809 // implemented elsewhere
2810 ShouldNotReachHere();
2811 }
2813 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2814 // determine flags
2815 Bytecodes::Code code = bytecode();
2816 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2817 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2818 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2819 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2820 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2821 const bool receiver_null_check = is_invokespecial;
2822 const bool save_flags = is_invokeinterface || is_invokevirtual;
2823 // setup registers & access constant pool cache
2824 const Register recv = rcx;
2825 const Register flags = rdx;
2826 assert_different_registers(method, index, recv, flags);
2828 // save 'interpreter return address'
2829 __ save_bcp();
2831 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2833 // load receiver if needed (note: no return address pushed yet)
2834 if (load_receiver) {
2835 assert(!is_invokedynamic, "");
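// The low byte of the flags word holds the invoke's parameter size, so
// the receiver sits that many slots down the expression stack, below all
// of the arguments.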
2836 __ movl(recv, flags);
2837 __ andl(recv, 0xFF);
2838 Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
2839 __ movptr(recv, recv_addr);
2840 __ verify_oop(recv);
2841 }
2843 // do null check if needed
2844 if (receiver_null_check) {
2845 __ null_check(recv);
2846 }
2848 if (save_flags) {
2849 __ movl(r13, flags);
2850 }
2852 // compute return type
2853 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2854 // Make sure we don't need to mask flags for tosBits after the above shift
2855 ConstantPoolCacheEntry::verify_tosBits();
2856 // load return address
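// (two tables, because invokeinterface and invokedynamic are 5-byte
// bytecodes while the other invokes are 3 bytes, so their return
// addresses differ)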
2857 {
2858 address table_addr;
2859 if (is_invokeinterface || is_invokedynamic)
2860 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2861 else
2862 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2863 ExternalAddress table(table_addr);
2864 __ lea(rscratch1, table);
2865 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2866 }
2868 // push return address
2869 __ push(flags);
2871 // Restore the flags field from the constant pool cache, and restore
2872 // r13 (the bytecode pointer) for later null checks.
2873 if (save_flags) {
2874 __ movl(flags, r13);
2875 __ restore_bcp();
2876 }
2877 }
2880 void TemplateTable::invokevirtual_helper(Register index,
2881 Register recv,
2882 Register flags) {
2883 // Uses temporary registers rax, rdx
2884 assert_different_registers(index, recv, rax, rdx);
2885 // Test for an invoke of a final method
2886 Label notFinal;
2887 __ movl(rax, flags);
2888 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2889 __ jcc(Assembler::zero, notFinal);
2891 const Register method = index; // method must be rbx
2892 assert(method == rbx,
2893 "methodOop must be rbx for interpreter calling convention");
2895 // do the call - the index is actually the method to call
2896 __ verify_oop(method);
2898 // It's final, need a null check here!
2899 __ null_check(recv);
2901 // profile this call
2902 __ profile_final_call(rax);
2904 __ jump_from_interpreted(method, rax);
2906 __ bind(notFinal);
2908 // get receiver klass
2909 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2910 __ load_klass(rax, recv);
2912 __ verify_oop(rax);
2914 // profile this call
2915 __ profile_virtual_call(rax, r14, rdx);
2917 // get target methodOop & entry point
2918 const int base = instanceKlass::vtable_start_offset() * wordSize;
2919 assert(vtableEntry::size() * wordSize == 8,
2920 "adjust the scaling in the code below");
2921 __ movptr(method, Address(rax, index,
2922 Address::times_8,
2923 base + vtableEntry::method_offset_in_bytes()));
2924 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
2925 __ jump_from_interpreted(method, rdx);
2926 }
2929 void TemplateTable::invokevirtual(int byte_no) {
2930 transition(vtos, vtos);
2931 assert(byte_no == f2_byte, "use this argument");
2932 prepare_invoke(rbx, noreg, byte_no);
2934 // rbx: index
2935 // rcx: receiver
2936 // rdx: flags
2938 invokevirtual_helper(rbx, rcx, rdx);
2939 }
2942 void TemplateTable::invokespecial(int byte_no) {
2943 transition(vtos, vtos);
2944 assert(byte_no == f1_byte, "use this argument");
2945 prepare_invoke(rbx, noreg, byte_no);
2946 // do the call
2947 __ verify_oop(rbx);
2948 __ profile_call(rax);
2949 __ jump_from_interpreted(rbx, rax);
2950 }
2953 void TemplateTable::invokestatic(int byte_no) {
2954 transition(vtos, vtos);
2955 assert(byte_no == f1_byte, "use this argument");
2956 prepare_invoke(rbx, noreg, byte_no);
2957 // do the call
2958 __ verify_oop(rbx);
2959 __ profile_call(rax);
2960 __ jump_from_interpreted(rbx, rax);
2961 }
2963 void TemplateTable::fast_invokevfinal(int byte_no) {
2964 transition(vtos, vtos);
2965 assert(byte_no == f2_byte, "use this argument");
2966 __ stop("fast_invokevfinal not used on amd64");
2967 }
2969 void TemplateTable::invokeinterface(int byte_no) {
2970 transition(vtos, vtos);
2971 assert(byte_no == f1_byte, "use this argument");
2972 prepare_invoke(rax, rbx, byte_no);
2974 // rax: Interface
2975 // rbx: index
2976 // rcx: receiver
2977 // rdx: flags
2979 // Special case of invokeinterface called for virtual method of
2980 // java.lang.Object. See cpCacheOop.cpp for details.
2981 // This code isn't produced by javac, but could be produced by
2982 // another compliant Java compiler.
2983 Label notMethod;
2984 __ movl(r14, rdx);
2985 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
2986 __ jcc(Assembler::zero, notMethod);
2988 invokevirtual_helper(rbx, rcx, rdx);
2989 __ bind(notMethod);
2991 // Get receiver klass into rdx - also a null check
2992 __ restore_locals(); // restore r14
2993 __ load_klass(rdx, rcx);
2994 __ verify_oop(rdx);
2996 // profile this call
2997 __ profile_virtual_call(rdx, r13, r14);
2999 Label no_such_interface, no_such_method;
3001 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3002 rdx, rax, rbx,
3003 // outputs: method, scan temp. reg
3004 rbx, r13,
3005 no_such_interface);
3007 // rbx: methodOop to call
3008 // rcx: receiver
3009 // Check for abstract method error
3010 // Note: This should be done more efficiently via a throw_abstract_method_error
3011 // interpreter entry point and a conditional jump to it in case of a null
3012 // method.
3013 __ testptr(rbx, rbx);
3014 __ jcc(Assembler::zero, no_such_method);
3016 // do the call
3017 // rcx: receiver
3018 // rbx: methodOop
3019 __ jump_from_interpreted(rbx, rdx);
3020 __ should_not_reach_here();
3022 // exception handling code follows...
3023 // note: must restore interpreter registers to canonical
3024 // state for exception handling to work correctly!
3026 __ bind(no_such_method);
3027 // throw exception
3028 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3029 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3030 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3031 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3032 // the call_VM checks for exception, so we should never return here.
3033 __ should_not_reach_here();
3035 __ bind(no_such_interface);
3036 // throw exception
3037 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3038 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3039 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3040 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3041 InterpreterRuntime::throw_IncompatibleClassChangeError));
3042 // the call_VM checks for exception, so we should never return here.
3043 __ should_not_reach_here();
3044 return;
3045 }
3047 void TemplateTable::invokedynamic(int byte_no) {
3048 transition(vtos, vtos);
3049 assert(byte_no == f1_oop, "use this argument");
3051 if (!EnableInvokeDynamic) {
3052 // We should not encounter this bytecode if !EnableInvokeDynamic.
3053 // The verifier will stop it. However, if we get past the verifier,
3054 // this will stop the thread in a reasonable way, without crashing the JVM.
3055 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3056 InterpreterRuntime::throw_IncompatibleClassChangeError));
3057 // the call_VM checks for exception, so we should never return here.
3058 __ should_not_reach_here();
3059 return;
3060 }
3062 assert(byte_no == f1_oop, "use this argument");
3063 prepare_invoke(rax, rbx, byte_no);
3065 // rax: CallSite object (f1)
3066 // rbx: unused (f2)
3067 // rcx: receiver address
3068 // rdx: flags (unused)
3070 if (ProfileInterpreter) {
3071 Label L;
3072 // %%% should make a type profile for any invokedynamic that takes a ref argument
3073 // profile this call
3074 __ profile_call(r13);
3075 }
3077 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3078 __ null_check(rcx);
3079 __ prepare_to_jump_from_interpreted();
3080 __ jump_to_method_handle_entry(rcx, rdx);
3081 }
3084 //-----------------------------------------------------------------------------
3085 // Allocation
3087 void TemplateTable::_new() {
3088 transition(vtos, atos);
3089 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3090 Label slow_case;
3091 Label done;
3092 Label initialize_header;
3093 Label initialize_object; // including clearing the fields
3094 Label allocate_shared;
3096 __ get_cpool_and_tags(rsi, rax);
3097 // get instanceKlass
3098 __ movptr(rsi, Address(rsi, rdx,
3099 Address::times_8, sizeof(constantPoolOopDesc)));
3101 // make sure the class we're about to instantiate has been
3102 // resolved. Note: slow_case does a pop of stack, which is why we
3103 // loaded class/pushed above
3104 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3105 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3106 JVM_CONSTANT_Class);
3107 __ jcc(Assembler::notEqual, slow_case);
3109 // make sure klass is initialized & doesn't have finalizer
3110 // make sure klass is fully initialized
3111 __ cmpl(Address(rsi,
3112 instanceKlass::init_state_offset_in_bytes() +
3113 sizeof(oopDesc)),
3114 instanceKlass::fully_initialized);
3115 __ jcc(Assembler::notEqual, slow_case);
3117 // get instance_size in instanceKlass (scaled to a count of bytes)
3118 __ movl(rdx,
3119 Address(rsi,
3120 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3121 // test to see if it has a finalizer or is malformed in some way
3122 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3123 __ jcc(Assembler::notZero, slow_case);
3125 // Allocate the instance
3126 // 1) Try to allocate in the TLAB
3127 // 2) if fail and the object is large allocate in the shared Eden
3128 // 3) if the above fails (or is not applicable), go to a slow case
3129 // (creates a new TLAB, etc.)
3131 const bool allow_shared_alloc =
3132 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3134 if (UseTLAB) {
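// Bump-pointer allocation in the thread-local TLAB: compute
// new_top = top + size; if it would pass tlab_end, fall out to the
// shared-eden or slow-case path, otherwise commit new_top. No atomics
// are needed since the TLAB is thread-private.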
3135 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3136 __ lea(rbx, Address(rax, rdx, Address::times_1));
3137 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3138 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3139 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3140 if (ZeroTLAB) {
3141 // the fields have been already cleared
3142 __ jmp(initialize_header);
3143 } else {
3144 // initialize both the header and fields
3145 __ jmp(initialize_object);
3146 }
3147 }
3149 // Allocation in the shared Eden, if allowed.
3150 //
3151 // rdx: instance size in bytes
3152 if (allow_shared_alloc) {
3153 __ bind(allocate_shared);
3155 ExternalAddress top((address)Universe::heap()->top_addr());
3156 ExternalAddress end((address)Universe::heap()->end_addr());
3158 const Register RtopAddr = rscratch1;
3159 const Register RendAddr = rscratch2;
3161 __ lea(RtopAddr, top);
3162 __ lea(RendAddr, end);
3163 __ movptr(rax, Address(RtopAddr, 0));
3165 // For retries rax gets set by cmpxchgq
3166 Label retry;
3167 __ bind(retry);
3168 __ lea(rbx, Address(rax, rdx, Address::times_1));
3169 __ cmpptr(rbx, Address(RendAddr, 0));
3170 __ jcc(Assembler::above, slow_case);
3172 // Compare rax with the current top addr, and if still equal, store the
3173 // new top addr (in rbx) at the address of the top addr pointer. Sets ZF
3174 // if it was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3175 //
3176 // rax: object begin
3177 // rbx: object end
3178 // rdx: instance size in bytes
3179 if (os::is_MP()) {
3180 __ lock();
3181 }
3182 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3184 // if someone beat us on the allocation, try again, otherwise continue
3185 __ jcc(Assembler::notEqual, retry);
3186 }
3188 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3189 // The object is initialized before the header. If the object size is
3190 // zero, go directly to the header initialization.
3191 __ bind(initialize_object);
3192 __ decrementl(rdx, sizeof(oopDesc));
3193 __ jcc(Assembler::zero, initialize_header);
3195 // Initialize object fields
3196 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3197 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3198 {
3199 Label loop;
3200 __ bind(loop);
3201 __ movq(Address(rax, rdx, Address::times_8,
3202 sizeof(oopDesc) - oopSize),
3203 rcx);
3204 __ decrementl(rdx);
3205 __ jcc(Assembler::notZero, loop);
3206 }
3208 // initialize object header only.
3209 __ bind(initialize_header);
3210 if (UseBiasedLocking) {
3211 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3212 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3213 } else {
3214 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3215 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3216 }
3217 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3218 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3219 __ store_klass(rax, rsi); // store klass last
3221 {
3222 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3223 // Trigger dtrace event for fastpath
3224 __ push(atos); // save the return value
3225 __ call_VM_leaf(
3226 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3227 __ pop(atos); // restore the return value
3229 }
3230 __ jmp(done);
3231 }
3234 // slow case
3235 __ bind(slow_case);
3236 __ get_constant_pool(c_rarg1);
3237 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3238 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3239 __ verify_oop(rax);
3241 // continue
3242 __ bind(done);
3243 }
3245 void TemplateTable::newarray() {
3246 transition(itos, atos);
3247 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3248 __ movl(c_rarg2, rax);
3249 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3250 c_rarg1, c_rarg2);
3251 }
3253 void TemplateTable::anewarray() {
3254 transition(itos, atos);
3255 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3256 __ get_constant_pool(c_rarg1);
3257 __ movl(c_rarg3, rax);
3258 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3259 c_rarg1, c_rarg2, c_rarg3);
3260 }
3262 void TemplateTable::arraylength() {
3263 transition(atos, itos);
3264 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3265 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3266 }
3268 void TemplateTable::checkcast() {
3269 transition(atos, atos);
3270 Label done, is_null, ok_is_subtype, quicked, resolved;
3271 __ testptr(rax, rax); // object is in rax
3272 __ jcc(Assembler::zero, is_null);
3274 // Get cpool & tags index
3275 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3276 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3277 // See if bytecode has already been quicked
3278 __ cmpb(Address(rdx, rbx,
3279 Address::times_1,
3280 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3281 JVM_CONSTANT_Class);
3282 __ jcc(Assembler::equal, quicked);
3283 __ push(atos); // save receiver for result, and for GC
3284 __ mov(r12, rcx); // save rcx XXX
3285 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3286 __ movq(rcx, r12); // restore rcx XXX
3287 __ reinit_heapbase();
3288 __ pop_ptr(rdx); // restore receiver
3289 __ jmpb(resolved);
3291 // Get superklass in rax and subklass in rbx
3292 __ bind(quicked);
3293 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3294 __ movptr(rax, Address(rcx, rbx,
3295 Address::times_8, sizeof(constantPoolOopDesc)));
3297 __ bind(resolved);
3298 __ load_klass(rbx, rdx);
3300 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3301 // Superklass in rax. Subklass in rbx.
3302 __ gen_subtype_check(rbx, ok_is_subtype);
3304 // Come here on failure
3305 __ push_ptr(rdx);
3306 // object is at TOS
3307 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3309 // Come here on success
3310 __ bind(ok_is_subtype);
3311 __ mov(rax, rdx); // Restore the object from rdx
3313 // Collect counts on whether this check-cast sees NULLs a lot or not.
3314 if (ProfileInterpreter) {
3315 __ jmp(done);
3316 __ bind(is_null);
3317 __ profile_null_seen(rcx);
3318 } else {
3319 __ bind(is_null); // same as 'done'
3320 }
3321 __ bind(done);
3322 }
3324 void TemplateTable::instanceof() {
3325 transition(atos, itos);
3326 Label done, is_null, ok_is_subtype, quicked, resolved;
3327 __ testptr(rax, rax);
3328 __ jcc(Assembler::zero, is_null);
3330 // Get cpool & tags index
3331 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3332 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3333 // See if bytecode has already been quicked
3334 __ cmpb(Address(rdx, rbx,
3335 Address::times_1,
3336 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3337 JVM_CONSTANT_Class);
3338 __ jcc(Assembler::equal, quicked);
3340 __ push(atos); // save receiver for result, and for GC
3341 __ mov(r12, rcx); // save rcx
3342 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3343 __ movq(rcx, r12); // restore rcx
3344 __ reinit_heapbase();
3345 __ pop_ptr(rdx); // restore receiver
3346 __ load_klass(rdx, rdx);
3347 __ jmpb(resolved);
3349 // Get superklass in rax and subklass in rdx
3350 __ bind(quicked);
3351 __ load_klass(rdx, rax);
3352 __ movptr(rax, Address(rcx, rbx,
3353 Address::times_8, sizeof(constantPoolOopDesc)));
3355 __ bind(resolved);
3357 // Generate subtype check. Blows rcx, rdi
3358 // Superklass in rax. Subklass in rdx.
3359 __ gen_subtype_check(rdx, ok_is_subtype);
3361 // Come here on failure
3362 __ xorl(rax, rax);
3363 __ jmpb(done);
3364 // Come here on success
3365 __ bind(ok_is_subtype);
3366 __ movl(rax, 1);
3368 // Collect counts on whether this test sees NULLs a lot or not.
3369 if (ProfileInterpreter) {
3370 __ jmp(done);
3371 __ bind(is_null);
3372 __ profile_null_seen(rcx);
3373 } else {
3374 __ bind(is_null); // same as 'done'
3375 }
3376 __ bind(done);
3377 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3378 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3379 }
3381 //-----------------------------------------------------------------------------
3382 // Breakpoints
3383 void TemplateTable::_breakpoint() {
3384 // Note: We get here even if we are single stepping.
3385 // jbug insists on setting breakpoints at every bytecode
3386 // even if we are in single step mode.
3388 transition(vtos, vtos);
3390 // get the unpatched byte code
3391 __ get_method(c_rarg1);
3392 __ call_VM(noreg,
3393 CAST_FROM_FN_PTR(address,
3394 InterpreterRuntime::get_original_bytecode_at),
3395 c_rarg1, r13);
3396 __ mov(rbx, rax);
3398 // post the breakpoint event
3399 __ get_method(c_rarg1);
3400 __ call_VM(noreg,
3401 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3402 c_rarg1, r13);
3404 // complete the execution of original bytecode
3405 __ dispatch_only_normal(vtos);
3406 }
3408 //-----------------------------------------------------------------------------
3409 // Exceptions
3411 void TemplateTable::athrow() {
3412 transition(atos, vtos);
3413 __ null_check(rax);
3414 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3415 }
3417 //-----------------------------------------------------------------------------
3418 // Synchronization
3419 //
3420 // Note: monitorenter & exit are symmetric routines; which is reflected
3421 // in the assembly code structure as well
3422 //
3423 // Stack layout:
3424 //
3425 // [expressions ] <--- rsp = expression stack top
3426 // ..
3427 // [expressions ]
3428 // [monitor entry] <--- monitor block top = expression stack bot
3429 // ..
3430 // [monitor entry]
3431 // [frame data ] <--- monitor block bot
3432 // ...
3433 // [saved rbp ] <--- rbp
3434 void TemplateTable::monitorenter() {
3435 transition(atos, vtos);
3437 // check for NULL object
3438 __ null_check(rax);
3440 const Address monitor_block_top(
3441 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3442 const Address monitor_block_bot(
3443 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3444 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3446 Label allocated;
3448 // initialize entry pointer
3449 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3451 // find a free slot in the monitor block (result in c_rarg1)
3452 {
3453 Label entry, loop, exit;
3454 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3455 // starting with top-most entry
3456 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3457 // of monitor block
3458 __ jmpb(entry);
3460 __ bind(loop);
3461 // check if current entry is used
3462 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3463 // if not used then remember entry in c_rarg1
3464 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3465 // check if current entry is for same object
3466 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3467 // if same object then stop searching
3468 __ jccb(Assembler::equal, exit);
3469 // otherwise advance to next entry
3470 __ addptr(c_rarg3, entry_size);
3471 __ bind(entry);
3472 // check if bottom reached
3473 __ cmpptr(c_rarg3, c_rarg2);
3474 // if not at bottom then check this entry
3475 __ jcc(Assembler::notEqual, loop);
3476 __ bind(exit);
3477 }
3479 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3480 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3482 // allocate one if there's no free slot
3483 {
3484 Label entry, loop;
3485 // 1. compute new pointers // rsp: old expression stack top
3486 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3487 __ subptr(rsp, entry_size); // move expression stack top
3488 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3489 __ mov(c_rarg3, rsp); // set start value for copy loop
3490 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3491 __ jmp(entry);
3492 // 2. move expression stack contents
3493 __ bind(loop);
3494 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3495 // word from old location
3496 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3497 __ addptr(c_rarg3, wordSize); // advance to next word
3498 __ bind(entry);
3499 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3500 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3501 // copy next word
3502 }
3504 // call run-time routine
3505 // c_rarg1: points to monitor entry
3506 __ bind(allocated);
3508 // Increment bcp to point to the next bytecode, so exception
3509 // handling for async. exceptions works correctly.
3510 // The object has already been popped from the stack, so the
3511 // expression stack looks correct.
3512 __ increment(r13);
3514 // store object
3515 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3516 __ lock_object(c_rarg1);
3518 // check to make sure this monitor doesn't cause stack overflow after locking
3519 __ save_bcp(); // in case of exception
3520 __ generate_stack_overflow_check(0);
3522 // The bcp has already been incremented. Just need to dispatch to
3523 // next instruction.
3524 __ dispatch_next(vtos);
3525 }
3528 void TemplateTable::monitorexit() {
3529 transition(atos, vtos);
3531 // check for NULL object
3532 __ null_check(rax);
3534 const Address monitor_block_top(
3535 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3536 const Address monitor_block_bot(
3537 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3538 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3540 Label found;
3542 // find matching slot
3543 {
3544 Label entry, loop;
3545 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3546 // starting with top-most entry
3547 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3548 // of monitor block
3549 __ jmpb(entry);
3551 __ bind(loop);
3552 // check if current entry is for same object
3553 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3554 // if same object then stop searching
3555 __ jcc(Assembler::equal, found);
3556 // otherwise advance to next entry
3557 __ addptr(c_rarg1, entry_size);
3558 __ bind(entry);
3559 // check if bottom reached
3560 __ cmpptr(c_rarg1, c_rarg2);
3561 // if not at bottom then check this entry
3562 __ jcc(Assembler::notEqual, loop);
3563 }
3565 // error handling: unlocking was not block-structured
3566 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3567 InterpreterRuntime::throw_illegal_monitor_state_exception));
3568 __ should_not_reach_here();
3570 // call run-time routine
3571 // c_rarg1: points to monitor entry
3572 __ bind(found);
3573 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3574 __ unlock_object(c_rarg1);
3575 __ pop_ptr(rax); // discard object
3576 }
3579 // Wide instructions
3580 void TemplateTable::wide() {
3581 transition(vtos, vtos);
3582 __ load_unsigned_byte(rbx, at_bcp(1));
3583 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3584 __ jmp(Address(rscratch1, rbx, Address::times_8));
3585 // Note: the r13 increment step is part of the individual wide
3586 // bytecode implementations
3587 }
3590 // Multi arrays
3591 void TemplateTable::multianewarray() {
3592 transition(vtos, atos);
3593 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3594 // last dim is on top of stack; we want address of first one:
3595 // first_addr = last_addr + (ndims - 1) * wordSize
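// e.g. with ndims == 3 the counts occupy rsp .. rsp + 16, and
// c_rarg1 = rsp + 3*8 - 8 points at the first (outermost) dimension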
3596 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3597 call_VM(rax,
3598 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3599 c_rarg1);
3600 __ load_unsigned_byte(rbx, at_bcp(3));
3601 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3602 }
3603 #endif // !CC_INTERP