Fri, 29 Sep 2017 14:30:05 -0400
8174962: Better interface invocations
Reviewed-by: jrose, coleenp, ahgross, acorn, vlivanov
1 /*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "interpreter/templateTable.hpp"
30 #include "memory/universe.inline.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/synchronizer.hpp"
38 #include "utilities/macros.hpp"
40 #ifndef CC_INTERP
41 #define __ _masm->
43 //----------------------------------------------------------------------------------------------------
44 // Platform-dependent initialization
46 void TemplateTable::pd_initialize() {
47 // No i486 specific initialization
48 }
50 //----------------------------------------------------------------------------------------------------
51 // Address computation
53 // local variables
54 static inline Address iaddress(int n) {
55 return Address(rdi, Interpreter::local_offset_in_bytes(n));
56 }
58 static inline Address laddress(int n) { return iaddress(n + 1); }
59 static inline Address haddress(int n) { return iaddress(n + 0); }
60 static inline Address faddress(int n) { return iaddress(n); }
61 static inline Address daddress(int n) { return laddress(n); }
62 static inline Address aaddress(int n) { return iaddress(n); }
64 static inline Address iaddress(Register r) {
65 return Address(rdi, r, Interpreter::stackElementScale());
66 }
67 static inline Address laddress(Register r) {
68 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
69 }
70 static inline Address haddress(Register r) {
71 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
72 }
74 static inline Address faddress(Register r) { return iaddress(r); }
75 static inline Address daddress(Register r) { return laddress(r); }
76 static inline Address aaddress(Register r) { return iaddress(r); }
78 // expression stack
79 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
80 // data beyond rsp, which is potentially unsafe in an MT environment;
81 // an interrupt may overwrite that data.)
82 static inline Address at_rsp () {
83 return Address(rsp, 0);
84 }
86 // At the top of the Java expression stack, which may be different from
87 // rsp(). It is the same as rsp() for category-1 values.
88 static inline Address at_tos () {
89 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
90 return tos;
91 }
93 static inline Address at_tos_p1() {
94 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
95 }
97 static inline Address at_tos_p2() {
98 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
99 }
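// A rough sketch of the expression stack these helpers address (one
// machine word per stack slot; higher addresses hold older entries):
//
//   rsp + expr_offset_in_bytes(2)  ->  third-from-top slot   (at_tos_p2)
//   rsp + expr_offset_in_bytes(1)  ->  second-from-top slot  (at_tos_p1)
//   rsp + expr_offset_in_bytes(0)  ->  top-of-stack slot     (at_tos)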
101 // Condition conversion
102 static Assembler::Condition j_not(TemplateTable::Condition cc) {
103 switch (cc) {
104 case TemplateTable::equal : return Assembler::notEqual;
105 case TemplateTable::not_equal : return Assembler::equal;
106 case TemplateTable::less : return Assembler::greaterEqual;
107 case TemplateTable::less_equal : return Assembler::greater;
108 case TemplateTable::greater : return Assembler::lessEqual;
109 case TemplateTable::greater_equal: return Assembler::less;
110 }
111 ShouldNotReachHere();
112 return Assembler::zero;
113 }
116 //----------------------------------------------------------------------------------------------------
117 // Miscellaneous helper routines
119 // Store an oop (or NULL) at the address described by obj.
120 // If val == noreg, a NULL is stored.
122 static void do_oop_store(InterpreterMacroAssembler* _masm,
123 Address obj,
124 Register val,
125 BarrierSet::Name barrier,
126 bool precise) {
127 assert(val == noreg || val == rax, "parameter is just for looks");
128 switch (barrier) {
129 #if INCLUDE_ALL_GCS
130 case BarrierSet::G1SATBCT:
131 case BarrierSet::G1SATBCTLogging:
132 {
133 // flatten object address if needed
134 // We do it regardless of precise because we need the registers
135 if (obj.index() == noreg && obj.disp() == 0) {
136 if (obj.base() != rdx) {
137 __ movl(rdx, obj.base());
138 }
139 } else {
140 __ leal(rdx, obj);
141 }
142 __ get_thread(rcx);
143 __ save_bcp();
144 __ g1_write_barrier_pre(rdx /* obj */,
145 rbx /* pre_val */,
146 rcx /* thread */,
147 rsi /* tmp */,
148 val != noreg /* tosca_live */,
149 false /* expand_call */);
151 // Do the actual store
152 // noreg means NULL
153 if (val == noreg) {
154 __ movptr(Address(rdx, 0), NULL_WORD);
155 // No post barrier for NULL
156 } else {
157 __ movl(Address(rdx, 0), val);
158 __ g1_write_barrier_post(rdx /* store_adr */,
159 val /* new_val */,
160 rcx /* thread */,
161 rbx /* tmp */,
162 rsi /* tmp2 */);
163 }
164 __ restore_bcp();
166 }
167 break;
168 #endif // INCLUDE_ALL_GCS
169 case BarrierSet::CardTableModRef:
170 case BarrierSet::CardTableExtension:
171 {
172 if (val == noreg) {
173 __ movptr(obj, NULL_WORD);
174 } else {
175 __ movl(obj, val);
176 // flatten object address if needed
177 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
178 __ store_check(obj.base());
179 } else {
180 __ leal(rdx, obj);
181 __ store_check(rdx);
182 }
183 }
184 }
185 break;
186 case BarrierSet::ModRef:
187 case BarrierSet::Other:
188 if (val == noreg) {
189 __ movptr(obj, NULL_WORD);
190 } else {
191 __ movl(obj, val);
192 }
193 break;
194 default :
195 ShouldNotReachHere();
197 }
198 }
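// In outline, the code generated above behaves like this C-style sketch
// (illustrative only; pre_barrier/post_barrier stand in for the G1 SATB
// and card-marking code the macro assembler emits):
//
//   void do_oop_store(oop* addr, oop val) {
//     if (G1)  pre_barrier(*addr);      // log the old value (SATB)
//     *addr = val;                      // the actual store; NULL if val == noreg
//     if (val != NULL) post_barrier(addr, val);  // dirty card / remembered sets
//   }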
200 Address TemplateTable::at_bcp(int offset) {
201 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
202 return Address(rsi, offset);
203 }
206 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
207 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
208 int byte_no) {
209 if (!RewriteBytecodes) return;
210 Label L_patch_done;
212 switch (bc) {
213 case Bytecodes::_fast_aputfield:
214 case Bytecodes::_fast_bputfield:
215 case Bytecodes::_fast_zputfield:
216 case Bytecodes::_fast_cputfield:
217 case Bytecodes::_fast_dputfield:
218 case Bytecodes::_fast_fputfield:
219 case Bytecodes::_fast_iputfield:
220 case Bytecodes::_fast_lputfield:
221 case Bytecodes::_fast_sputfield:
222 {
223 // We skip bytecode quickening for putfield instructions when
224 // the put_code written to the constant pool cache is zero.
225 // This is required so that every execution of this instruction
226 // calls out to InterpreterRuntime::resolve_get_put to do
227 // additional, required work.
228 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
229 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
230 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
231 __ movl(bc_reg, bc);
232 __ cmpl(temp_reg, (int) 0);
233 __ jcc(Assembler::zero, L_patch_done); // don't patch
234 }
235 break;
236 default:
237 assert(byte_no == -1, "sanity");
238 // the pair bytecodes have already done the load.
239 if (load_bc_into_bc_reg) {
240 __ movl(bc_reg, bc);
241 }
242 }
244 if (JvmtiExport::can_post_breakpoint()) {
245 Label L_fast_patch;
246 // if a breakpoint is present we can't rewrite the stream directly
247 __ movzbl(temp_reg, at_bcp(0));
248 __ cmpl(temp_reg, Bytecodes::_breakpoint);
249 __ jcc(Assembler::notEqual, L_fast_patch);
250 __ get_method(temp_reg);
251 // Let breakpoint table handling rewrite to quicker bytecode
252 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
253 #ifndef ASSERT
254 __ jmpb(L_patch_done);
255 #else
256 __ jmp(L_patch_done);
257 #endif
258 __ bind(L_fast_patch);
259 }
261 #ifdef ASSERT
262 Label L_okay;
263 __ load_unsigned_byte(temp_reg, at_bcp(0));
264 __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
265 __ jccb(Assembler::equal, L_okay);
266 __ cmpl(temp_reg, bc_reg);
267 __ jcc(Assembler::equal, L_okay);
268 __ stop("patching the wrong bytecode");
269 __ bind(L_okay);
270 #endif
272 // patch bytecode
273 __ movb(at_bcp(0), bc_reg);
274 __ bind(L_patch_done);
275 }
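// Conceptually, patch_bytecode rewrites the instruction in place in the
// bytecode stream (sketch; operand bytes are left untouched):
//
//   before:  [slow bytecode] [operand bytes ...]
//   after:   [fast bytecode] [operand bytes ...]
//
// so subsequent executions dispatch straight to the quickened template.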
277 //----------------------------------------------------------------------------------------------------
278 // Individual instructions
280 void TemplateTable::nop() {
281 transition(vtos, vtos);
282 // nothing to do
283 }
285 void TemplateTable::shouldnotreachhere() {
286 transition(vtos, vtos);
287 __ stop("shouldnotreachhere bytecode");
288 }
292 void TemplateTable::aconst_null() {
293 transition(vtos, atos);
294 __ xorptr(rax, rax);
295 }
298 void TemplateTable::iconst(int value) {
299 transition(vtos, itos);
300 if (value == 0) {
301 __ xorptr(rax, rax);
302 } else {
303 __ movptr(rax, value);
304 }
305 }
308 void TemplateTable::lconst(int value) {
309 transition(vtos, ltos);
310 if (value == 0) {
311 __ xorptr(rax, rax);
312 } else {
313 __ movptr(rax, value);
314 }
315 assert(value >= 0, "check this code");
316 __ xorptr(rdx, rdx);
317 }
320 void TemplateTable::fconst(int value) {
321 transition(vtos, ftos);
322 if (value == 0) { __ fldz();
323 } else if (value == 1) { __ fld1();
324 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // could use a better sequence here
325 } else { ShouldNotReachHere();
326 }
327 }
330 void TemplateTable::dconst(int value) {
331 transition(vtos, dtos);
332 if (value == 0) { __ fldz();
333 } else if (value == 1) { __ fld1();
334 } else { ShouldNotReachHere();
335 }
336 }
339 void TemplateTable::bipush() {
340 transition(vtos, itos);
341 __ load_signed_byte(rax, at_bcp(1));
342 }
345 void TemplateTable::sipush() {
346 transition(vtos, itos);
347 __ load_unsigned_short(rax, at_bcp(1));
348 __ bswapl(rax);
349 __ sarl(rax, 16);
350 }
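// A worked example of the bswapl/sarl decoding above (illustrative):
// operand bytes 0xFF 0xFE (big-endian s2 for -2) load as 0x0000FEFF,
// bswapl gives 0xFFFE0000, and sarl(16) sign-extends to 0xFFFFFFFE = -2.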
352 void TemplateTable::ldc(bool wide) {
353 transition(vtos, vtos);
354 Label call_ldc, notFloat, notClass, Done;
356 if (wide) {
357 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
358 } else {
359 __ load_unsigned_byte(rbx, at_bcp(1));
360 }
361 __ get_cpool_and_tags(rcx, rax);
362 const int base_offset = ConstantPool::header_size() * wordSize;
363 const int tags_offset = Array<u1>::base_offset_in_bytes();
365 // get type
366 __ xorptr(rdx, rdx);
367 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
369 // unresolved class - get the resolved class
370 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
371 __ jccb(Assembler::equal, call_ldc);
373 // unresolved class in error (resolution failed) - call into runtime
374 // so that the same error from first resolution attempt is thrown.
375 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
376 __ jccb(Assembler::equal, call_ldc);
378 // resolved class - need to call vm to get java mirror of the class
379 __ cmpl(rdx, JVM_CONSTANT_Class);
380 __ jcc(Assembler::notEqual, notClass);
382 __ bind(call_ldc);
383 __ movl(rcx, wide);
384 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
385 __ push(atos);
386 __ jmp(Done);
388 __ bind(notClass);
389 __ cmpl(rdx, JVM_CONSTANT_Float);
390 __ jccb(Assembler::notEqual, notFloat);
391 // ftos
392 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
393 __ push(ftos);
394 __ jmp(Done);
396 __ bind(notFloat);
397 #ifdef ASSERT
398 { Label L;
399 __ cmpl(rdx, JVM_CONSTANT_Integer);
400 __ jcc(Assembler::equal, L);
401 // String and Object are rewritten to fast_aldc
402 __ stop("unexpected tag type in ldc");
403 __ bind(L);
404 }
405 #endif
406 // itos JVM_CONSTANT_Integer only
407 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
408 __ push(itos);
409 __ bind(Done);
410 }
412 // Fast path for caching oop constants.
413 void TemplateTable::fast_aldc(bool wide) {
414 transition(vtos, atos);
416 Register result = rax;
417 Register tmp = rdx;
418 int index_size = wide ? sizeof(u2) : sizeof(u1);
420 Label resolved;
422 // We are resolved if the resolved reference cache entry contains a
423 // non-null object (String, MethodType, etc.)
424 assert_different_registers(result, tmp);
425 __ get_cache_index_at_bcp(tmp, 1, index_size);
426 __ load_resolved_reference_at_index(result, tmp);
427 __ testl(result, result);
428 __ jcc(Assembler::notZero, resolved);
430 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
432 // first time invocation - must resolve first
433 __ movl(tmp, (int)bytecode());
434 __ call_VM(result, entry, tmp);
436 __ bind(resolved);
438 if (VerifyOops) {
439 __ verify_oop(result);
440 }
441 }
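// In outline (sketch, not HotSpot code): fast_aldc consults the resolved
// references cache and only enters the runtime on first execution:
//
//   oop result = resolved_references[cache_index];
//   if (result == NULL) {
//     result = InterpreterRuntime::resolve_ldc(...);  // slow path, once
//   }
//   push(result);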
443 void TemplateTable::ldc2_w() {
444 transition(vtos, vtos);
445 Label Long, Done;
446 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
448 __ get_cpool_and_tags(rcx, rax);
449 const int base_offset = ConstantPool::header_size() * wordSize;
450 const int tags_offset = Array<u1>::base_offset_in_bytes();
452 // get type
453 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
454 __ jccb(Assembler::notEqual, Long);
455 // dtos
456 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
457 __ push(dtos);
458 __ jmpb(Done);
460 __ bind(Long);
461 // ltos
462 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
463 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
465 __ push(ltos);
467 __ bind(Done);
468 }
471 void TemplateTable::locals_index(Register reg, int offset) {
472 __ load_unsigned_byte(reg, at_bcp(offset));
473 __ negptr(reg);
474 }
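// Locals live at decreasing addresses below rdi (the locals pointer), so
// the unsigned index byte is negated and later used as a scaled offset.
// Roughly (sketch): address of local #n == rdi - n * stackElementSize.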
477 void TemplateTable::iload() {
478 transition(vtos, itos);
479 if (RewriteFrequentPairs) {
480 Label rewrite, done;
482 // get next byte
483 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
484 // if _iload, wait to rewrite to _fast_iload2. We only want to rewrite the
485 // last two iloads in a pair. Comparing against _fast_iload means that
486 // the next bytecode is neither an iload nor a caload, and that these
487 // two bytecodes therefore form an iload pair.
488 __ cmpl(rbx, Bytecodes::_iload);
489 __ jcc(Assembler::equal, done);
491 __ cmpl(rbx, Bytecodes::_fast_iload);
492 __ movl(rcx, Bytecodes::_fast_iload2);
493 __ jccb(Assembler::equal, rewrite);
495 // if _caload, rewrite to fast_icaload
496 __ cmpl(rbx, Bytecodes::_caload);
497 __ movl(rcx, Bytecodes::_fast_icaload);
498 __ jccb(Assembler::equal, rewrite);
500 // rewrite so iload doesn't check again.
501 __ movl(rcx, Bytecodes::_fast_iload);
503 // rewrite
504 // rcx: fast bytecode
505 __ bind(rewrite);
506 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
507 __ bind(done);
508 }
510 // Get the local value into tos
511 locals_index(rbx);
512 __ movl(rax, iaddress(rbx));
513 }
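// The rewrite decision above, as a sketch (next = the bytecode that
// follows this iload):
//
//   next == _iload       -> leave as is; rewrite the *next* iload instead
//   next == _fast_iload  -> rewrite this one to _fast_iload2
//   next == _caload      -> rewrite this one to _fast_icaload
//   otherwise            -> rewrite this one to _fast_iload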
516 void TemplateTable::fast_iload2() {
517 transition(vtos, itos);
518 locals_index(rbx);
519 __ movl(rax, iaddress(rbx));
520 __ push(itos);
521 locals_index(rbx, 3);
522 __ movl(rax, iaddress(rbx));
523 }
525 void TemplateTable::fast_iload() {
526 transition(vtos, itos);
527 locals_index(rbx);
528 __ movl(rax, iaddress(rbx));
529 }
532 void TemplateTable::lload() {
533 transition(vtos, ltos);
534 locals_index(rbx);
535 __ movptr(rax, laddress(rbx));
536 NOT_LP64(__ movl(rdx, haddress(rbx)));
537 }
540 void TemplateTable::fload() {
541 transition(vtos, ftos);
542 locals_index(rbx);
543 __ fld_s(faddress(rbx));
544 }
547 void TemplateTable::dload() {
548 transition(vtos, dtos);
549 locals_index(rbx);
550 __ fld_d(daddress(rbx));
551 }
554 void TemplateTable::aload() {
555 transition(vtos, atos);
556 locals_index(rbx);
557 __ movptr(rax, aaddress(rbx));
558 }
561 void TemplateTable::locals_index_wide(Register reg) {
562 __ load_unsigned_short(reg, at_bcp(2));
563 __ bswapl(reg);
564 __ shrl(reg, 16);
565 __ negptr(reg);
566 }
569 void TemplateTable::wide_iload() {
570 transition(vtos, itos);
571 locals_index_wide(rbx);
572 __ movl(rax, iaddress(rbx));
573 }
576 void TemplateTable::wide_lload() {
577 transition(vtos, ltos);
578 locals_index_wide(rbx);
579 __ movptr(rax, laddress(rbx));
580 NOT_LP64(__ movl(rdx, haddress(rbx)));
581 }
584 void TemplateTable::wide_fload() {
585 transition(vtos, ftos);
586 locals_index_wide(rbx);
587 __ fld_s(faddress(rbx));
588 }
591 void TemplateTable::wide_dload() {
592 transition(vtos, dtos);
593 locals_index_wide(rbx);
594 __ fld_d(daddress(rbx));
595 }
598 void TemplateTable::wide_aload() {
599 transition(vtos, atos);
600 locals_index_wide(rbx);
601 __ movptr(rax, aaddress(rbx));
602 }
604 void TemplateTable::index_check(Register array, Register index) {
605 // Pop ptr into array
606 __ pop_ptr(array);
607 index_check_without_pop(array, index);
608 }
610 void TemplateTable::index_check_without_pop(Register array, Register index) {
611 // destroys rbx,
612 // check array
613 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
614 LP64_ONLY(__ movslq(index, index));
615 // check index
616 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
617 if (index != rbx) {
618 // ??? convention: move aberrant index into rbx, for exception message
619 assert(rbx != array, "different registers");
620 __ mov(rbx, index);
621 }
622 __ jump_cc(Assembler::aboveEqual,
623 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
624 }
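// Note on the check above: cmpl + jump_cc(aboveEqual) is an *unsigned*
// comparison, so one branch also catches negative indices. Sketch:
//
//   if ((juint)index >= (juint)array->length()) {
//     throw ArrayIndexOutOfBoundsException;  // negative or too large
//   }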
627 void TemplateTable::iaload() {
628 transition(itos, itos);
629 // rdx: array
630 index_check(rdx, rax); // kills rbx,
631 // rax,: index
632 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
633 }
636 void TemplateTable::laload() {
637 transition(itos, ltos);
638 // rax,: index
639 // rdx: array
640 index_check(rdx, rax);
641 __ mov(rbx, rax);
642 // rbx,: index
643 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
644 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
645 }
648 void TemplateTable::faload() {
649 transition(itos, ftos);
650 // rdx: array
651 index_check(rdx, rax); // kills rbx,
652 // rax,: index
653 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
654 }
657 void TemplateTable::daload() {
658 transition(itos, dtos);
659 // rdx: array
660 index_check(rdx, rax); // kills rbx,
661 // rax,: index
662 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
663 }
666 void TemplateTable::aaload() {
667 transition(itos, atos);
668 // rdx: array
669 index_check(rdx, rax); // kills rbx,
670 // rax,: index
671 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
672 }
675 void TemplateTable::baload() {
676 transition(itos, itos);
677 // rdx: array
678 index_check(rdx, rax); // kills rbx,
679 // rax,: index
680 // can do better code for P5 - fix this at some point
681 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
682 __ mov(rax, rbx);
683 }
686 void TemplateTable::caload() {
687 transition(itos, itos);
688 // rdx: array
689 index_check(rdx, rax); // kills rbx,
690 // rax,: index
691 // can do better code for P5 - may want to improve this at some point
692 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
693 __ mov(rax, rbx);
694 }
696 // iload followed by caload frequent pair
697 void TemplateTable::fast_icaload() {
698 transition(vtos, itos);
699 // load index out of locals
700 locals_index(rbx);
701 __ movl(rax, iaddress(rbx));
703 // rdx: array
704 index_check(rdx, rax);
705 // rax,: index
706 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
707 __ mov(rax, rbx);
708 }
710 void TemplateTable::saload() {
711 transition(itos, itos);
712 // rdx: array
713 index_check(rdx, rax); // kills rbx,
714 // rax,: index
715 // can do better code for P5 - may want to improve this at some point
716 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
717 __ mov(rax, rbx);
718 }
721 void TemplateTable::iload(int n) {
722 transition(vtos, itos);
723 __ movl(rax, iaddress(n));
724 }
727 void TemplateTable::lload(int n) {
728 transition(vtos, ltos);
729 __ movptr(rax, laddress(n));
730 NOT_LP64(__ movptr(rdx, haddress(n)));
731 }
734 void TemplateTable::fload(int n) {
735 transition(vtos, ftos);
736 __ fld_s(faddress(n));
737 }
740 void TemplateTable::dload(int n) {
741 transition(vtos, dtos);
742 __ fld_d(daddress(n));
743 }
746 void TemplateTable::aload(int n) {
747 transition(vtos, atos);
748 __ movptr(rax, aaddress(n));
749 }
752 void TemplateTable::aload_0() {
753 transition(vtos, atos);
754 // According to bytecode histograms, the pairs:
755 //
756 // _aload_0, _fast_igetfield
757 // _aload_0, _fast_agetfield
758 // _aload_0, _fast_fgetfield
759 //
760 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
761 // bytecode checks if the next bytecode is either _fast_igetfield,
762 // _fast_agetfield or _fast_fgetfield and then rewrites the
763 // current bytecode into a pair bytecode; otherwise it rewrites the current
764 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
765 //
766 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
767 // otherwise we may miss an opportunity for a pair.
768 //
769 // Also rewrite frequent pairs
770 // aload_0, aload_1
771 // aload_0, iload_1
772 // These pairs take little code to rewrite and are the most profitable ones.
773 if (RewriteFrequentPairs) {
774 Label rewrite, done;
775 // get next byte
776 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
778 // do actual aload_0
779 aload(0);
781 // if _getfield then wait to rewrite
782 __ cmpl(rbx, Bytecodes::_getfield);
783 __ jcc(Assembler::equal, done);
785 // if _igetfield then rewrite to _fast_iaccess_0
786 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
787 __ cmpl(rbx, Bytecodes::_fast_igetfield);
788 __ movl(rcx, Bytecodes::_fast_iaccess_0);
789 __ jccb(Assembler::equal, rewrite);
791 // if _agetfield then rewrite to _fast_aaccess_0
792 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
793 __ cmpl(rbx, Bytecodes::_fast_agetfield);
794 __ movl(rcx, Bytecodes::_fast_aaccess_0);
795 __ jccb(Assembler::equal, rewrite);
797 // if _fgetfield then rewrite to _fast_faccess_0
798 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
799 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
800 __ movl(rcx, Bytecodes::_fast_faccess_0);
801 __ jccb(Assembler::equal, rewrite);
803 // else rewrite to _fast_aload_0
804 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
805 __ movl(rcx, Bytecodes::_fast_aload_0);
807 // rewrite
808 // rcx: fast bytecode
809 __ bind(rewrite);
810 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
812 __ bind(done);
813 } else {
814 aload(0);
815 }
816 }
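// The aload_0 rewrite above, as a sketch (next = following bytecode):
//
//   next == _getfield        -> leave as is; may become a pair later
//   next == _fast_igetfield  -> rewrite to _fast_iaccess_0
//   next == _fast_agetfield  -> rewrite to _fast_aaccess_0
//   next == _fast_fgetfield  -> rewrite to _fast_faccess_0
//   otherwise                -> rewrite to _fast_aload_0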
818 void TemplateTable::istore() {
819 transition(itos, vtos);
820 locals_index(rbx);
821 __ movl(iaddress(rbx), rax);
822 }
825 void TemplateTable::lstore() {
826 transition(ltos, vtos);
827 locals_index(rbx);
828 __ movptr(laddress(rbx), rax);
829 NOT_LP64(__ movptr(haddress(rbx), rdx));
830 }
833 void TemplateTable::fstore() {
834 transition(ftos, vtos);
835 locals_index(rbx);
836 __ fstp_s(faddress(rbx));
837 }
840 void TemplateTable::dstore() {
841 transition(dtos, vtos);
842 locals_index(rbx);
843 __ fstp_d(daddress(rbx));
844 }
847 void TemplateTable::astore() {
848 transition(vtos, vtos);
849 __ pop_ptr(rax);
850 locals_index(rbx);
851 __ movptr(aaddress(rbx), rax);
852 }
855 void TemplateTable::wide_istore() {
856 transition(vtos, vtos);
857 __ pop_i(rax);
858 locals_index_wide(rbx);
859 __ movl(iaddress(rbx), rax);
860 }
863 void TemplateTable::wide_lstore() {
864 transition(vtos, vtos);
865 __ pop_l(rax, rdx);
866 locals_index_wide(rbx);
867 __ movptr(laddress(rbx), rax);
868 NOT_LP64(__ movl(haddress(rbx), rdx));
869 }
872 void TemplateTable::wide_fstore() {
873 wide_istore();
874 }
877 void TemplateTable::wide_dstore() {
878 wide_lstore();
879 }
882 void TemplateTable::wide_astore() {
883 transition(vtos, vtos);
884 __ pop_ptr(rax);
885 locals_index_wide(rbx);
886 __ movptr(aaddress(rbx), rax);
887 }
890 void TemplateTable::iastore() {
891 transition(itos, vtos);
892 __ pop_i(rbx);
893 // rax,: value
894 // rdx: array
895 index_check(rdx, rbx); // prefer index in rbx,
896 // rbx,: index
897 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
898 }
901 void TemplateTable::lastore() {
902 transition(ltos, vtos);
903 __ pop_i(rbx);
904 // rax,: low(value)
905 // rcx: array
906 // rdx: high(value)
907 index_check(rcx, rbx); // prefer index in rbx,
908 // rbx,: index
909 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
910 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
911 }
914 void TemplateTable::fastore() {
915 transition(ftos, vtos);
916 __ pop_i(rbx);
917 // rdx: array
918 // st0: value
919 index_check(rdx, rbx); // prefer index in rbx,
920 // rbx,: index
921 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
922 }
925 void TemplateTable::dastore() {
926 transition(dtos, vtos);
927 __ pop_i(rbx);
928 // rdx: array
929 // st0: value
930 index_check(rdx, rbx); // prefer index in rbx,
931 // rbx,: index
932 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
933 }
936 void TemplateTable::aastore() {
937 Label is_null, ok_is_subtype, done;
938 transition(vtos, vtos);
939 // stack: ..., array, index, value
940 __ movptr(rax, at_tos()); // Value
941 __ movl(rcx, at_tos_p1()); // Index
942 __ movptr(rdx, at_tos_p2()); // Array
944 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
945 index_check_without_pop(rdx, rcx); // kills rbx,
946 // do array store check - check for NULL value first
947 __ testptr(rax, rax);
948 __ jcc(Assembler::zero, is_null);
950 // Move subklass into EBX
951 __ load_klass(rbx, rax);
952 // Move superklass into EAX
953 __ load_klass(rax, rdx);
954 __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset()));
955 // Compress array+index*wordSize+12 into a single register. Frees ECX.
956 __ lea(rdx, element_address);
958 // Generate subtype check. Blows ECX. Resets EDI to locals.
959 // Superklass in EAX. Subklass in EBX.
960 __ gen_subtype_check( rbx, ok_is_subtype );
962 // Come here on failure
963 // object is at TOS
964 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
966 // Come here on success
967 __ bind(ok_is_subtype);
969 // Get the value to store
970 __ movptr(rax, at_rsp());
971 // and store it with appropriate barrier
972 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
974 __ jmp(done);
976 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
977 __ bind(is_null);
978 __ profile_null_seen(rbx);
980 // Store NULL, (noreg means NULL to do_oop_store)
981 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
983 // Pop stack arguments
984 __ bind(done);
985 __ addptr(rsp, 3 * Interpreter::stackElementSize);
986 }
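// The store check above, in outline (sketch; gen_subtype_check emits the
// real fast/slow paths):
//
//   if (value == NULL) {
//     array[index] = NULL;        // no type check needed
//   } else if (value->klass()->is_subtype_of(array_element_klass)) {
//     array[index] = value;       // with the appropriate GC barrier
//   } else {
//     throw ArrayStoreException;
//   }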
989 void TemplateTable::bastore() {
990 transition(itos, vtos);
991 __ pop_i(rbx);
992 // rax: value
993 // rbx: index
994 // rdx: array
995 index_check(rdx, rbx); // prefer index in rbx
996 // Need to check whether array is boolean or byte
997 // since both types share the bastore bytecode.
998 __ load_klass(rcx, rdx);
999 __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1000 int diffbit = Klass::layout_helper_boolean_diffbit();
1001 __ testl(rcx, diffbit);
1002 Label L_skip;
1003 __ jccb(Assembler::zero, L_skip);
1004 __ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1005 __ bind(L_skip);
1006 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
1007 }
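// The diffbit test above distinguishes boolean[] from byte[] (both share
// the bastore bytecode) by probing the klass layout_helper. Sketch:
//
//   if (array_klass->layout_helper() & diffbit) {  // a T_BOOLEAN array
//     value &= 1;                                  // store only 0 or 1
//   }
//   array[index] = (jbyte)value;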
1010 void TemplateTable::castore() {
1011 transition(itos, vtos);
1012 __ pop_i(rbx);
1013 // rax,: value
1014 // rdx: array
1015 index_check(rdx, rbx); // prefer index in rbx,
1016 // rbx,: index
1017 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
1018 }
1021 void TemplateTable::sastore() {
1022 castore();
1023 }
1026 void TemplateTable::istore(int n) {
1027 transition(itos, vtos);
1028 __ movl(iaddress(n), rax);
1029 }
1032 void TemplateTable::lstore(int n) {
1033 transition(ltos, vtos);
1034 __ movptr(laddress(n), rax);
1035 NOT_LP64(__ movptr(haddress(n), rdx));
1036 }
1039 void TemplateTable::fstore(int n) {
1040 transition(ftos, vtos);
1041 __ fstp_s(faddress(n));
1042 }
1045 void TemplateTable::dstore(int n) {
1046 transition(dtos, vtos);
1047 __ fstp_d(daddress(n));
1048 }
1051 void TemplateTable::astore(int n) {
1052 transition(vtos, vtos);
1053 __ pop_ptr(rax);
1054 __ movptr(aaddress(n), rax);
1055 }
1058 void TemplateTable::pop() {
1059 transition(vtos, vtos);
1060 __ addptr(rsp, Interpreter::stackElementSize);
1061 }
1064 void TemplateTable::pop2() {
1065 transition(vtos, vtos);
1066 __ addptr(rsp, 2*Interpreter::stackElementSize);
1067 }
1070 void TemplateTable::dup() {
1071 transition(vtos, vtos);
1072 // stack: ..., a
1073 __ load_ptr(0, rax);
1074 __ push_ptr(rax);
1075 // stack: ..., a, a
1076 }
1079 void TemplateTable::dup_x1() {
1080 transition(vtos, vtos);
1081 // stack: ..., a, b
1082 __ load_ptr( 0, rax); // load b
1083 __ load_ptr( 1, rcx); // load a
1084 __ store_ptr(1, rax); // store b
1085 __ store_ptr(0, rcx); // store a
1086 __ push_ptr(rax); // push b
1087 // stack: ..., b, a, b
1088 }
1091 void TemplateTable::dup_x2() {
1092 transition(vtos, vtos);
1093 // stack: ..., a, b, c
1094 __ load_ptr( 0, rax); // load c
1095 __ load_ptr( 2, rcx); // load a
1096 __ store_ptr(2, rax); // store c in a
1097 __ push_ptr(rax); // push c
1098 // stack: ..., c, b, c, c
1099 __ load_ptr( 2, rax); // load b
1100 __ store_ptr(2, rcx); // store a in b
1101 // stack: ..., c, a, c, c
1102 __ store_ptr(1, rax); // store b in c
1103 // stack: ..., c, a, b, c
1104 }
1107 void TemplateTable::dup2() {
1108 transition(vtos, vtos);
1109 // stack: ..., a, b
1110 __ load_ptr(1, rax); // load a
1111 __ push_ptr(rax); // push a
1112 __ load_ptr(1, rax); // load b
1113 __ push_ptr(rax); // push b
1114 // stack: ..., a, b, a, b
1115 }
1118 void TemplateTable::dup2_x1() {
1119 transition(vtos, vtos);
1120 // stack: ..., a, b, c
1121 __ load_ptr( 0, rcx); // load c
1122 __ load_ptr( 1, rax); // load b
1123 __ push_ptr(rax); // push b
1124 __ push_ptr(rcx); // push c
1125 // stack: ..., a, b, c, b, c
1126 __ store_ptr(3, rcx); // store c in b
1127 // stack: ..., a, c, c, b, c
1128 __ load_ptr( 4, rcx); // load a
1129 __ store_ptr(2, rcx); // store a in 2nd c
1130 // stack: ..., a, c, a, b, c
1131 __ store_ptr(4, rax); // store b in a
1132 // stack: ..., b, c, a, b, c
1134 }
1137 void TemplateTable::dup2_x2() {
1138 transition(vtos, vtos);
1139 // stack: ..., a, b, c, d
1140 __ load_ptr( 0, rcx); // load d
1141 __ load_ptr( 1, rax); // load c
1142 __ push_ptr(rax); // push c
1143 __ push_ptr(rcx); // push d
1144 // stack: ..., a, b, c, d, c, d
1145 __ load_ptr( 4, rax); // load b
1146 __ store_ptr(2, rax); // store b in d
1147 __ store_ptr(4, rcx); // store d in b
1148 // stack: ..., a, d, c, b, c, d
1149 __ load_ptr( 5, rcx); // load a
1150 __ load_ptr( 3, rax); // load c
1151 __ store_ptr(3, rcx); // store a in c
1152 __ store_ptr(5, rax); // store c in a
1153 // stack: ..., c, d, a, b, c, d
1155 }
1158 void TemplateTable::swap() {
1159 transition(vtos, vtos);
1160 // stack: ..., a, b
1161 __ load_ptr( 1, rcx); // load a
1162 __ load_ptr( 0, rax); // load b
1163 __ store_ptr(0, rcx); // store a in b
1164 __ store_ptr(1, rax); // store b in a
1165 // stack: ..., b, a
1166 }
1169 void TemplateTable::iop2(Operation op) {
1170 transition(itos, itos);
1171 switch (op) {
1172 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1173 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1174 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1175 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1176 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1177 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1178 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1179 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1180 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1181 default : ShouldNotReachHere();
1182 }
1183 }
1186 void TemplateTable::lop2(Operation op) {
1187 transition(ltos, ltos);
1188 __ pop_l(rbx, rcx);
1189 switch (op) {
1190 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1191 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1192 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1193 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1194 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1195 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1196 default : ShouldNotReachHere();
1197 }
1198 }
1201 void TemplateTable::idiv() {
1202 transition(itos, itos);
1203 __ mov(rcx, rax);
1204 __ pop_i(rax);
1205 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1206 // they are not equal, one could do a normal division (no correction
1207 // needed), which may speed up this implementation for the common case.
1208 // (see also JVM spec., p.243 & p.271)
1209 __ corrected_idivl(rcx);
1210 }
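// corrected_idivl exists because idiv raises a hardware exception on the
// one overflowing case, min_jint / -1. Sketch of the correction:
//
//   if (dividend == min_jint && divisor == -1) {
//     quotient = min_jint;             // remainder is 0; skip the idiv
//   } else {
//     quotient = dividend / divisor;   // cdq; idivl
//   }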
1213 void TemplateTable::irem() {
1214 transition(itos, itos);
1215 __ mov(rcx, rax);
1216 __ pop_i(rax);
1217 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1218 // they are not equal, one could do a normal division (no correction
1219 // needed), which may speed up this implementation for the common case.
1220 // (see also JVM spec., p.243 & p.271)
1221 __ corrected_idivl(rcx);
1222 __ mov(rax, rdx);
1223 }
1226 void TemplateTable::lmul() {
1227 transition(ltos, ltos);
1228 __ pop_l(rbx, rcx);
1229 __ push(rcx); __ push(rbx);
1230 __ push(rdx); __ push(rax);
1231 __ lmul(2 * wordSize, 0);
1232 __ addptr(rsp, 4 * wordSize); // take off temporaries
1233 }
1236 void TemplateTable::ldiv() {
1237 transition(ltos, ltos);
1238 __ pop_l(rbx, rcx);
1239 __ push(rcx); __ push(rbx);
1240 __ push(rdx); __ push(rax);
1241 // check if y == 0
1242 __ orl(rax, rdx);
1243 __ jump_cc(Assembler::zero,
1244 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1245 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1246 __ addptr(rsp, 4 * wordSize); // take off temporaries
1247 }
1250 void TemplateTable::lrem() {
1251 transition(ltos, ltos);
1252 __ pop_l(rbx, rcx);
1253 __ push(rcx); __ push(rbx);
1254 __ push(rdx); __ push(rax);
1255 // check if y == 0
1256 __ orl(rax, rdx);
1257 __ jump_cc(Assembler::zero,
1258 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1259 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1260 __ addptr(rsp, 4 * wordSize);
1261 }
1264 void TemplateTable::lshl() {
1265 transition(itos, ltos);
1266 __ movl(rcx, rax); // get shift count
1267 __ pop_l(rax, rdx); // get shift value
1268 __ lshl(rdx, rax);
1269 }
1272 void TemplateTable::lshr() {
1273 transition(itos, ltos);
1274 __ mov(rcx, rax); // get shift count
1275 __ pop_l(rax, rdx); // get shift value
1276 __ lshr(rdx, rax, true);
1277 }
1280 void TemplateTable::lushr() {
1281 transition(itos, ltos);
1282 __ mov(rcx, rax); // get shift count
1283 __ pop_l(rax, rdx); // get shift value
1284 __ lshr(rdx, rax);
1285 }
1288 void TemplateTable::fop2(Operation op) {
1289 transition(ftos, ftos);
1290 switch (op) {
1291 case add: __ fadd_s (at_rsp()); break;
1292 case sub: __ fsubr_s(at_rsp()); break;
1293 case mul: __ fmul_s (at_rsp()); break;
1294 case div: __ fdivr_s(at_rsp()); break;
1295 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1296 default : ShouldNotReachHere();
1297 }
1298 __ f2ieee();
1299 __ pop(rax); // pop float thing off
1300 }
1303 void TemplateTable::dop2(Operation op) {
1304 transition(dtos, dtos);
1306 switch (op) {
1307 case add: __ fadd_d (at_rsp()); break;
1308 case sub: __ fsubr_d(at_rsp()); break;
1309 case mul: {
1310 Label L_strict;
1311 Label L_join;
1312 const Address access_flags (rcx, Method::access_flags_offset());
1313 __ get_method(rcx);
1314 __ movl(rcx, access_flags);
1315 __ testl(rcx, JVM_ACC_STRICT);
1316 __ jccb(Assembler::notZero, L_strict);
1317 __ fmul_d (at_rsp());
1318 __ jmpb(L_join);
1319 __ bind(L_strict);
1320 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1321 __ fmulp();
1322 __ fmul_d (at_rsp());
1323 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1324 __ fmulp();
1325 __ bind(L_join);
1326 break;
1327 }
1328 case div: {
1329 Label L_strict;
1330 Label L_join;
1331 const Address access_flags (rcx, Method::access_flags_offset());
1332 __ get_method(rcx);
1333 __ movl(rcx, access_flags);
1334 __ testl(rcx, JVM_ACC_STRICT);
1335 __ jccb(Assembler::notZero, L_strict);
1336 __ fdivr_d(at_rsp());
1337 __ jmp(L_join);
1338 __ bind(L_strict);
1339 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1340 __ fmul_d (at_rsp());
1341 __ fdivrp();
1342 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1343 __ fmulp();
1344 __ bind(L_join);
1345 break;
1346 }
1347 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1348 default : ShouldNotReachHere();
1349 }
1350 __ d2ieee();
1351 // Pop double precision number from rsp.
1352 __ pop(rax);
1353 __ pop(rdx);
1354 }
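// The JVM_ACC_STRICT paths above implement strictfp multiply/divide on
// x87 by scaling with two biases whose exact product is 1.0 (sketch):
//
//   result = ((x * bias1) op y) * bias2
//
// bias1 pushes intermediate results out of the extended-precision
// denormal range so they round as IEEE doubles; bias2 undoes the scaling.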
1357 void TemplateTable::ineg() {
1358 transition(itos, itos);
1359 __ negl(rax);
1360 }
1363 void TemplateTable::lneg() {
1364 transition(ltos, ltos);
1365 __ lneg(rdx, rax);
1366 }
1369 void TemplateTable::fneg() {
1370 transition(ftos, ftos);
1371 __ fchs();
1372 }
1375 void TemplateTable::dneg() {
1376 transition(dtos, dtos);
1377 __ fchs();
1378 }
1381 void TemplateTable::iinc() {
1382 transition(vtos, vtos);
1383 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1384 locals_index(rbx);
1385 __ addl(iaddress(rbx), rdx);
1386 }
1389 void TemplateTable::wide_iinc() {
1390 transition(vtos, vtos);
1391 __ movl(rdx, at_bcp(4)); // get constant
1392 locals_index_wide(rbx);
1393 __ bswapl(rdx); // swap bytes & sign-extend constant
1394 __ sarl(rdx, 16);
1395 __ addl(iaddress(rbx), rdx);
1396 // Note: should probably use only one movl to get both
1397 // the index and the constant -> fix this
1398 }
1401 void TemplateTable::convert() {
1402 // Checking
1403 #ifdef ASSERT
1404 { TosState tos_in = ilgl;
1405 TosState tos_out = ilgl;
1406 switch (bytecode()) {
1407 case Bytecodes::_i2l: // fall through
1408 case Bytecodes::_i2f: // fall through
1409 case Bytecodes::_i2d: // fall through
1410 case Bytecodes::_i2b: // fall through
1411 case Bytecodes::_i2c: // fall through
1412 case Bytecodes::_i2s: tos_in = itos; break;
1413 case Bytecodes::_l2i: // fall through
1414 case Bytecodes::_l2f: // fall through
1415 case Bytecodes::_l2d: tos_in = ltos; break;
1416 case Bytecodes::_f2i: // fall through
1417 case Bytecodes::_f2l: // fall through
1418 case Bytecodes::_f2d: tos_in = ftos; break;
1419 case Bytecodes::_d2i: // fall through
1420 case Bytecodes::_d2l: // fall through
1421 case Bytecodes::_d2f: tos_in = dtos; break;
1422 default : ShouldNotReachHere();
1423 }
1424 switch (bytecode()) {
1425 case Bytecodes::_l2i: // fall through
1426 case Bytecodes::_f2i: // fall through
1427 case Bytecodes::_d2i: // fall through
1428 case Bytecodes::_i2b: // fall through
1429 case Bytecodes::_i2c: // fall through
1430 case Bytecodes::_i2s: tos_out = itos; break;
1431 case Bytecodes::_i2l: // fall through
1432 case Bytecodes::_f2l: // fall through
1433 case Bytecodes::_d2l: tos_out = ltos; break;
1434 case Bytecodes::_i2f: // fall through
1435 case Bytecodes::_l2f: // fall through
1436 case Bytecodes::_d2f: tos_out = ftos; break;
1437 case Bytecodes::_i2d: // fall through
1438 case Bytecodes::_l2d: // fall through
1439 case Bytecodes::_f2d: tos_out = dtos; break;
1440 default : ShouldNotReachHere();
1441 }
1442 transition(tos_in, tos_out);
1443 }
1444 #endif // ASSERT
1446 // Conversion
1447 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1448 switch (bytecode()) {
1449 case Bytecodes::_i2l:
1450 __ extend_sign(rdx, rax);
1451 break;
1452 case Bytecodes::_i2f:
1453 __ push(rax); // store int on tos
1454 __ fild_s(at_rsp()); // load int to ST0
1455 __ f2ieee(); // truncate to float size
1456 __ pop(rcx); // adjust rsp
1457 break;
1458 case Bytecodes::_i2d:
1459 __ push(rax); // add one slot for d2ieee()
1460 __ push(rax); // store int on tos
1461 __ fild_s(at_rsp()); // load int to ST0
1462 __ d2ieee(); // truncate to double size
1463 __ pop(rcx); // adjust rsp
1464 __ pop(rcx);
1465 break;
1466 case Bytecodes::_i2b:
1467 __ shll(rax, 24); // truncate upper 24 bits
1468 __ sarl(rax, 24); // and sign-extend byte
1469 LP64_ONLY(__ movsbl(rax, rax));
1470 break;
1471 case Bytecodes::_i2c:
1472 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1473 LP64_ONLY(__ movzwl(rax, rax));
1474 break;
1475 case Bytecodes::_i2s:
1476 __ shll(rax, 16); // truncate upper 16 bits
1477 __ sarl(rax, 16); // and sign-extend short
1478 LP64_ONLY(__ movswl(rax, rax));
1479 break;
1480 case Bytecodes::_l2i:
1481 /* nothing to do */
1482 break;
1483 case Bytecodes::_l2f:
1484 __ push(rdx); // store long on tos
1485 __ push(rax);
1486 __ fild_d(at_rsp()); // load long to ST0
1487 __ f2ieee(); // truncate to float size
1488 __ pop(rcx); // adjust rsp
1489 __ pop(rcx);
1490 break;
1491 case Bytecodes::_l2d:
1492 __ push(rdx); // store long on tos
1493 __ push(rax);
1494 __ fild_d(at_rsp()); // load long to ST0
1495 __ d2ieee(); // truncate to double size
1496 __ pop(rcx); // adjust rsp
1497 __ pop(rcx);
1498 break;
1499 case Bytecodes::_f2i:
1500 __ push(rcx); // reserve space for argument
1501 __ fstp_s(at_rsp()); // pass float argument on stack
1502 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1503 break;
1504 case Bytecodes::_f2l:
1505 __ push(rcx); // reserve space for argument
1506 __ fstp_s(at_rsp()); // pass float argument on stack
1507 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1508 break;
1509 case Bytecodes::_f2d:
1510 /* nothing to do */
1511 break;
1512 case Bytecodes::_d2i:
1513 __ push(rcx); // reserve space for argument
1514 __ push(rcx);
1515 __ fstp_d(at_rsp()); // pass double argument on stack
1516 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1517 break;
1518 case Bytecodes::_d2l:
1519 __ push(rcx); // reserve space for argument
1520 __ push(rcx);
1521 __ fstp_d(at_rsp()); // pass double argument on stack
1522 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1523 break;
1524 case Bytecodes::_d2f:
1525 __ push(rcx); // reserve space for f2ieee()
1526 __ f2ieee(); // truncate to float size
1527 __ pop(rcx); // adjust rsp
1528 break;
1529 default :
1530 ShouldNotReachHere();
1531 }
1532 }
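// A worked example for the i2b path above (illustrative): rax = 0x0000017F;
// shll(24) -> 0x7F000000; sarl(24) -> 0x0000007F (= 127). For 0x00000180:
// shll(24) -> 0x80000000; sarl(24) -> 0xFFFFFF80 (= -128).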
1535 void TemplateTable::lcmp() {
1536 transition(ltos, itos);
1537 // y = rdx:rax
1538 __ pop_l(rbx, rcx); // get x = rcx:rbx
1539 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1540 __ mov(rax, rcx);
1541 }
1544 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1545 if (is_float) {
1546 __ fld_s(at_rsp());
1547 } else {
1548 __ fld_d(at_rsp());
1549 __ pop(rdx);
1550 }
1551 __ pop(rcx);
1552 __ fcmp2int(rax, unordered_result < 0);
1553 }
1556 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1557 __ get_method(rcx); // ECX holds method
1558 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1560 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1561 InvocationCounter::counter_offset();
1562 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1563 InvocationCounter::counter_offset();
1565 // Load up EDX with the branch displacement
1566 if (is_wide) {
1567 __ movl(rdx, at_bcp(1));
1568 } else {
1569 __ load_signed_short(rdx, at_bcp(1));
1570 }
1571 __ bswapl(rdx);
1572 if (!is_wide) __ sarl(rdx, 16);
1573 LP64_ONLY(__ movslq(rdx, rdx));
1576 // Handle all the JSR stuff here, then exit.
1577 // It's much shorter and cleaner than intermingling with the
1578 // non-JSR normal-branch stuff occurring below.
1579 if (is_jsr) {
1580 // Pre-load the next target bytecode into EBX
1581 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1583 // compute return address as bci in rax,
1584 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
1585 __ subptr(rax, Address(rcx, Method::const_offset()));
1586 // Adjust the bcp in RSI by the displacement in EDX
1587 __ addptr(rsi, rdx);
1588 // Push return address
1589 __ push_i(rax);
1590 // jsr returns vtos
1591 __ dispatch_only_noverify(vtos);
1592 return;
1593 }
1595 // Normal (non-jsr) branch handling
1597 // Adjust the bcp in RSI by the displacement in EDX
1598 __ addptr(rsi, rdx);
1600 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1601 Label backedge_counter_overflow;
1602 Label profile_method;
1603 Label dispatch;
1604 if (UseLoopCounter) {
1605 // increment backedge counter for backward branches
1606 // rax,: MDO
1607 // rbx,: MDO bumped taken-count
1608 // rcx: method
1609 // rdx: target offset
1610 // rsi: target bcp
1611 // rdi: locals pointer
1612 __ testl(rdx, rdx); // check if forward or backward branch
1613 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1615 // check if MethodCounters exists
1616 Label has_counters;
1617 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1618 __ testptr(rax, rax);
1619 __ jcc(Assembler::notZero, has_counters);
1620 __ push(rdx);
1621 __ push(rcx);
1622 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
1623 rcx);
1624 __ pop(rcx);
1625 __ pop(rdx);
1626 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1627 __ testptr(rax, rax);
1628 __ jcc(Assembler::zero, dispatch);
1629 __ bind(has_counters);
1631 if (TieredCompilation) {
1632 Label no_mdo;
1633 int increment = InvocationCounter::count_increment;
1634 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1635 if (ProfileInterpreter) {
1636 // Are we profiling?
1637 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
1638 __ testptr(rbx, rbx);
1639 __ jccb(Assembler::zero, no_mdo);
1640 // Increment the MDO backedge counter
1641 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
1642 in_bytes(InvocationCounter::counter_offset()));
1643 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1644 rax, false, Assembler::zero, &backedge_counter_overflow);
1645 __ jmp(dispatch);
1646 }
1647 __ bind(no_mdo);
1648 // Increment backedge counter in MethodCounters*
1649 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1650 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1651 rax, false, Assembler::zero, &backedge_counter_overflow);
1652 } else {
1653 // increment counter
1654 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1655 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1656 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1657 __ movl(Address(rcx, be_offset), rax); // store counter
1659 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1661 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1662 __ addl(rax, Address(rcx, be_offset)); // add both counters
1664 if (ProfileInterpreter) {
1665 // Test to see if we should create a method data oop
1666 __ cmp32(rax,
1667 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1668 __ jcc(Assembler::less, dispatch);
1670 // if no method data exists, go to profile method
1671 __ test_method_data_pointer(rax, profile_method);
1673 if (UseOnStackReplacement) {
1674 // check for overflow against rbx, which is the MDO taken count
1675 __ cmp32(rbx,
1676 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1677 __ jcc(Assembler::below, dispatch);
1679 // When ProfileInterpreter is on, the backedge_count comes from the
1680 // MethodData*, whose value does not get reset on the call to
1681 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1682 // routine while the method is being compiled, add a second test to make
1683 // sure the overflow function is called only once every overflow_frequency.
1684 const int overflow_frequency = 1024;
1685 __ andptr(rbx, overflow_frequency-1);
1686 __ jcc(Assembler::zero, backedge_counter_overflow);
1687 }
1688 } else {
1689 if (UseOnStackReplacement) {
1690 // check for overflow against rax, which is the sum of the counters
1691 __ cmp32(rax,
1692 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1693 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1695 }
1696 }
1697 }
1698 __ bind(dispatch);
1699 }
1701 // Pre-load the next target bytecode into EBX
1702 __ load_unsigned_byte(rbx, Address(rsi, 0));
1704 // continue with the bytecode @ target
1705 // rax,: return bci for jsr's, unused otherwise
1706 // rbx,: target bytecode
1707 // rsi: target bcp
1708 __ dispatch_only(vtos);
1710 if (UseLoopCounter) {
1711 if (ProfileInterpreter) {
1712 // Out-of-line code to allocate method data oop.
1713 __ bind(profile_method);
1714 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1715 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1716 __ set_method_data_pointer_for_bcp();
1717 __ jmp(dispatch);
1718 }
1720 if (UseOnStackReplacement) {
1722 // backedge counter overflow
1723 __ bind(backedge_counter_overflow);
1724 __ negptr(rdx);
1725 __ addptr(rdx, rsi); // branch bcp
1726 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1727 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1729 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1730 // rbx,: target bytecode
1731 // rdx: scratch
1732 // rdi: locals pointer
1733 // rsi: bcp
1734 __ testptr(rax, rax); // test result
1735 __ jcc(Assembler::zero, dispatch); // no osr if null
1736 // nmethod may have been invalidated (VM may block upon call_VM return)
1737 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1738 __ cmpl(rcx, InvalidOSREntryBci);
1739 __ jcc(Assembler::equal, dispatch);
1741 // We have the address of an on-stack replacement routine in rax.
1742 // We need to prepare to execute the OSR method. First we must
1743 // migrate the locals and monitors off of the stack.
1745 __ mov(rbx, rax); // save the nmethod
1747 const Register thread = rcx;
1748 __ get_thread(thread);
1749 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1750 // rax, is OSR buffer, move it to expected parameter location
1751 __ mov(rcx, rax);
1753 // pop the interpreter frame
1754 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1755 __ leave(); // remove frame anchor
1756 __ pop(rdi); // get return address
1757 __ mov(rsp, rdx); // set sp to sender sp
1759 // Align stack pointer for compiled code (note that caller is
1760 // responsible for undoing this fixup by remembering the old SP
1761 // in an rbp-relative location)
1762 __ andptr(rsp, -(StackAlignmentInBytes));
1764 // push the (possibly adjusted) return address
1765 __ push(rdi);
1767 // and begin the OSR nmethod
1768 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1769 }
1770 }
1771 }
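// Backward-branch handling above, condensed to a sketch:
//
//   bcp += displacement;
//   if (displacement <= 0 && UseLoopCounter) {       // a backedge
//     increment backedge counter (MDO or MethodCounters);
//     if (counter overflowed && UseOnStackReplacement) {
//       nm = frequency_counter_overflow(branch_bcp); // may produce an OSR nmethod
//       if (nm != NULL && nm->entry_bci() != InvalidOSREntryBci) {
//         migrate locals/monitors off the stack and jump to nm's OSR entry;
//       }
//     }
//   }
//   dispatch the target bytecode;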
1774 void TemplateTable::if_0cmp(Condition cc) {
1775 transition(itos, vtos);
1776 // assume branch is more often taken than not (loops use backward branches)
1777 Label not_taken;
1778 __ testl(rax, rax);
1779 __ jcc(j_not(cc), not_taken);
1780 branch(false, false);
1781 __ bind(not_taken);
1782 __ profile_not_taken_branch(rax);
1783 }
1786 void TemplateTable::if_icmp(Condition cc) {
1787 transition(itos, vtos);
1788 // assume branch is more often taken than not (loops use backward branches)
1789 Label not_taken;
1790 __ pop_i(rdx);
1791 __ cmpl(rdx, rax);
1792 __ jcc(j_not(cc), not_taken);
1793 branch(false, false);
1794 __ bind(not_taken);
1795 __ profile_not_taken_branch(rax);
1796 }
1799 void TemplateTable::if_nullcmp(Condition cc) {
1800 transition(atos, vtos);
1801 // assume branch is more often taken than not (loops use backward branches)
1802 Label not_taken;
1803 __ testptr(rax, rax);
1804 __ jcc(j_not(cc), not_taken);
1805 branch(false, false);
1806 __ bind(not_taken);
1807 __ profile_not_taken_branch(rax);
1808 }
1811 void TemplateTable::if_acmp(Condition cc) {
1812 transition(atos, vtos);
1813 // assume branch is more often taken than not (loops use backward branches)
1814 Label not_taken;
1815 __ pop_ptr(rdx);
1816 __ cmpptr(rdx, rax);
1817 __ jcc(j_not(cc), not_taken);
1818 branch(false, false);
1819 __ bind(not_taken);
1820 __ profile_not_taken_branch(rax);
1821 }
1824 void TemplateTable::ret() {
1825 transition(vtos, vtos);
1826 locals_index(rbx);
1827 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1828 __ profile_ret(rbx, rcx);
1829 __ get_method(rax);
1830 __ movptr(rsi, Address(rax, Method::const_offset()));
1831 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1832 ConstMethod::codes_offset()));
1833 __ dispatch_next(vtos);
1834 }
1837 void TemplateTable::wide_ret() {
1838 transition(vtos, vtos);
1839 locals_index_wide(rbx);
1840 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1841 __ profile_ret(rbx, rcx);
1842 __ get_method(rax);
1843 __ movptr(rsi, Address(rax, Method::const_offset()));
1844 __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset()));
1845 __ dispatch_next(vtos);
1846 }
1849 void TemplateTable::tableswitch() {
1850 Label default_case, continue_execution;
1851 transition(itos, vtos);
1852 // align rsi
1853 __ lea(rbx, at_bcp(wordSize));
1854 __ andptr(rbx, -wordSize);
1855 // load lo & hi
1856 __ movl(rcx, Address(rbx, 1 * wordSize));
1857 __ movl(rdx, Address(rbx, 2 * wordSize));
1858 __ bswapl(rcx);
1859 __ bswapl(rdx);
1860 // check against lo & hi
1861 __ cmpl(rax, rcx);
1862 __ jccb(Assembler::less, default_case);
1863 __ cmpl(rax, rdx);
1864 __ jccb(Assembler::greater, default_case);
1865 // lookup dispatch offset
1866 __ subl(rax, rcx);
1867 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1868 __ profile_switch_case(rax, rbx, rcx);
1869 // continue execution
1870 __ bind(continue_execution);
1871 __ bswapl(rdx);
1872 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1873 __ addptr(rsi, rdx);
1874 __ dispatch_only(vtos);
1875 // handle default
1876 __ bind(default_case);
1877 __ profile_switch_default(rax);
1878 __ movl(rdx, Address(rbx, 0));
1879 __ jmp(continue_execution);
1880 }
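// tableswitch operand layout after the 4-byte alignment above (sketch;
// all words are big-endian in the bytecode stream):
//
//   [pad] [default] [low] [high] [offset 0] ... [offset (high - low)]
//
// The taken entry is offset[key - low], or default when key < low or
// key > high.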
1883 void TemplateTable::lookupswitch() {
1884 transition(itos, itos);
1885 __ stop("lookupswitch bytecode should have been rewritten");
1886 }
1889 void TemplateTable::fast_linearswitch() {
1890 transition(itos, vtos);
1891 Label loop_entry, loop, found, continue_execution;
1892 // bswapl rax so we can avoid bswapping the table entries
1893 __ bswapl(rax);
1894 // compute the word-aligned operand base (from rsi/bcp) in rbx
1895 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1896 __ andptr(rbx, -wordSize);
1897 // set counter
1898 __ movl(rcx, Address(rbx, wordSize));
1899 __ bswapl(rcx);
1900 __ jmpb(loop_entry);
1901 // table search
1902 __ bind(loop);
1903 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1904 __ jccb(Assembler::equal, found);
1905 __ bind(loop_entry);
1906 __ decrementl(rcx);
1907 __ jcc(Assembler::greaterEqual, loop);
1908 // default case
1909 __ profile_switch_default(rax);
1910 __ movl(rdx, Address(rbx, 0));
1911 __ jmpb(continue_execution);
1912 // entry found -> get offset
1913 __ bind(found);
1914 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1915 __ profile_switch_case(rcx, rax, rbx);
1916 // continue execution
1917 __ bind(continue_execution);
1918 __ bswapl(rdx);
1919 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1920 __ addptr(rsi, rdx);
1921 __ dispatch_only(vtos);
1922 }
1925 void TemplateTable::fast_binaryswitch() {
1926 transition(itos, vtos);
1927 // Implementation using the following core algorithm:
1928 //
1929 // int binary_search(int key, LookupswitchPair* array, int n) {
1930 // // Binary search according to "Methodik des Programmierens" by
1931 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1932 // int i = 0;
1933 // int j = n;
1934 // while (i+1 < j) {
1935 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1936 // // with Q: for all i: 0 <= i < n: key < a[i]
1937 // // where a stands for the array and assuming that the (non-existent)
1938 // // element a[n] is infinitely big.
1939 // int h = (i + j) >> 1;
1940 // // i < h < j
1941 // if (key < array[h].fast_match()) {
1942 // j = h;
1943 // } else {
1944 // i = h;
1945 // }
1946 // }
1947 // // R: a[i] <= key < a[i+1] or Q
1948 // // (i.e., if key is within array, i is the correct index)
1949 // return i;
1950 // }
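// Here a LookupswitchPair is the same two-word (match, offset) layout used in
// fast_linearswitch above, but the pairs are sorted by match value (as the
// JVM spec requires), which makes the binary search sketched above valid.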
1952 // register allocation
1953 const Register key = rax; // already set (tosca)
1954 const Register array = rbx;
1955 const Register i = rcx;
1956 const Register j = rdx;
1957 const Register h = rdi; // needs to be restored
1958 const Register temp = rsi;
1959 // setup array
1960 __ save_bcp();
1962 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1963 __ andptr(array, -wordSize);
1964 // initialize i & j
1965 __ xorl(i, i); // i = 0;
1966 __ movl(j, Address(array, -wordSize)); // j = length(array);
1967 // Convert j into native byte ordering
1968 __ bswapl(j);
1969 // and start
1970 Label entry;
1971 __ jmp(entry);
1973 // binary search loop
1974 { Label loop;
1975 __ bind(loop);
1976 // int h = (i + j) >> 1;
1977 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1978 __ sarl(h, 1); // h = (i + j) >> 1;
1979 // if (key < array[h].fast_match()) {
1980 // j = h;
1981 // } else {
1982 // i = h;
1983 // }
1984 // Convert array[h].match to native byte-ordering before compare
1985 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1986 __ bswapl(temp);
1987 __ cmpl(key, temp);
1988 // j = h if (key < array[h].fast_match())
1989 __ cmov32(Assembler::less , j, h);
1990 // i = h if (key >= array[h].fast_match())
1991 __ cmov32(Assembler::greaterEqual, i, h);
1992 // while (i+1 < j)
1993 __ bind(entry);
1994 __ leal(h, Address(i, 1)); // i+1
1995 __ cmpl(h, j); // i+1 < j
1996 __ jcc(Assembler::less, loop);
1997 }
1999 // end of binary search, result index is i (must check again!)
2000 Label default_case;
2001 // Convert array[i].match to native byte-ordering before compare
2002 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
2003 __ bswapl(temp);
2004 __ cmpl(key, temp);
2005 __ jcc(Assembler::notEqual, default_case);
2007 // entry found -> j = offset
2008 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
2009 __ profile_switch_case(i, key, array);
2010 __ bswapl(j);
2011 LP64_ONLY(__ movslq(j, j));
2012 __ restore_bcp();
2013 __ restore_locals(); // restore rdi
2014 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2016 __ addptr(rsi, j);
2017 __ dispatch_only(vtos);
2019 // default case -> j = default offset
2020 __ bind(default_case);
2021 __ profile_switch_default(i);
2022 __ movl(j, Address(array, -2*wordSize));
2023 __ bswapl(j);
2024 LP64_ONLY(__ movslq(j, j));
2025 __ restore_bcp();
2026 __ restore_locals(); // restore rdi
2027 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2028 __ addptr(rsi, j);
2029 __ dispatch_only(vtos);
2030 }
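// Note: the bytecode rewriter turns the return of Object.<init> into
// _return_register_finalizer so that, on return, an instance whose class has
// JVM_ACC_HAS_FINALIZER set gets registered with the VM.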
2033 void TemplateTable::_return(TosState state) {
2034 transition(state, state);
2035 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2037 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2038 assert(state == vtos, "only valid state");
2039 __ movptr(rax, aaddress(0));
2040 __ load_klass(rdi, rax);
2041 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2042 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2043 Label skip_register_finalizer;
2044 __ jcc(Assembler::zero, skip_register_finalizer);
2046 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2048 __ bind(skip_register_finalizer);
2049 }
2051 // Narrow result if state is itos but result type is smaller.
2052 // Need to narrow in the return bytecode rather than in generate_return_entry
2053 // since compiled code callers expect the result to already be narrowed.
2054 if (state == itos) {
2055 __ narrow(rax);
2056 }
2057 __ remove_activation(state, rsi);
2059 __ jmp(rsi);
2060 }
2063 // ----------------------------------------------------------------------------
2064 // Volatile variables demand their effects be made known to all CPU's in
2065 // order. Store buffers on most chips allow reads & writes to reorder; the
2066 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2067 // memory barrier (i.e., it's not sufficient that the interpreter does not
2068 // reorder volatile references, the hardware also must not reorder them).
2069 //
2070 // According to the new Java Memory Model (JMM):
2071 // (1) All volatiles are serialized with respect to each other.
2072 // ALSO reads & writes act as acquire & release, so:
2073 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2074 // the read float up to before the read. It's OK for non-volatile memory refs
2075 // that happen before the volatile read to float down below it.
2076 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2077 // that happen BEFORE the write float down to after the write. It's OK for
2078 // non-volatile memory refs that happen after the volatile write to float up
2079 // before it.
2080 //
2081 // We only put in barriers around volatile refs (they are expensive), not
2082 // _between_ memory refs (that would require us to track the flavor of the
2083 // previous memory refs). Requirements (2) and (3) require some barriers
2084 // before volatile stores and after volatile loads. These nearly cover
2085 // requirement (1) but miss the volatile-store-volatile-load case. This final
2086 // case is placed after volatile-stores although it could just as well go
2087 // before volatile-loads.
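// Roughly, then, the placement described above amounts to (a sketch, not the
// generated code):
//
//   volatile load:   <load>;  LoadLoad|LoadStore barrier
//   volatile store:  StoreStore barrier;  <store>;  StoreLoad barrier
//
// On x86 the hardware already preserves every ordering except StoreLoad, so
// only that membar costs a real (locked) instruction.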
2088 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2089 // Helper function to insert an is-volatile test and memory barrier
2090 if (!os::is_MP()) return; // Not needed on a single-CPU machine
2091 __ membar(order_constraint);
2092 }
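// In effect, resolve_cache_and_index does (a sketch):
//
//   if (bytecode recorded in the cp cache entry != bytecode())
//     call_VM(InterpreterRuntime::resolve_*);  // link, then reload the entry
//
// The recorded bytecode thus doubles as the entry's "resolved" flag.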
2094 void TemplateTable::resolve_cache_and_index(int byte_no,
2095 Register Rcache,
2096 Register index,
2097 size_t index_size) {
2098 const Register temp = rbx;
2099 assert_different_registers(Rcache, index, temp);
2101 Label resolved;
2102 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2103 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2104 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2105 __ jcc(Assembler::equal, resolved);
2107 // resolve first time through
2108 address entry;
2109 switch (bytecode()) {
2110 case Bytecodes::_getstatic : // fall through
2111 case Bytecodes::_putstatic : // fall through
2112 case Bytecodes::_getfield : // fall through
2113 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2114 case Bytecodes::_invokevirtual : // fall through
2115 case Bytecodes::_invokespecial : // fall through
2116 case Bytecodes::_invokestatic : // fall through
2117 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2118 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2119 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2120 default:
2121 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2122 break;
2123 }
2124 __ movl(temp, (int)bytecode());
2125 __ call_VM(noreg, entry, temp);
2126 // Update registers with resolved info
2127 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2128 __ bind(resolved);
2129 }
2132 // The cache and index registers must be set before call
2133 void TemplateTable::load_field_cp_cache_entry(Register obj,
2134 Register cache,
2135 Register index,
2136 Register off,
2137 Register flags,
2138 bool is_static = false) {
2139 assert_different_registers(cache, index, flags, off);
2141 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2142 // Field offset
2143 __ movptr(off, Address(cache, index, Address::times_ptr,
2144 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2145 // Flags
2146 __ movl(flags, Address(cache, index, Address::times_ptr,
2147 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2149 // klass overwrite register
2150 if (is_static) {
2151 __ movptr(obj, Address(cache, index, Address::times_ptr,
2152 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2153 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2154 __ movptr(obj, Address(obj, mirror_offset));
2155 }
2156 }
2158 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2159 Register method,
2160 Register itable_index,
2161 Register flags,
2162 bool is_invokevirtual,
2163 bool is_invokevfinal, /*unused*/
2164 bool is_invokedynamic) {
2165 // setup registers
2166 const Register cache = rcx;
2167 const Register index = rdx;
2168 assert_different_registers(method, flags);
2169 assert_different_registers(method, cache, index);
2170 assert_different_registers(itable_index, flags);
2171 assert_different_registers(itable_index, cache, index);
2172 // determine constant pool cache field offsets
2173 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2174 const int method_offset = in_bytes(
2175 ConstantPoolCache::base_offset() +
2176 ((byte_no == f2_byte)
2177 ? ConstantPoolCacheEntry::f2_offset()
2178 : ConstantPoolCacheEntry::f1_offset()));
2179 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2180 ConstantPoolCacheEntry::flags_offset());
2181 // access constant pool cache fields
2182 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2183 ConstantPoolCacheEntry::f2_offset());
2185 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2186 resolve_cache_and_index(byte_no, cache, index, index_size);
2187 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2189 if (itable_index != noreg) {
2190 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2191 }
2192 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2193 }
2196 // The cache and index registers are expected to be set before the call.
2197 // The correct values of the cache and index registers are preserved.
2198 void TemplateTable::jvmti_post_field_access(Register cache,
2199 Register index,
2200 bool is_static,
2201 bool has_tos) {
2202 if (JvmtiExport::can_post_field_access()) {
2203 // Check to see if a field access watch has been set before we take
2204 // the time to call into the VM.
2205 Label L1;
2206 assert_different_registers(cache, index, rax);
2207 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2208 __ testl(rax,rax);
2209 __ jcc(Assembler::zero, L1);
2211 // cache entry pointer
2212 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2213 __ shll(index, LogBytesPerWord);
2214 __ addptr(cache, index);
2215 if (is_static) {
2216 __ xorptr(rax, rax); // NULL object reference
2217 } else {
2218 __ pop(atos); // Get the object
2219 __ verify_oop(rax);
2220 __ push(atos); // Restore stack state
2221 }
2222 // rax: object pointer or NULL
2223 // cache: cache entry pointer
2224 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2225 rax, cache);
2226 __ get_cache_and_index_at_bcp(cache, index, 1);
2227 __ bind(L1);
2228 }
2229 }
2231 void TemplateTable::pop_and_check_object(Register r) {
2232 __ pop_ptr(r);
2233 __ null_check(r); // for field access must check obj.
2234 __ verify_oop(r);
2235 }
2237 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2238 transition(vtos, vtos);
2240 const Register cache = rcx;
2241 const Register index = rdx;
2242 const Register obj = rcx;
2243 const Register off = rbx;
2244 const Register flags = rax;
2246 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2247 jvmti_post_field_access(cache, index, is_static, false);
2248 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2250 if (!is_static) pop_and_check_object(obj);
2252 const Address lo(obj, off, Address::times_1, 0*wordSize);
2253 const Address hi(obj, off, Address::times_1, 1*wordSize);
2255 Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2257 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2258 assert(btos == 0, "change code, btos != 0");
2259 // btos
2260 __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2261 __ jcc(Assembler::notZero, notByte);
2263 __ load_signed_byte(rax, lo );
2264 __ push(btos);
2265 // Rewrite bytecode to be faster
2266 if (!is_static) {
2267 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2268 }
2269 __ jmp(Done);
2271 __ bind(notByte);
2273 __ cmpl(flags, ztos);
2274 __ jcc(Assembler::notEqual, notBool);
2276 // ztos (same code as btos)
2277 __ load_signed_byte(rax, lo);
2278 __ push(ztos);
2279 // Rewrite bytecode to be faster
2280 if (!is_static) {
2281 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2282 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2283 }
2284 __ jmp(Done);
2286 __ bind(notBool);
2288 // itos
2289 __ cmpl(flags, itos );
2290 __ jcc(Assembler::notEqual, notInt);
2292 __ movl(rax, lo );
2293 __ push(itos);
2294 // Rewrite bytecode to be faster
2295 if (!is_static) {
2296 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2297 }
2298 __ jmp(Done);
2300 __ bind(notInt);
2301 // atos
2302 __ cmpl(flags, atos );
2303 __ jcc(Assembler::notEqual, notObj);
2305 __ movl(rax, lo );
2306 __ push(atos);
2307 if (!is_static) {
2308 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2309 }
2310 __ jmp(Done);
2312 __ bind(notObj);
2313 // ctos
2314 __ cmpl(flags, ctos );
2315 __ jcc(Assembler::notEqual, notChar);
2317 __ load_unsigned_short(rax, lo );
2318 __ push(ctos);
2319 if (!is_static) {
2320 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2321 }
2322 __ jmp(Done);
2324 __ bind(notChar);
2325 // stos
2326 __ cmpl(flags, stos );
2327 __ jcc(Assembler::notEqual, notShort);
2329 __ load_signed_short(rax, lo );
2330 __ push(stos);
2331 if (!is_static) {
2332 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2333 }
2334 __ jmp(Done);
2336 __ bind(notShort);
2337 // ltos
2338 __ cmpl(flags, ltos );
2339 __ jcc(Assembler::notEqual, notLong);
2341 // Generate code as if volatile. There just aren't enough registers to
2342 // save that information and this code is faster than the test.
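2342a // (fild_d/fistp_d moves all 8 bytes in a single memory access, giving the
2342b // atomic 64-bit load that a volatile long requires on 32-bit x86.)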
2343 __ fild_d(lo); // Must load atomically
2344 __ subptr(rsp,2*wordSize); // Make space for store
2345 __ fistp_d(Address(rsp,0));
2346 __ pop(rax);
2347 __ pop(rdx);
2349 __ push(ltos);
2350 // Don't rewrite to _fast_lgetfield because the field might be volatile.
2351 __ jmp(Done);
2353 __ bind(notLong);
2354 // ftos
2355 __ cmpl(flags, ftos );
2356 __ jcc(Assembler::notEqual, notFloat);
2358 __ fld_s(lo);
2359 __ push(ftos);
2360 if (!is_static) {
2361 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2362 }
2363 __ jmp(Done);
2365 __ bind(notFloat);
2366 // dtos
2367 __ cmpl(flags, dtos );
2368 __ jcc(Assembler::notEqual, notDouble);
2370 __ fld_d(lo);
2371 __ push(dtos);
2372 if (!is_static) {
2373 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2374 }
2375 __ jmpb(Done);
2377 __ bind(notDouble);
2379 __ stop("Bad state");
2381 __ bind(Done);
2382 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2383 // volatile_barrier( );
2384 }
2387 void TemplateTable::getfield(int byte_no) {
2388 getfield_or_static(byte_no, false);
2389 }
2392 void TemplateTable::getstatic(int byte_no) {
2393 getfield_or_static(byte_no, true);
2394 }
2396 // The cache and index registers are expected to be set before the call.
2397 // The function may destroy various registers, just not the cache and index registers.
2398 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2400 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2402 if (JvmtiExport::can_post_field_modification()) {
2403 // Check to see if a field modification watch has been set before we take
2404 // the time to call into the VM.
2405 Label L1;
2406 assert_different_registers(cache, index, rax);
2407 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2408 __ testl(rax, rax);
2409 __ jcc(Assembler::zero, L1);
2411 // The cache and index registers have already been set.
2412 // This would allow eliminating this call, but the cache and index
2413 // registers would then have to be used consistently after this line.
2414 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2416 if (is_static) {
2417 // Life is simple. Null out the object pointer.
2418 __ xorptr(rbx, rbx);
2419 } else {
2420 // Life is harder. The stack holds the value on top, followed by the object.
2421 // We don't know the size of the value, though; it could be one or two words
2422 // depending on its type. As a result, we must find the type to determine where
2423 // the object is.
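// That is (a sketch):  obj = *(rsp + expr_offset(is_two_word(tos) ? 2 : 1))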
2424 Label two_word, valsize_known;
2425 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2426 ConstantPoolCacheEntry::flags_offset())));
2427 __ mov(rbx, rsp);
2428 __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
2429 // Make sure we don't need to mask rcx after the above shift
2430 ConstantPoolCacheEntry::verify_tos_state_shift();
2431 __ cmpl(rcx, ltos);
2432 __ jccb(Assembler::equal, two_word);
2433 __ cmpl(rcx, dtos);
2434 __ jccb(Assembler::equal, two_word);
2435 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2436 __ jmpb(valsize_known);
2438 __ bind(two_word);
2439 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2441 __ bind(valsize_known);
2442 // setup object pointer
2443 __ movptr(rbx, Address(rbx, 0));
2444 }
2445 // cache entry pointer
2446 __ addptr(rax, in_bytes(cp_base_offset));
2447 __ shll(rdx, LogBytesPerWord);
2448 __ addptr(rax, rdx);
2449 // object (tos)
2450 __ mov(rcx, rsp);
2451 // rbx: object pointer set up above (NULL if static)
2452 // rax: cache entry pointer
2453 // rcx: jvalue object on the stack
2454 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2455 rbx, rax, rcx);
2456 __ get_cache_and_index_at_bcp(cache, index, 1);
2457 __ bind(L1);
2458 }
2459 }
2462 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2463 transition(vtos, vtos);
2465 const Register cache = rcx;
2466 const Register index = rdx;
2467 const Register obj = rcx;
2468 const Register off = rbx;
2469 const Register flags = rax;
2471 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2472 jvmti_post_field_mod(cache, index, is_static);
2473 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2475 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2476 // volatile_barrier( );
2478 Label notVolatile, Done;
2479 __ movl(rdx, flags);
2480 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2481 __ andl(rdx, 0x1);
2483 // field addresses
2484 const Address lo(obj, off, Address::times_1, 0*wordSize);
2485 const Address hi(obj, off, Address::times_1, 1*wordSize);
2487 Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2489 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2490 assert(btos == 0, "change code, btos != 0");
2491 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2492 __ jcc(Assembler::notZero, notByte);
2494 // btos
2495 {
2496 __ pop(btos);
2497 if (!is_static) pop_and_check_object(obj);
2498 __ movb(lo, rax);
2499 if (!is_static) {
2500 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2501 }
2502 __ jmp(Done);
2503 }
2505 __ bind(notByte);
2506 __ cmpl(flags, ztos);
2507 __ jcc(Assembler::notEqual, notBool);
2509 // ztos
2510 {
2511 __ pop(ztos);
2512 if (!is_static) pop_and_check_object(obj);
2513 __ andl(rax, 0x1);
2514 __ movb(lo, rax);
2515 if (!is_static) {
2516 patch_bytecode(Bytecodes::_fast_zputfield, rcx, rbx, true, byte_no);
2517 }
2518 __ jmp(Done);
2519 }
2521 __ bind(notBool);
2522 __ cmpl(flags, itos);
2523 __ jcc(Assembler::notEqual, notInt);
2525 // itos
2526 {
2527 __ pop(itos);
2528 if (!is_static) pop_and_check_object(obj);
2529 __ movl(lo, rax);
2530 if (!is_static) {
2531 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2532 }
2533 __ jmp(Done);
2534 }
2536 __ bind(notInt);
2537 __ cmpl(flags, atos);
2538 __ jcc(Assembler::notEqual, notObj);
2540 // atos
2541 {
2542 __ pop(atos);
2543 if (!is_static) pop_and_check_object(obj);
2544 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2545 if (!is_static) {
2546 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2547 }
2548 __ jmp(Done);
2549 }
2551 __ bind(notObj);
2552 __ cmpl(flags, ctos);
2553 __ jcc(Assembler::notEqual, notChar);
2555 // ctos
2556 {
2557 __ pop(ctos);
2558 if (!is_static) pop_and_check_object(obj);
2559 __ movw(lo, rax);
2560 if (!is_static) {
2561 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2562 }
2563 __ jmp(Done);
2564 }
2566 __ bind(notChar);
2567 __ cmpl(flags, stos);
2568 __ jcc(Assembler::notEqual, notShort);
2570 // stos
2571 {
2572 __ pop(stos);
2573 if (!is_static) pop_and_check_object(obj);
2574 __ movw(lo, rax);
2575 if (!is_static) {
2576 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2577 }
2578 __ jmp(Done);
2579 }
2581 __ bind(notShort);
2582 __ cmpl(flags, ltos);
2583 __ jcc(Assembler::notEqual, notLong);
2585 // ltos
2586 {
2587 Label notVolatileLong;
2588 __ testl(rdx, rdx);
2589 __ jcc(Assembler::zero, notVolatileLong);
2591 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2592 if (!is_static) pop_and_check_object(obj);
2594 // Replace with real volatile test
2595 __ push(rdx);
2596 __ push(rax); // Must update atomically with FIST
2597 __ fild_d(Address(rsp,0)); // So load into FPU register
2598 __ fistp_d(lo); // and put into memory atomically
2599 __ addptr(rsp, 2*wordSize);
2600 // volatile_barrier();
2601 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2602 Assembler::StoreStore));
2603 // Don't rewrite volatile version
2604 __ jmp(notVolatile);
2606 __ bind(notVolatileLong);
2608 __ pop(ltos); // overwrites rdx
2609 if (!is_static) pop_and_check_object(obj);
2610 NOT_LP64(__ movptr(hi, rdx));
2611 __ movptr(lo, rax);
2612 if (!is_static) {
2613 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2614 }
2615 __ jmp(notVolatile);
2616 }
2618 __ bind(notLong);
2619 __ cmpl(flags, ftos);
2620 __ jcc(Assembler::notEqual, notFloat);
2622 // ftos
2623 {
2624 __ pop(ftos);
2625 if (!is_static) pop_and_check_object(obj);
2626 __ fstp_s(lo);
2627 if (!is_static) {
2628 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2629 }
2630 __ jmp(Done);
2631 }
2633 __ bind(notFloat);
2634 #ifdef ASSERT
2635 __ cmpl(flags, dtos);
2636 __ jcc(Assembler::notEqual, notDouble);
2637 #endif
2639 // dtos
2640 {
2641 __ pop(dtos);
2642 if (!is_static) pop_and_check_object(obj);
2643 __ fstp_d(lo);
2644 if (!is_static) {
2645 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2646 }
2647 __ jmp(Done);
2648 }
2650 #ifdef ASSERT
2651 __ bind(notDouble);
2652 __ stop("Bad state");
2653 #endif
2655 __ bind(Done);
2657 // Check for volatile store
2658 __ testl(rdx, rdx);
2659 __ jcc(Assembler::zero, notVolatile);
2660 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2661 Assembler::StoreStore));
2662 __ bind(notVolatile);
2663 }
2666 void TemplateTable::putfield(int byte_no) {
2667 putfield_or_static(byte_no, false);
2668 }
2671 void TemplateTable::putstatic(int byte_no) {
2672 putfield_or_static(byte_no, true);
2673 }
2675 void TemplateTable::jvmti_post_fast_field_mod() {
2676 if (JvmtiExport::can_post_field_modification()) {
2677 // Check to see if a field modification watch has been set before we take
2678 // the time to call into the VM.
2679 Label L2;
2680 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2681 __ testl(rcx,rcx);
2682 __ jcc(Assembler::zero, L2);
2683 __ pop_ptr(rbx); // copy the object pointer from tos
2684 __ verify_oop(rbx);
2685 __ push_ptr(rbx); // put the object pointer back on tos
2687 // Save tos values before call_VM() clobbers them. Since we have
2688 // to do it for every data type, we use the saved values as the
2689 // jvalue object.
2690 switch (bytecode()) { // load values into the jvalue object
2691 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2692 case Bytecodes::_fast_bputfield: // fall through
2693 case Bytecodes::_fast_zputfield: // fall through
2694 case Bytecodes::_fast_sputfield: // fall through
2695 case Bytecodes::_fast_cputfield: // fall through
2696 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2697 case Bytecodes::_fast_dputfield: __ push_d(); break;
2698 case Bytecodes::_fast_fputfield: __ push_f(); break;
2699 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2701 default:
2702 ShouldNotReachHere();
2703 }
2704 __ mov(rcx, rsp); // points to jvalue on the stack
2705 // access constant pool cache entry
2706 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2707 __ verify_oop(rbx);
2708 // rbx: object pointer copied above
2709 // rax: cache entry pointer
2710 // rcx: jvalue object on the stack
2711 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2713 switch (bytecode()) { // restore tos values
2714 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2715 case Bytecodes::_fast_bputfield: // fall through
2716 case Bytecodes::_fast_zputfield: // fall through
2717 case Bytecodes::_fast_sputfield: // fall through
2718 case Bytecodes::_fast_cputfield: // fall through
2719 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2720 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2721 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2722 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2723 }
2724 __ bind(L2);
2725 }
2726 }
2728 void TemplateTable::fast_storefield(TosState state) {
2729 transition(state, vtos);
2731 ByteSize base = ConstantPoolCache::base_offset();
2733 jvmti_post_fast_field_mod();
2735 // access constant pool cache
2736 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2738 // test for volatile with rdx, but rdx is the tos register for lputfield.
2739 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2740 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2741 ConstantPoolCacheEntry::flags_offset())));
2743 // replace index with field offset from cache entry
2744 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2746 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2747 // volatile_barrier( );
2749 Label notVolatile, Done;
2750 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2751 __ andl(rdx, 0x1);
2752 // Check for volatile store
2753 __ testl(rdx, rdx);
2754 __ jcc(Assembler::zero, notVolatile);
2756 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2758 // Get object from stack
2759 pop_and_check_object(rcx);
2761 // field addresses
2762 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2763 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2765 // access field
2766 switch (bytecode()) {
2767 case Bytecodes::_fast_zputfield: __ andl(rax, 0x1); // boolean is true if LSB is 1
2768 // fall through to bputfield
2769 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2770 case Bytecodes::_fast_sputfield: // fall through
2771 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2772 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2773 case Bytecodes::_fast_lputfield:
2774 NOT_LP64(__ movptr(hi, rdx));
2775 __ movptr(lo, rax);
2776 break;
2777 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2778 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2779 case Bytecodes::_fast_aputfield: {
2780 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2781 break;
2782 }
2783 default:
2784 ShouldNotReachHere();
2785 }
2787 Label done;
2788 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2789 Assembler::StoreStore));
2790 // Barriers are so large that a short branch doesn't reach!
2791 __ jmp(done);
2793 // Same code as above, but we don't need rdx to test for volatile.
2794 __ bind(notVolatile);
2796 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2798 // Get object from stack
2799 pop_and_check_object(rcx);
2801 // access field
2802 switch (bytecode()) {
2803 case Bytecodes::_fast_zputfield: __ andl(rax, 0x1); // boolean is true if LSB is 1
2804 // fall through to bputfield
2805 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2806 case Bytecodes::_fast_sputfield: // fall through
2807 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2808 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2809 case Bytecodes::_fast_lputfield:
2810 NOT_LP64(__ movptr(hi, rdx));
2811 __ movptr(lo, rax);
2812 break;
2813 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2814 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2815 case Bytecodes::_fast_aputfield: {
2816 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2817 break;
2818 }
2819 default:
2820 ShouldNotReachHere();
2821 }
2822 __ bind(done);
2823 }
2826 void TemplateTable::fast_accessfield(TosState state) {
2827 transition(atos, state);
2829 // do the JVMTI work here to avoid disturbing the register state below
2830 if (JvmtiExport::can_post_field_access()) {
2831 // Check to see if a field access watch has been set before we take
2832 // the time to call into the VM.
2833 Label L1;
2834 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2835 __ testl(rcx,rcx);
2836 __ jcc(Assembler::zero, L1);
2837 // access constant pool cache entry
2838 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2839 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2840 __ verify_oop(rax);
2841 // rax: object pointer copied above
2842 // rcx: cache entry pointer
2843 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2844 __ pop_ptr(rax); // restore object pointer
2845 __ bind(L1);
2846 }
2848 // access constant pool cache
2849 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2850 // replace index with field offset from cache entry
2851 __ movptr(rbx, Address(rcx,
2852 rbx,
2853 Address::times_ptr,
2854 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2857 // rax: object
2858 __ verify_oop(rax);
2859 __ null_check(rax);
2860 // field addresses
2861 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2862 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2864 // access field
2865 switch (bytecode()) {
2866 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2867 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2868 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2869 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2870 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2871 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2872 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2873 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2874 default:
2875 ShouldNotReachHere();
2876 }
2878 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2879 // volatile_barrier( );
2880 }
2882 void TemplateTable::fast_xaccess(TosState state) {
2883 transition(vtos, state);
2884 // get receiver
2885 __ movptr(rax, aaddress(0));
2886 // access constant pool cache
2887 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2888 __ movptr(rbx, Address(rcx,
2889 rdx,
2890 Address::times_ptr,
2891 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2892 // make sure any exception is reported in the correct bcp range (getfield is the next instruction)
2893 __ increment(rsi);
2894 __ null_check(rax);
2895 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2896 if (state == itos) {
2897 __ movl(rax, lo);
2898 } else if (state == atos) {
2899 __ movptr(rax, lo);
2900 __ verify_oop(rax);
2901 } else if (state == ftos) {
2902 __ fld_s(lo);
2903 } else {
2904 ShouldNotReachHere();
2905 }
2906 __ decrement(rsi);
2907 }
2911 //----------------------------------------------------------------------------------------------------
2912 // Calls
2914 void TemplateTable::count_calls(Register method, Register temp) {
2915 // implemented elsewhere
2916 ShouldNotReachHere();
2917 }
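// Reminder of the ConstantPoolCacheEntry flags layout relied on below
// (see cpCache.hpp; shown only as a sketch):
//
//   [tos_state (4 bits)][assorted is_* flag bits][parameter size (low byte)]
//
// tos_state sits in the topmost bits, so shifting right by tos_state_shift
// needs no mask afterwards (verify_tos_state_shift asserts this).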
2920 void TemplateTable::prepare_invoke(int byte_no,
2921 Register method, // linked method (or i-klass)
2922 Register index, // itable index, MethodType, etc.
2923 Register recv, // if caller wants to see it
2924 Register flags // if caller wants to test it
2925 ) {
2926 // determine flags
2927 const Bytecodes::Code code = bytecode();
2928 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2929 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2930 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2931 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2932 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2933 const bool load_receiver = (recv != noreg);
2934 const bool save_flags = (flags != noreg);
2935 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2936 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2937 assert(flags == noreg || flags == rdx, "");
2938 assert(recv == noreg || recv == rcx, "");
2940 // setup registers & access constant pool cache
2941 if (recv == noreg) recv = rcx;
2942 if (flags == noreg) flags = rdx;
2943 assert_different_registers(method, index, recv, flags);
2945 // save 'interpreter return address'
2946 __ save_bcp();
2948 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2950 // maybe push appendix to arguments (just before return address)
2951 if (is_invokedynamic || is_invokehandle) {
2952 Label L_no_push;
2953 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2954 __ jccb(Assembler::zero, L_no_push);
2955 // Push the appendix as a trailing parameter.
2956 // This must be done before we get the receiver,
2957 // since the parameter_size includes it.
2958 __ push(rbx);
2959 __ mov(rbx, index);
2960 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2961 __ load_resolved_reference_at_index(index, rbx);
2962 __ pop(rbx);
2963 __ push(index); // push appendix (MethodType, CallSite, etc.)
2964 __ bind(L_no_push);
2965 }
2967 // load receiver if needed (note: no return address pushed yet)
2968 if (load_receiver) {
2969 __ movl(recv, flags);
2970 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2971 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2972 const int receiver_is_at_end = -1; // back off one slot to get receiver
2973 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2974 __ movptr(recv, recv_addr);
2975 __ verify_oop(recv);
2976 }
2978 if (save_flags) {
2979 __ mov(rsi, flags);
2980 }
2982 // compute return type
2983 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2984 // Make sure we don't need to mask flags after the above shift
2985 ConstantPoolCacheEntry::verify_tos_state_shift();
2986 // load return address
2987 {
2988 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2989 ExternalAddress table(table_addr);
2990 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2991 }
2993 // push return address
2994 __ push(flags);
2996 // Restore the flags value from the constant pool cache, and restore rsi
2997 // for later null checks; rsi is the bytecode pointer.
2998 if (save_flags) {
2999 __ mov(flags, rsi);
3000 __ restore_bcp();
3001 }
3002 }
3005 void TemplateTable::invokevirtual_helper(Register index,
3006 Register recv,
3007 Register flags) {
3008 // Uses temporary registers rax, rdx
3009 assert_different_registers(index, recv, rax, rdx);
3010 assert(index == rbx, "");
3011 assert(recv == rcx, "");
3013 // Test for an invoke of a final method
3014 Label notFinal;
3015 __ movl(rax, flags);
3016 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3017 __ jcc(Assembler::zero, notFinal);
3019 const Register method = index; // method must be rbx
3020 assert(method == rbx,
3021 "Method* must be rbx for interpreter calling convention");
3023 // do the call - the index is actually the method to call
3024 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3026 // It's final, need a null check here!
3027 __ null_check(recv);
3029 // profile this call
3030 __ profile_final_call(rax);
3031 __ profile_arguments_type(rax, method, rsi, true);
3033 __ jump_from_interpreted(method, rax);
3035 __ bind(notFinal);
3037 // get receiver klass
3038 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3039 __ load_klass(rax, recv);
3041 // profile this call
3042 __ profile_virtual_call(rax, rdi, rdx);
3044 // get target Method* & entry point
3045 __ lookup_virtual_method(rax, index, method);
3046 __ profile_arguments_type(rdx, method, rsi, true);
3047 __ jump_from_interpreted(method, rdx);
3048 }
3051 void TemplateTable::invokevirtual(int byte_no) {
3052 transition(vtos, vtos);
3053 assert(byte_no == f2_byte, "use this argument");
3054 prepare_invoke(byte_no,
3055 rbx, // method or vtable index
3056 noreg, // unused itable index
3057 rcx, rdx); // recv, flags
3059 // rbx: index
3060 // rcx: receiver
3061 // rdx: flags
3063 invokevirtual_helper(rbx, rcx, rdx);
3064 }
3067 void TemplateTable::invokespecial(int byte_no) {
3068 transition(vtos, vtos);
3069 assert(byte_no == f1_byte, "use this argument");
3070 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3071 rcx); // get receiver also for null check
3072 __ verify_oop(rcx);
3073 __ null_check(rcx);
3074 // do the call
3075 __ profile_call(rax);
3076 __ profile_arguments_type(rax, rbx, rsi, false);
3077 __ jump_from_interpreted(rbx, rax);
3078 }
3081 void TemplateTable::invokestatic(int byte_no) {
3082 transition(vtos, vtos);
3083 assert(byte_no == f1_byte, "use this argument");
3084 prepare_invoke(byte_no, rbx); // get f1 Method*
3085 // do the call
3086 __ profile_call(rax);
3087 __ profile_arguments_type(rax, rbx, rsi, false);
3088 __ jump_from_interpreted(rbx, rax);
3089 }
3092 void TemplateTable::fast_invokevfinal(int byte_no) {
3093 transition(vtos, vtos);
3094 assert(byte_no == f2_byte, "use this argument");
3095 __ stop("fast_invokevfinal not used on x86");
3096 }
3099 void TemplateTable::invokeinterface(int byte_no) {
3100 transition(vtos, vtos);
3101 assert(byte_no == f1_byte, "use this argument");
3102 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 Method*
3103 rcx, rdx); // recv, flags
3105 // rax: reference klass (from f1)
3106 // rbx: method (from f2)
3107 // rcx: receiver
3108 // rdx: flags
3110 // Special case of invokeinterface called for virtual method of
3111 // java.lang.Object. See cpCacheOop.cpp for details.
3112 // This code isn't produced by javac, but could be produced by
3113 // another compliant Java compiler.
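// For example, an invokeinterface of Comparable.hashCode() is legal even
// though hashCode comes from java.lang.Object; the is_forced_virtual flag
// marks such an entry so it is dispatched as a virtual call below.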
3114 Label notMethod;
3115 __ movl(rdi, rdx);
3116 __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3117 __ jcc(Assembler::zero, notMethod);
3119 invokevirtual_helper(rbx, rcx, rdx);
3120 __ bind(notMethod);
3122 // Get receiver klass into rdx - also a null check
3123 __ restore_locals(); // restore rdi
3124 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3125 __ load_klass(rdx, rcx);
3127 Label no_such_interface, no_such_method;
3129 // Receiver subtype check against REFC.
3130 // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3131 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3132 rdx, rax, noreg,
3133 // outputs: scan temp. reg, scan temp. reg
3134 rsi, rdi,
3135 no_such_interface,
3136 /*return_method=*/false);
3139 // profile this call
3140 __ restore_bcp(); // rbcp was destroyed by receiver type check
3141 __ profile_virtual_call(rdx, rsi, rdi);
3143 // Get declaring interface class from method, and itable index
3144 __ movptr(rax, Address(rbx, Method::const_offset()));
3145 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
3146 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
3147 __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3148 __ subl(rbx, Method::itable_index_max);
3149 __ negl(rbx);
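// rbx = itable_index_max - rbx: the two instructions above decode the
// itable index from the encoded form stored in the Method*.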
3151 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3152 rdx, rax, rbx,
3153 // outputs: method, scan temp. reg
3154 rbx, rsi,
3155 no_such_interface);
3157 // rbx: Method* to call
3158 // rcx: receiver
3159 // Check for abstract method error
3160 // Note: This should be done more efficiently via a throw_abstract_method_error
3161 // interpreter entry point and a conditional jump to it in case of a null
3162 // method.
3163 __ testptr(rbx, rbx);
3164 __ jcc(Assembler::zero, no_such_method);
3166 __ profile_arguments_type(rdx, rbx, rsi, true);
3168 // do the call
3169 // rcx: receiver
3170 // rbx: Method*
3171 __ jump_from_interpreted(rbx, rdx);
3172 __ should_not_reach_here();
3174 // exception handling code follows...
3175 // note: must restore interpreter registers to canonical
3176 // state for exception handling to work correctly!
3178 __ bind(no_such_method);
3179 // throw exception
3180 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3181 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3182 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3183 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3184 // the call_VM checks for exception, so we should never return here.
3185 __ should_not_reach_here();
3187 __ bind(no_such_interface);
3188 // throw exception
3189 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3190 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3191 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3192 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3193 InterpreterRuntime::throw_IncompatibleClassChangeError));
3194 // the call_VM checks for exception, so we should never return here.
3195 __ should_not_reach_here();
3196 }
3198 void TemplateTable::invokehandle(int byte_no) {
3199 transition(vtos, vtos);
3200 assert(byte_no == f1_byte, "use this argument");
3201 const Register rbx_method = rbx;
3202 const Register rax_mtype = rax;
3203 const Register rcx_recv = rcx;
3204 const Register rdx_flags = rdx;
3206 if (!EnableInvokeDynamic) {
3207 // rewriter does not generate this bytecode
3208 __ should_not_reach_here();
3209 return;
3210 }
3212 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3213 __ verify_method_ptr(rbx_method);
3214 __ verify_oop(rcx_recv);
3215 __ null_check(rcx_recv);
3217 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3218 // rbx: MH.invokeExact_MT method (from f2)
3220 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3222 // FIXME: profile the LambdaForm also
3223 __ profile_final_call(rax);
3224 __ profile_arguments_type(rdx, rbx_method, rsi, true);
3226 __ jump_from_interpreted(rbx_method, rdx);
3227 }
3230 void TemplateTable::invokedynamic(int byte_no) {
3231 transition(vtos, vtos);
3232 assert(byte_no == f1_byte, "use this argument");
3234 if (!EnableInvokeDynamic) {
3235 // We should not encounter this bytecode if !EnableInvokeDynamic.
3236 // The verifier will stop it. However, if we get past the verifier,
3237 // this will stop the thread in a reasonable way, without crashing the JVM.
3238 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3239 InterpreterRuntime::throw_IncompatibleClassChangeError));
3240 // the call_VM checks for exception, so we should never return here.
3241 __ should_not_reach_here();
3242 return;
3243 }
3245 const Register rbx_method = rbx;
3246 const Register rax_callsite = rax;
3248 prepare_invoke(byte_no, rbx_method, rax_callsite);
3250 // rax: CallSite object (from cpool->resolved_references[f1])
3251 // rbx: MH.linkToCallSite method (from f2)
3253 // Note: rax_callsite is already pushed by prepare_invoke
3255 // %%% should make a type profile for any invokedynamic that takes a ref argument
3256 // profile this call
3257 __ profile_call(rsi);
3258 __ profile_arguments_type(rdx, rbx, rsi, false);
3260 __ verify_oop(rax_callsite);
3262 __ jump_from_interpreted(rbx_method, rdx);
3263 }
3265 //----------------------------------------------------------------------------------------------------
3266 // Allocation
3268 void TemplateTable::_new() {
3269 transition(vtos, atos);
3270 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3271 Label slow_case;
3272 Label slow_case_no_pop;
3273 Label done;
3274 Label initialize_header;
3275 Label initialize_object; // including clearing the fields
3276 Label allocate_shared;
3278 __ get_cpool_and_tags(rcx, rax);
3280 // Make sure the class we're about to instantiate has been resolved.
3281 // This is done before loading the InstanceKlass to be consistent with the
3282 // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3283 const int tags_offset = Array<u1>::base_offset_in_bytes();
3284 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3285 __ jcc(Assembler::notEqual, slow_case_no_pop);
3287 // get InstanceKlass
3288 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
3289 __ push(rcx); // save the InstanceKlass* for initializing the header
3291 // make sure klass is fully initialized; the finalizer check follows
3292 // below via the layout_helper slow-path bit
3293 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3294 __ jcc(Assembler::notEqual, slow_case);
3296 // get instance_size in InstanceKlass (scaled to a count of bytes)
3297 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3298 // test to see if it has a finalizer or is malformed in some way
3299 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3300 __ jcc(Assembler::notZero, slow_case);
3302 //
3303 // Allocate the instance
3304 // 1) Try to allocate in the TLAB
3305 // 2) if fail and the object is large allocate in the shared Eden
3306 // 3) if the above fails (or is not applicable), go to a slow case
3307 // (creates a new TLAB, etc.)
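// A sketch of the TLAB fast path generated below:
//
//   obj     = thread->tlab_top;
//   new_top = obj + instance_size;
//   if (new_top > thread->tlab_end) goto shared_eden_or_slow_case;
//   thread->tlab_top = new_top;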
3309 const bool allow_shared_alloc =
3310 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3312 const Register thread = rcx;
3313 if (UseTLAB || allow_shared_alloc) {
3314 __ get_thread(thread);
3315 }
3317 if (UseTLAB) {
3318 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3319 __ lea(rbx, Address(rax, rdx, Address::times_1));
3320 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3321 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3322 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3323 if (ZeroTLAB) {
3324 // the fields have been already cleared
3325 __ jmp(initialize_header);
3326 } else {
3327 // initialize both the header and fields
3328 __ jmp(initialize_object);
3329 }
3330 }
3332 // Allocation in the shared Eden, if allowed.
3333 //
3334 // rdx: instance size in bytes
3335 if (allow_shared_alloc) {
3336 __ bind(allocate_shared);
3338 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3340 Label retry;
3341 __ bind(retry);
3342 __ movptr(rax, heap_top);
3343 __ lea(rbx, Address(rax, rdx, Address::times_1));
3344 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3345 __ jcc(Assembler::above, slow_case);
3347 // Compare rax with the top addr, and if still equal, store the new
3348 // top addr in rbx at the address of the top addr pointer. Sets ZF if it was
3349 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3350 //
3351 // rax: object begin
3352 // rbx: object end
3353 // rdx: instance size in bytes
3354 __ locked_cmpxchgptr(rbx, heap_top);
3356 // if someone beat us on the allocation, try again, otherwise continue
3357 __ jcc(Assembler::notEqual, retry);
3359 __ incr_allocated_bytes(thread, rdx, 0);
3360 }
3362 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3363 // The object is initialized before the header. If the object size is
3364 // zero, go directly to the header initialization.
3365 __ bind(initialize_object);
3366 __ decrement(rdx, sizeof(oopDesc));
3367 __ jcc(Assembler::zero, initialize_header);
3369 // Initialize topmost object field, divide rdx by 8, check if odd and
3370 // test if zero.
3371 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3372 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3374 // rdx must have been a multiple of 8
3375 #ifdef ASSERT
3376 // make sure rdx was a multiple of 8
3377 Label L;
3378 // Ignore partial flag stall after shrl() since it is debug VM
3379 __ jccb(Assembler::carryClear, L);
3380 __ stop("object size is not multiple of 2 - adjust this code");
3381 __ bind(L);
3382 // rdx must be > 0, no extra check needed here
3383 #endif
3385 // initialize remaining object fields: rdx was a multiple of 8
3386 { Label loop;
3387 __ bind(loop);
3388 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3389 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3390 __ decrement(rdx);
3391 __ jcc(Assembler::notZero, loop);
3392 }
3394 // initialize object header only.
3395 __ bind(initialize_header);
3396 if (UseBiasedLocking) {
3397 __ pop(rcx); // get saved klass back in the register.
3398 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
3399 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3400 } else {
3401 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3402 (int32_t)markOopDesc::prototype()); // header
3403 __ pop(rcx); // get saved klass back in the register.
3404 }
3405 __ store_klass(rax, rcx); // klass
3407 {
3408 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3409 // Trigger dtrace event for fastpath
3410 __ push(atos);
3411 __ call_VM_leaf(
3412 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3413 __ pop(atos);
3414 }
3416 __ jmp(done);
3417 }
3419 // slow case
3420 __ bind(slow_case);
3421 __ pop(rcx); // restore stack pointer to what it was when we came in.
3422 __ bind(slow_case_no_pop);
3423 __ get_constant_pool(rax);
3424 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3425 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3427 // continue
3428 __ bind(done);
3429 }
3432 void TemplateTable::newarray() {
3433 transition(itos, atos);
3434 __ push_i(rax); // make sure everything is on the stack
3435 __ load_unsigned_byte(rdx, at_bcp(1));
3436 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3437 __ pop_i(rdx); // discard size
3438 }
3441 void TemplateTable::anewarray() {
3442 transition(itos, atos);
3443 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3444 __ get_constant_pool(rcx);
3445 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3446 }
3449 void TemplateTable::arraylength() {
3450 transition(atos, itos);
3451 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3452 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3453 }
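// checkcast/instanceof share a fast path: if the constant pool tag is
// already JVM_CONSTANT_Class, the entry has been resolved ("quickened")
// and the Klass* can be loaded straight from the constant pool; otherwise
// we must first call into the VM to resolve it.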
3456 void TemplateTable::checkcast() {
3457 transition(atos, atos);
3458 Label done, is_null, ok_is_subtype, quicked, resolved;
3459 __ testptr(rax, rax); // Object is in EAX
3460 __ jcc(Assembler::zero, is_null);
3462 // Get cpool & tags index
3463 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3464 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3465 // See if bytecode has already been quicked
3466 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3467 __ jcc(Assembler::equal, quicked);
3469 __ push(atos);
3470 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3471 // vm_result_2 has metadata result
3472 // borrow rdi from locals
3473 __ get_thread(rdi);
3474 __ get_vm_result_2(rax, rdi);
3475 __ restore_locals();
3476 __ pop_ptr(rdx);
3477 __ jmpb(resolved);
3479 // Get superklass in EAX and subklass in EBX
3480 __ bind(quicked);
3481 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3482 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3484 __ bind(resolved);
3485 __ load_klass(rbx, rdx);
3487 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3488 // Superklass in EAX. Subklass in EBX.
3489 __ gen_subtype_check( rbx, ok_is_subtype );
3491 // Come here on failure
3492 __ push(rdx);
3493 // object is at TOS
3494 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3496 // Come here on success
3497 __ bind(ok_is_subtype);
3498 __ mov(rax,rdx); // Restore the object saved in EDX
3500 // Collect counts on whether this check-cast sees NULLs a lot or not.
3501 if (ProfileInterpreter) {
3502 __ jmp(done);
3503 __ bind(is_null);
3504 __ profile_null_seen(rcx);
3505 } else {
3506 __ bind(is_null); // same as 'done'
3507 }
3508 __ bind(done);
3509 }
3512 void TemplateTable::instanceof() {
3513 transition(atos, itos);
3514 Label done, is_null, ok_is_subtype, quicked, resolved;
3515 __ testptr(rax, rax);
3516 __ jcc(Assembler::zero, is_null);
3518 // Get cpool & tags index
3519 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3520 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3521 // See if bytecode has already been quicked
3522 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3523 __ jcc(Assembler::equal, quicked);
3525 __ push(atos);
3526 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3527 // vm_result_2 has metadata result
3528 // borrow rdi from locals
3529 __ get_thread(rdi);
3530 __ get_vm_result_2(rax, rdi);
3531 __ restore_locals();
3532 __ pop_ptr(rdx);
3533 __ load_klass(rdx, rdx);
3534 __ jmp(resolved);
3536 // Get superklass in EAX and subklass in EDX
3537 __ bind(quicked);
3538 __ load_klass(rdx, rax);
3539 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3541 __ bind(resolved);
3543 // Generate subtype check. Blows ECX. Resets EDI.
3544 // Superklass in EAX. Subklass in EDX.
3545 __ gen_subtype_check(rdx, ok_is_subtype);
3547 // Come here on failure
3548 __ xorl(rax, rax);
3549 __ jmpb(done);
3550 // Come here on success
3551 __ bind(ok_is_subtype);
3552 __ movl(rax, 1);
3554 // Collect counts on whether this test sees NULLs a lot or not.
3555 if (ProfileInterpreter) {
3556 __ jmp(done);
3557 __ bind(is_null);
3558 __ profile_null_seen(rcx);
3559 } else {
3560 __ bind(is_null); // same as 'done'
3561 }
3562 __ bind(done);
3563 // rax = 0: obj == NULL or obj is not an instance of the specified klass
3564 // rax = 1: obj != NULL and obj is an instance of the specified klass
3565 }
3568 //----------------------------------------------------------------------------------------------------
3569 // Breakpoints
3570 void TemplateTable::_breakpoint() {
3572 // Note: We get here even if we are single stepping.
3573 // jbug insists on setting breakpoints at every bytecode
3574 // even if we are in single step mode.
3576 transition(vtos, vtos);
3578 // get the unpatched byte code
3579 __ get_method(rcx);
3580 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3581 __ mov(rbx, rax);
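// rbx: original (unpatched) bytecode; dispatch_only_normal below
// dispatches on rbx, so the original bytecode is executed instead of
// the breakpoint pseudo-bytecode.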
3583 // post the breakpoint event
3584 __ get_method(rcx);
3585 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3587 // complete the execution of original bytecode
3588 __ dispatch_only_normal(vtos);
3589 }
3592 //----------------------------------------------------------------------------------------------------
3593 // Exceptions
3595 void TemplateTable::athrow() {
3596 transition(atos, vtos);
3597 __ null_check(rax);
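// rax: exception oop (atos); the shared exception handler expects it there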
3598 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3599 }
3602 //----------------------------------------------------------------------------------------------------
3603 // Synchronization
3604 //
3605 // Note: monitorenter & exit are symmetric routines; which is reflected
3606 // in the assembly code structure as well
3607 //
3608 // Stack layout:
3609 //
3610 // [expressions ] <--- rsp = expression stack top
3611 // ..
3612 // [expressions ]
3613 // [monitor entry] <--- monitor block top = expression stack bot
3614 // ..
3615 // [monitor entry]
3616 // [frame data ] <--- monitor block bot
3617 // ...
3618 // [saved rbp ] <--- rbp
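// Each monitor entry is a BasicObjectLock: a BasicLock (displaced mark
// word) followed by a pointer to the locked object; entry_size below is
// derived from that layout.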
3621 void TemplateTable::monitorenter() {
3622 transition(atos, vtos);
3624 // check for NULL object
3625 __ null_check(rax);
3627 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3628 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3629 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
3630 Label allocated;
3632 // initialize entry pointer
3633 __ xorl(rdx, rdx); // points to free slot or NULL
3635 // find a free slot in the monitor block (result in rdx)
3636 { Label entry, loop, exit;
3637 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3639 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3640 __ jmpb(entry);
3642 __ bind(loop);
3643 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3644 __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3645 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3646 __ jccb(Assembler::equal, exit); // if same object then stop searching
3647 __ addptr(rcx, entry_size); // otherwise advance to next entry
3648 __ bind(entry);
3649 __ cmpptr(rcx, rbx); // check if bottom reached
3650 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3651 __ bind(exit);
3652 }
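// Note: stopping at an entry for the same object guarantees that a
// recursive lock gets a slot above the existing one (the scan starts at
// the top), so monitorexit, which also scans from the top, finds the
// most recent entry first.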
3654 __ testptr(rdx, rdx); // check if a slot has been found
3655 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3657 // allocate one if there's no free slot
3658 { Label entry, loop;
3659 // 1. compute new pointers // rsp: old expression stack top
3660 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3661 __ subptr(rsp, entry_size); // move expression stack top
3662 __ subptr(rdx, entry_size); // move expression stack bottom
3663 __ mov(rcx, rsp); // set start value for copy loop
3664 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3665 __ jmp(entry);
3666 // 2. move expression stack contents
3667 __ bind(loop);
3668 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3669 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3670 __ addptr(rcx, wordSize); // advance to next word
3671 __ bind(entry);
3672 __ cmpptr(rcx, rdx); // check if bottom reached
3673 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3674 }
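// rdx now points to the new monitor entry: the expression stack was
// shifted down by entry_size, and the vacated space at the old stack
// bottom became the new top-most monitor slot.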
3676 // call run-time routine
3677 // rdx: points to monitor entry
3678 __ bind(allocated);
3680 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3681 // The object has already been popped from the stack, so the expression stack looks correct.
3682 __ increment(rsi);
3684 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3685 __ lock_object(rdx);
3687 // check to make sure this monitor doesn't cause stack overflow after locking
3688 __ save_bcp(); // in case of exception
3689 __ generate_stack_overflow_check(0);
3691 // The bcp has already been incremented. Just need to dispatch to next instruction.
3692 __ dispatch_next(vtos);
3693 }
3696 void TemplateTable::monitorexit() {
3697 transition(atos, vtos);
3699 // check for NULL object
3700 __ null_check(rax);
3702 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3703 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3704 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
3705 Label found;
3707 // find matching slot
3708 { Label entry, loop;
3709 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3710 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3711 __ jmpb(entry);
3713 __ bind(loop);
3714 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3715 __ jcc(Assembler::equal, found); // if same object then stop searching
3716 __ addptr(rdx, entry_size); // otherwise advance to next entry
3717 __ bind(entry);
3718 __ cmpptr(rdx, rbx); // check if bottom reached
3719 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3720 }
3722 // Error handling: falling out of the loop means no matching entry was found, i.e. the unlock is not block-structured
3723 Label end;
3724 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3725 __ should_not_reach_here();
3727 // call run-time routine
3728 // rdx: points to monitor entry
3729 __ bind(found);
3730 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3731 __ unlock_object(rdx);
3732 __ pop_ptr(rax); // discard object
3733 __ bind(end);
3734 }
3737 //----------------------------------------------------------------------------------------------------
3738 // Wide instructions
3740 void TemplateTable::wide() {
3741 transition(vtos, vtos);
3742 __ load_unsigned_byte(rbx, at_bcp(1));
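// rbx: opcode of the instruction to be widened; it indexes the separate
// wide-entry dispatch table rather than the normal one.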
3743 ExternalAddress wtable((address)Interpreter::_wentry_point);
3744 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3745 // Note: the rsi increment step is part of the individual wide bytecode implementations
3746 }
3749 //----------------------------------------------------------------------------------------------------
3750 // Multi arrays
3752 void TemplateTable::multianewarray() {
3753 transition(vtos, atos);
3754 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3755 // last dim is on top of stack; we want address of first one:
3756 // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
3757 // (the trailing wordSize makes rax point at the first dimension word)
3758 __ lea(rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
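// e.g. ndims == 2 (one word per stack element on ia32) yields
// rsp + 2*wordSize - wordSize, the slot holding the first dimension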
3759 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax
3760 __ load_unsigned_byte(rbx, at_bcp(3));
3761 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3762 }
3764 #endif /* !CC_INTERP */