Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
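
The change is mechanical: guards of the form `#ifndef SERIALGC` become `#if INCLUDE_ALL_GCS`
and `#ifdef SERIALGC` becomes `#if !INCLUDE_ALL_GCS`. A minimal sketch of the resulting
pattern (illustrative only, not a hunk from this changeset; the G1 write-barrier code in
do_oop_store() below is a real instance):

    #include "utilities/macros.hpp"   // defines INCLUDE_ALL_GCS as 0 or 1

    #if INCLUDE_ALL_GCS
      // code needed only when the non-serial collectors (G1, CMS, ...) are built in
    #endif // INCLUDE_ALL_GCS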

/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from
// rsp() (it isn't different for category 1 values).
static inline Address at_tos() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
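
// Usage sketch (added for clarity, not in the original source): each
// conditional-branch template jumps on the *negated* condition so the
// not-taken case falls through, e.g. in if_icmp() further down:
//
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(cc), not_taken);  // skip branch() when cc does not hold
//   branch(false, false);
//   __ bind(not_taken);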

// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
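
// Usage sketch (illustrative, not part of the original file): callers pass
// the active collector's barrier kind and whether a precise card mark is
// required.  aastore() below, for example, stores an array element with:
//
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true /* precise */);
//
// With INCLUDE_ALL_GCS the G1 pre/post write barriers are compiled in; a
// minimal (serial-GC-only) build takes the card-table cases instead.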

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
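
// Note added for clarity: patch_bytecode() implements bytecode "quickening"
// -- once the slow path has done its resolution work, the instruction in the
// method's bytecode stream is overwritten in place with a _fast_* variant,
// so later executions dispatch straight to the fast template.  For example,
// iload() below rewrites an iload/caload pair via:
//
//   patch_bytecode(Bytecodes::_iload, bc, rbx, false);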

// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
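
// Worked example (added for clarity): the two operand bytes at bcp+1 are
// big-endian.  For the operand bytes 0xFF 0xFE (-2), load_unsigned_short on
// little-endian x86 yields 0x0000FEFF; bswapl turns that into 0xFFFE0000,
// and the arithmetic shift right by 16 both moves the value into the low
// half and sign-extends it, giving 0xFFFFFFFE.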

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
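
// Note added for clarity: locals are addressed relative to r14 (the locals
// pointer) and sit at decreasing addresses for increasing slot numbers, so
// the slot index is negated before being scaled -- iaddress(rbx) above
// resolves to r14 + rbx * 8 with rbx holding -index, i.e. slot 0 at r14
// itself and slot n at r14 - n*wordSize.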

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  Comparing against fast_iload means
    // that the next bytecode is neither an iload nor a caload, and
    // therefore this is an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
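
// Note added for clarity: index_check() folds the null check into the
// length access and does the bounds check as a single *unsigned* compare --
// jump_cc(aboveEqual) rejects both index >= length and negative indices,
// since a negative index compares above any valid length when treated as
// unsigned.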

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address
  // for the 128-bit operands used by SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
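
// Worked example (added for clarity): each *_signflip_pool below holds four
// jlongs (32 bytes), so rounding &pool[1] down to a 16-byte boundary always
// yields a 16-byte window that lies entirely inside the pool -- whether the
// pool itself happens to start at a 16-byte boundary or only at the 8-byte
// alignment the jlong type guarantees.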

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);          // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
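
// Note added for clarity: lcmp produces -1/0/+1 in rax with a single
// conditional branch.  rax is preset to -1 and kept if value1 < value2;
// otherwise setb(notEqual) writes 1 into al for "greater" and 0 for
// "equal", and movzbl zero-extends al, clearing the stale upper bits left
// over from the preset -1.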

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = Method::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = Method::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in Method*
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));            // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
      __ addl(rax, Address(rcx, be_offset));             // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, which value does not get reset on
          // the call to frequency_counter_overflow().  To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}
1773 void TemplateTable::if_0cmp(Condition cc) {
1774 transition(itos, vtos);
1775 // assume branch is more often taken than not (loops use backward branches)
1776 Label not_taken;
1777 __ testl(rax, rax);
1778 __ jcc(j_not(cc), not_taken);
1779 branch(false, false);
1780 __ bind(not_taken);
1781 __ profile_not_taken_branch(rax);
1782 }
1784 void TemplateTable::if_icmp(Condition cc) {
1785 transition(itos, vtos);
1786 // assume branch is more often taken than not (loops use backward branches)
1787 Label not_taken;
1788 __ pop_i(rdx);
1789 __ cmpl(rdx, rax);
1790 __ jcc(j_not(cc), not_taken);
1791 branch(false, false);
1792 __ bind(not_taken);
1793 __ profile_not_taken_branch(rax);
1794 }
1796 void TemplateTable::if_nullcmp(Condition cc) {
1797 transition(atos, vtos);
1798 // assume branch is more often taken than not (loops use backward branches)
1799 Label not_taken;
1800 __ testptr(rax, rax);
1801 __ jcc(j_not(cc), not_taken);
1802 branch(false, false);
1803 __ bind(not_taken);
1804 __ profile_not_taken_branch(rax);
1805 }
1807 void TemplateTable::if_acmp(Condition cc) {
1808 transition(atos, vtos);
1809 // assume branch is more often taken than not (loops use backward branches)
1810 Label not_taken;
1811 __ pop_ptr(rdx);
1812 __ cmpptr(rdx, rax);
1813 __ jcc(j_not(cc), not_taken);
1814 branch(false, false);
1815 __ bind(not_taken);
1816 __ profile_not_taken_branch(rax);
1817 }
1819 void TemplateTable::ret() {
1820 transition(vtos, vtos);
1821 locals_index(rbx);
1822 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
1823 __ profile_ret(rbx, rcx);
1824 __ get_method(rax);
1825 __ movptr(r13, Address(rax, Method::const_offset()));
1826 __ lea(r13, Address(r13, rbx, Address::times_1,
1827 ConstMethod::codes_offset()));
1828 __ dispatch_next(vtos);
1829 }
1831 void TemplateTable::wide_ret() {
1832 transition(vtos, vtos);
1833 locals_index_wide(rbx);
1834 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
1835 __ profile_ret(rbx, rcx);
1836 __ get_method(rax);
1837 __ movptr(r13, Address(rax, Method::const_offset()));
1838 __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
1839 __ dispatch_next(vtos);
1840 }
1842 void TemplateTable::tableswitch() {
1843 Label default_case, continue_execution;
1844 transition(itos, vtos);
1845 // align r13
1846 __ lea(rbx, at_bcp(BytesPerInt));
1847 __ andptr(rbx, -BytesPerInt);
1848 // load lo & hi
1849 __ movl(rcx, Address(rbx, BytesPerInt));
1850 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
1851 __ bswapl(rcx);
1852 __ bswapl(rdx);
1853 // check against lo & hi
1854 __ cmpl(rax, rcx);
1855 __ jcc(Assembler::less, default_case);
1856 __ cmpl(rax, rdx);
1857 __ jcc(Assembler::greater, default_case);
1858 // lookup dispatch offset
1859 __ subl(rax, rcx);
1860 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1861 __ profile_switch_case(rax, rbx, rcx);
1862 // continue execution
1863 __ bind(continue_execution);
1864 __ bswapl(rdx);
1865 __ movl2ptr(rdx, rdx);
1866 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1867 __ addptr(r13, rdx);
1868 __ dispatch_only(vtos);
1869 // handle default
1870 __ bind(default_case);
1871 __ profile_switch_default(rax);
1872 __ movl(rdx, Address(rbx, 0));
1873 __ jmp(continue_execution);
1874 }
1876 void TemplateTable::lookupswitch() {
1877 transition(itos, itos);
1878 __ stop("lookupswitch bytecode should have been rewritten");
1879 }
1881 void TemplateTable::fast_linearswitch() {
1882 transition(itos, vtos);
1883 Label loop_entry, loop, found, continue_execution;
1884 // bswap rax so we can avoid bswapping the table entries
1885 __ bswapl(rax);
1886 // align r13
1887 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1888 // this instruction (change offsets
1889 // below)
1890 __ andptr(rbx, -BytesPerInt);
1891 // set counter
1892 __ movl(rcx, Address(rbx, BytesPerInt));
1893 __ bswapl(rcx);
1894 __ jmpb(loop_entry);
1895 // table search
1896 __ bind(loop);
1897 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1898 __ jcc(Assembler::equal, found);
1899 __ bind(loop_entry);
1900 __ decrementl(rcx);
1901 __ jcc(Assembler::greaterEqual, loop);
1902 // default case
1903 __ profile_switch_default(rax);
1904 __ movl(rdx, Address(rbx, 0));
1905 __ jmp(continue_execution);
1906 // entry found -> get offset
1907 __ bind(found);
1908 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1909 __ profile_switch_case(rcx, rax, rbx);
1910 // continue execution
1911 __ bind(continue_execution);
1912 __ bswapl(rdx);
1913 __ movl2ptr(rdx, rdx);
1914 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1915 __ addptr(r13, rdx);
1916 __ dispatch_only(vtos);
1917 }
1919 void TemplateTable::fast_binaryswitch() {
1920 transition(itos, vtos);
1921 // Implementation using the following core algorithm:
1922 //
1923 // int binary_search(int key, LookupswitchPair* array, int n) {
1924 // // Binary search according to "Methodik des Programmierens" by
1925 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1926 // int i = 0;
1927 // int j = n;
1928 // while (i+1 < j) {
1929 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1930 // // with Q: for all i: 0 <= i < n: key < a[i]
1931 //     // where a stands for the array and assuming that the (nonexistent)
1932 //     // element a[n] is infinitely big.
1933 // int h = (i + j) >> 1;
1934 // // i < h < j
1935 // if (key < array[h].fast_match()) {
1936 // j = h;
1937 // } else {
1938 // i = h;
1939 // }
1940 // }
1941 // // R: a[i] <= key < a[i+1] or Q
1942 // // (i.e., if key is within array, i is the correct index)
1943 // return i;
1944 // }
1946 // Register allocation
1947 const Register key = rax; // already set (tosca)
1948 const Register array = rbx;
1949 const Register i = rcx;
1950 const Register j = rdx;
1951 const Register h = rdi;
1952 const Register temp = rsi;
1954 // Find array start
1955 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1956 // get rid of this
1957 // instruction (change
1958 // offsets below)
1959 __ andptr(array, -BytesPerInt);
1961 // Initialize i & j
1962 __ xorl(i, i); // i = 0;
1963 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1965   // Convert j into native byte ordering
1966 __ bswapl(j);
1968 // And start
1969 Label entry;
1970 __ jmp(entry);
1972 // binary search loop
1973 {
1974 Label loop;
1975 __ bind(loop);
1976 // int h = (i + j) >> 1;
1977 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1978 __ sarl(h, 1); // h = (i + j) >> 1;
1979 // if (key < array[h].fast_match()) {
1980 // j = h;
1981 // } else {
1982 // i = h;
1983 // }
1984 // Convert array[h].match to native byte-ordering before compare
1985 __ movl(temp, Address(array, h, Address::times_8));
1986 __ bswapl(temp);
1987 __ cmpl(key, temp);
1988 // j = h if (key < array[h].fast_match())
1989 __ cmovl(Assembler::less, j, h);
1990 // i = h if (key >= array[h].fast_match())
1991 __ cmovl(Assembler::greaterEqual, i, h);
1992 // while (i+1 < j)
1993 __ bind(entry);
1994 __ leal(h, Address(i, 1)); // i+1
1995 __ cmpl(h, j); // i+1 < j
1996 __ jcc(Assembler::less, loop);
1997 }
1999 // end of binary search, result index is i (must check again!)
2000 Label default_case;
2001 // Convert array[i].match to native byte-ordering before compare
2002 __ movl(temp, Address(array, i, Address::times_8));
2003 __ bswapl(temp);
2004 __ cmpl(key, temp);
2005 __ jcc(Assembler::notEqual, default_case);
2007 // entry found -> j = offset
2008 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2009 __ profile_switch_case(i, key, array);
2010 __ bswapl(j);
2011 __ movl2ptr(j, j);
2012 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2013 __ addptr(r13, j);
2014 __ dispatch_only(vtos);
2016 // default case -> j = default offset
2017 __ bind(default_case);
2018 __ profile_switch_default(i);
2019 __ movl(j, Address(array, -2 * BytesPerInt));
2020 __ bswapl(j);
2021 __ movl2ptr(j, j);
2022 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2023 __ addptr(r13, j);
2024 __ dispatch_only(vtos);
2025 }
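
// Note that the search loop above is deliberately branchless in its body:
// i and j are updated with cmovl rather than a conditional jump, so the only
// branch in the loop is the well-predicted back edge. The update step, as a
// C sketch:
//
//   int h = (i + j) >> 1;                    // i < h < j
//   j = (key < pairs[h].match) ? h : j;      // cmovl less
//   i = (key < pairs[h].match) ? i : h;      // cmovl greaterEqual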
2028 void TemplateTable::_return(TosState state) {
2029 transition(state, state);
2030 assert(_desc->calls_vm(),
2031 "inconsistent calls_vm information"); // call in remove_activation
2033 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2034 assert(state == vtos, "only valid state");
2035 __ movptr(c_rarg1, aaddress(0));
2036 __ load_klass(rdi, c_rarg1);
2037 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2038 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2039 Label skip_register_finalizer;
2040 __ jcc(Assembler::zero, skip_register_finalizer);
2042 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2044 __ bind(skip_register_finalizer);
2045 }
2047 __ remove_activation(state, r13);
2048 __ jmp(r13);
2049 }
2051 // ----------------------------------------------------------------------------
2052 // Volatile variables demand their effects be made known to all CPUs
2053 // in order. Store buffers on most chips allow reads & writes to
2054 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2055 // without some kind of memory barrier (i.e., it's not sufficient that
2056 // the interpreter does not reorder volatile references; the hardware
2057 // also must not reorder them).
2058 //
2059 // According to the new Java Memory Model (JMM):
2060 // (1) All volatiles are serialized with respect to each other. ALSO
2061 // reads & writes act as acquire & release, so:
2062 // (2) A read cannot let unrelated NON-volatile memory refs that
2063 // happen after the read float up to before the read. It's OK for
2064 // non-volatile memory refs that happen before the volatile read to
2065 // float down below it.
2066 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2067 // memory refs that happen BEFORE the write float down to after the
2068 // write. It's OK for non-volatile memory refs that happen after the
2069 // volatile write to float up before it.
2070 //
2071 // We only put in barriers around volatile refs (they are expensive),
2072 // not _between_ memory refs (that would require us to track the
2073 // flavor of the previous memory refs). Requirements (2) and (3)
2074 // require some barriers before volatile stores and after volatile
2075 // loads. These nearly cover requirement (1) but miss the
2076 // volatile-store-volatile-load case. This final case is placed after
2077 // volatile-stores although it could just as well go before
2078 // volatile-loads.
2079 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2080 order_constraint) {
2081   // Helper function to insert an is-volatile test and a memory barrier
2082 if (os::is_MP()) { // Not needed on single CPU
2083 __ membar(order_constraint);
2084 }
2085 }
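
// To illustrate the barrier placement that follows from the rules above
// (a sketch, not generated code): on x86 only the store-load reordering of
// the store buffer is visible, so
//
//   volatile_field = v;   // preceded by StoreStore (free on x86),
//                         // followed by an explicit StoreLoad barrier
//                         // (see the volatile check in putfield_or_static)
//   r = volatile_field;   // needs no explicit barrier on x86: loads are
//                         // not reordered with later loads or stores,
//                         // hence the commented-out LoadLoad barriers in
//                         // the field accessors below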
2087 void TemplateTable::resolve_cache_and_index(int byte_no,
2088 Register Rcache,
2089 Register index,
2090 size_t index_size) {
2091 const Register temp = rbx;
2092 assert_different_registers(Rcache, index, temp);
2094 Label resolved;
2095 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2096 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2097 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2098 __ jcc(Assembler::equal, resolved);
2100 // resolve first time through
2101 address entry;
2102 switch (bytecode()) {
2103 case Bytecodes::_getstatic:
2104 case Bytecodes::_putstatic:
2105 case Bytecodes::_getfield:
2106 case Bytecodes::_putfield:
2107 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2108 break;
2109 case Bytecodes::_invokevirtual:
2110 case Bytecodes::_invokespecial:
2111 case Bytecodes::_invokestatic:
2112 case Bytecodes::_invokeinterface:
2113 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2114 break;
2115 case Bytecodes::_invokehandle:
2116 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2117 break;
2118 case Bytecodes::_invokedynamic:
2119 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2120 break;
2121 default:
2122 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2123 break;
2124 }
2125 __ movl(temp, (int) bytecode());
2126 __ call_VM(noreg, entry, temp);
2128 // Update registers with resolved info
2129 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2130 __ bind(resolved);
2131 }
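
// What the code above implements, as a C-like sketch (names illustrative):
// each ConstantPoolCacheEntry records the bytecode that resolved it, so the
// VM call is taken at most once per entry and resolution direction:
//
//   if (entry->resolved_bytecode(byte_no) != bytecode()) {
//     call_VM(resolve_entry);     // fills in f1/f2/flags for this entry
//     reload(Rcache, index);      // the VM call clobbers the registers
//   }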
2133 // The cache and index registers must be set before the call
2134 void TemplateTable::load_field_cp_cache_entry(Register obj,
2135 Register cache,
2136 Register index,
2137 Register off,
2138 Register flags,
2139 bool is_static = false) {
2140 assert_different_registers(cache, index, flags, off);
2142 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2143 // Field offset
2144 __ movptr(off, Address(cache, index, Address::times_ptr,
2145 in_bytes(cp_base_offset +
2146 ConstantPoolCacheEntry::f2_offset())));
2147 // Flags
2148 __ movl(flags, Address(cache, index, Address::times_ptr,
2149 in_bytes(cp_base_offset +
2150 ConstantPoolCacheEntry::flags_offset())));
2152 // klass overwrite register
2153 if (is_static) {
2154 __ movptr(obj, Address(cache, index, Address::times_ptr,
2155 in_bytes(cp_base_offset +
2156 ConstantPoolCacheEntry::f1_offset())));
2157 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2158 __ movptr(obj, Address(obj, mirror_offset));
2159 }
2160 }
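
// A sketch of the cache entry fields picked apart above (see cpCache.hpp
// for the authoritative layout):
//
//   class ConstantPoolCacheEntry {
//     intptr_t  _indices;   // cp index plus the resolved bytecodes
//     Metadata* _f1;        // for fields: the field holder Klass*
//     intptr_t  _f2;        // for fields: the field offset in bytes
//     intptr_t  _flags;     // tos state, is_volatile, is_final, ...
//   };
//
// For a static field the holder's java_mirror is loaded as the base object,
// since static fields are stored in the java.lang.Class mirror.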
2162 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2163 Register method,
2164 Register itable_index,
2165 Register flags,
2166 bool is_invokevirtual,
2167 bool is_invokevfinal, /*unused*/
2168 bool is_invokedynamic) {
2169 // setup registers
2170 const Register cache = rcx;
2171 const Register index = rdx;
2172 assert_different_registers(method, flags);
2173 assert_different_registers(method, cache, index);
2174 assert_different_registers(itable_index, flags);
2175 assert_different_registers(itable_index, cache, index);
2176 // determine constant pool cache field offsets
2177 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2178 const int method_offset = in_bytes(
2179 ConstantPoolCache::base_offset() +
2180 ((byte_no == f2_byte)
2181 ? ConstantPoolCacheEntry::f2_offset()
2182 : ConstantPoolCacheEntry::f1_offset()));
2183 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2184 ConstantPoolCacheEntry::flags_offset());
2185 // access constant pool cache fields
2186 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2187 ConstantPoolCacheEntry::f2_offset());
2189 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2190 resolve_cache_and_index(byte_no, cache, index, index_size);
2191 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2193 if (itable_index != noreg) {
2194 // pick up itable or appendix index from f2 also:
2195 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2196 }
2197 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2198 }
2200 // Correct values of the cache and index registers are preserved.
2201 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2202 bool is_static, bool has_tos) {
2203 // do the JVMTI work here to avoid disturbing the register state below
2204   // We use c_rarg registers here because we want to set up the registers
2205   // that will be used in the call to the VM
2206 if (JvmtiExport::can_post_field_access()) {
2207 // Check to see if a field access watch has been set before we
2208 // take the time to call into the VM.
2209 Label L1;
2210 assert_different_registers(cache, index, rax);
2211 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2212 __ testl(rax, rax);
2213 __ jcc(Assembler::zero, L1);
2215 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2217 // cache entry pointer
2218 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2219 __ shll(c_rarg3, LogBytesPerWord);
2220 __ addptr(c_rarg2, c_rarg3);
2221 if (is_static) {
2222 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2223 } else {
2224 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2225 __ verify_oop(c_rarg1);
2226 }
2227 // c_rarg1: object pointer or NULL
2228 // c_rarg2: cache entry pointer
2229 // c_rarg3: jvalue object on the stack
2230 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2231 InterpreterRuntime::post_field_access),
2232 c_rarg1, c_rarg2, c_rarg3);
2233 __ get_cache_and_index_at_bcp(cache, index, 1);
2234 __ bind(L1);
2235 }
2236 }
2238 void TemplateTable::pop_and_check_object(Register r) {
2239 __ pop_ptr(r);
2240 __ null_check(r); // for field access must check obj.
2241 __ verify_oop(r);
2242 }
2244 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2245 transition(vtos, vtos);
2247 const Register cache = rcx;
2248 const Register index = rdx;
2249 const Register obj = c_rarg3;
2250 const Register off = rbx;
2251 const Register flags = rax;
2252 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2254 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2255 jvmti_post_field_access(cache, index, is_static, false);
2256 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2258 if (!is_static) {
2259 // obj is on the stack
2260 pop_and_check_object(obj);
2261 }
2263 const Address field(obj, off, Address::times_1);
2265 Label Done, notByte, notInt, notShort, notChar,
2266 notLong, notFloat, notObj, notDouble;
2268 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2269   // Make sure we don't need to mask flags (rax) after the above shift
2270 assert(btos == 0, "change code, btos != 0");
2272 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2273 __ jcc(Assembler::notZero, notByte);
2274 // btos
2275 __ load_signed_byte(rax, field);
2276 __ push(btos);
2277 // Rewrite bytecode to be faster
2278 if (!is_static) {
2279 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2280 }
2281 __ jmp(Done);
2283 __ bind(notByte);
2284 __ cmpl(flags, atos);
2285 __ jcc(Assembler::notEqual, notObj);
2286 // atos
2287 __ load_heap_oop(rax, field);
2288 __ push(atos);
2289 if (!is_static) {
2290 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2291 }
2292 __ jmp(Done);
2294 __ bind(notObj);
2295 __ cmpl(flags, itos);
2296 __ jcc(Assembler::notEqual, notInt);
2297 // itos
2298 __ movl(rax, field);
2299 __ push(itos);
2300 // Rewrite bytecode to be faster
2301 if (!is_static) {
2302 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2303 }
2304 __ jmp(Done);
2306 __ bind(notInt);
2307 __ cmpl(flags, ctos);
2308 __ jcc(Assembler::notEqual, notChar);
2309 // ctos
2310 __ load_unsigned_short(rax, field);
2311 __ push(ctos);
2312 // Rewrite bytecode to be faster
2313 if (!is_static) {
2314 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2315 }
2316 __ jmp(Done);
2318 __ bind(notChar);
2319 __ cmpl(flags, stos);
2320 __ jcc(Assembler::notEqual, notShort);
2321 // stos
2322 __ load_signed_short(rax, field);
2323 __ push(stos);
2324 // Rewrite bytecode to be faster
2325 if (!is_static) {
2326 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2327 }
2328 __ jmp(Done);
2330 __ bind(notShort);
2331 __ cmpl(flags, ltos);
2332 __ jcc(Assembler::notEqual, notLong);
2333 // ltos
2334 __ movq(rax, field);
2335 __ push(ltos);
2336 // Rewrite bytecode to be faster
2337 if (!is_static) {
2338 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2339 }
2340 __ jmp(Done);
2342 __ bind(notLong);
2343 __ cmpl(flags, ftos);
2344 __ jcc(Assembler::notEqual, notFloat);
2345 // ftos
2346 __ movflt(xmm0, field);
2347 __ push(ftos);
2348 // Rewrite bytecode to be faster
2349 if (!is_static) {
2350 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2351 }
2352 __ jmp(Done);
2354 __ bind(notFloat);
2355 #ifdef ASSERT
2356 __ cmpl(flags, dtos);
2357 __ jcc(Assembler::notEqual, notDouble);
2358 #endif
2359 // dtos
2360 __ movdbl(xmm0, field);
2361 __ push(dtos);
2362 // Rewrite bytecode to be faster
2363 if (!is_static) {
2364 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2365 }
2366 #ifdef ASSERT
2367 __ jmp(Done);
2369 __ bind(notDouble);
2370 __ stop("Bad state");
2371 #endif
2373 __ bind(Done);
2374 // [jk] not needed currently
2375 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2376 // Assembler::LoadStore));
2377 }
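
// The patch_bytecode calls above implement quickening: the first execution
// resolves the field and rewrites the generic bytecode into a
// type-specialized fast variant, so later executions skip resolution and the
// tos-state dispatch chain entirely. Roughly:
//
//   getfield #idx         // first execution: resolve, then patch to ...
//   fast_igetfield #idx   // ... which dispatches to fast_accessfield below
//
// Static accesses are left unpatched here; the fast field bytecodes assume
// a receiver on the stack.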
2380 void TemplateTable::getfield(int byte_no) {
2381 getfield_or_static(byte_no, false);
2382 }
2384 void TemplateTable::getstatic(int byte_no) {
2385 getfield_or_static(byte_no, true);
2386 }
2388 // The cache and index registers are expected to be set before this call.
2389 // The function may destroy various registers, just not the cache and index registers.
2390 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2391 transition(vtos, vtos);
2393 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2395 if (JvmtiExport::can_post_field_modification()) {
2396 // Check to see if a field modification watch has been set before
2397 // we take the time to call into the VM.
2398 Label L1;
2399 assert_different_registers(cache, index, rax);
2400 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2401 __ testl(rax, rax);
2402 __ jcc(Assembler::zero, L1);
2404 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2406 if (is_static) {
2407 // Life is simple. Null out the object pointer.
2408 __ xorl(c_rarg1, c_rarg1);
2409 } else {
2410 // Life is harder. The stack holds the value on top, followed by
2411 // the object. We don't know the size of the value, though; it
2412 // could be one or two words depending on its type. As a result,
2413 // we must find the type to determine where the object is.
2414 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2415 Address::times_8,
2416 in_bytes(cp_base_offset +
2417 ConstantPoolCacheEntry::flags_offset())));
2418 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2419     // Make sure we don't need to mask c_rarg3 after the above shift
2420 ConstantPoolCacheEntry::verify_tos_state_shift();
2421 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2422 __ cmpl(c_rarg3, ltos);
2423 __ cmovptr(Assembler::equal,
2424 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2425 __ cmpl(c_rarg3, dtos);
2426 __ cmovptr(Assembler::equal,
2427 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2428 }
2429 // cache entry pointer
2430 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2431 __ shll(rscratch1, LogBytesPerWord);
2432 __ addptr(c_rarg2, rscratch1);
2433 // object (tos)
2434 __ mov(c_rarg3, rsp);
2435 // c_rarg1: object pointer set up above (NULL if static)
2436 // c_rarg2: cache entry pointer
2437 // c_rarg3: jvalue object on the stack
2438 __ call_VM(noreg,
2439 CAST_FROM_FN_PTR(address,
2440 InterpreterRuntime::post_field_modification),
2441 c_rarg1, c_rarg2, c_rarg3);
2442 __ get_cache_and_index_at_bcp(cache, index, 1);
2443 __ bind(L1);
2444 }
2445 }
2447 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2448 transition(vtos, vtos);
2450 const Register cache = rcx;
2451 const Register index = rdx;
2452 const Register obj = rcx;
2453 const Register off = rbx;
2454 const Register flags = rax;
2455 const Register bc = c_rarg3;
2457 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2458 jvmti_post_field_mod(cache, index, is_static);
2459 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2461 // [jk] not needed currently
2462 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2463 // Assembler::StoreStore));
2465 Label notVolatile, Done;
2466 __ movl(rdx, flags);
2467 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2468 __ andl(rdx, 0x1);
2470 // field address
2471 const Address field(obj, off, Address::times_1);
2473 Label notByte, notInt, notShort, notChar,
2474 notLong, notFloat, notObj, notDouble;
2476 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2478 assert(btos == 0, "change code, btos != 0");
2479 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2480 __ jcc(Assembler::notZero, notByte);
2482 // btos
2483 {
2484 __ pop(btos);
2485 if (!is_static) pop_and_check_object(obj);
2486 __ movb(field, rax);
2487 if (!is_static) {
2488 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2489 }
2490 __ jmp(Done);
2491 }
2493 __ bind(notByte);
2494 __ cmpl(flags, atos);
2495 __ jcc(Assembler::notEqual, notObj);
2497 // atos
2498 {
2499 __ pop(atos);
2500 if (!is_static) pop_and_check_object(obj);
2501 // Store into the field
2502 do_oop_store(_masm, field, rax, _bs->kind(), false);
2503 if (!is_static) {
2504 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2505 }
2506 __ jmp(Done);
2507 }
2509 __ bind(notObj);
2510 __ cmpl(flags, itos);
2511 __ jcc(Assembler::notEqual, notInt);
2513 // itos
2514 {
2515 __ pop(itos);
2516 if (!is_static) pop_and_check_object(obj);
2517 __ movl(field, rax);
2518 if (!is_static) {
2519 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2520 }
2521 __ jmp(Done);
2522 }
2524 __ bind(notInt);
2525 __ cmpl(flags, ctos);
2526 __ jcc(Assembler::notEqual, notChar);
2528 // ctos
2529 {
2530 __ pop(ctos);
2531 if (!is_static) pop_and_check_object(obj);
2532 __ movw(field, rax);
2533 if (!is_static) {
2534 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2535 }
2536 __ jmp(Done);
2537 }
2539 __ bind(notChar);
2540 __ cmpl(flags, stos);
2541 __ jcc(Assembler::notEqual, notShort);
2543 // stos
2544 {
2545 __ pop(stos);
2546 if (!is_static) pop_and_check_object(obj);
2547 __ movw(field, rax);
2548 if (!is_static) {
2549 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2550 }
2551 __ jmp(Done);
2552 }
2554 __ bind(notShort);
2555 __ cmpl(flags, ltos);
2556 __ jcc(Assembler::notEqual, notLong);
2558 // ltos
2559 {
2560 __ pop(ltos);
2561 if (!is_static) pop_and_check_object(obj);
2562 __ movq(field, rax);
2563 if (!is_static) {
2564 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2565 }
2566 __ jmp(Done);
2567 }
2569 __ bind(notLong);
2570 __ cmpl(flags, ftos);
2571 __ jcc(Assembler::notEqual, notFloat);
2573 // ftos
2574 {
2575 __ pop(ftos);
2576 if (!is_static) pop_and_check_object(obj);
2577 __ movflt(field, xmm0);
2578 if (!is_static) {
2579 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2580 }
2581 __ jmp(Done);
2582 }
2584 __ bind(notFloat);
2585 #ifdef ASSERT
2586 __ cmpl(flags, dtos);
2587 __ jcc(Assembler::notEqual, notDouble);
2588 #endif
2590 // dtos
2591 {
2592 __ pop(dtos);
2593 if (!is_static) pop_and_check_object(obj);
2594 __ movdbl(field, xmm0);
2595 if (!is_static) {
2596 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2597 }
2598 }
2600 #ifdef ASSERT
2601 __ jmp(Done);
2603 __ bind(notDouble);
2604 __ stop("Bad state");
2605 #endif
2607 __ bind(Done);
2609 // Check for volatile store
2610 __ testl(rdx, rdx);
2611 __ jcc(Assembler::zero, notVolatile);
2612 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2613 Assembler::StoreStore));
2614 __ bind(notVolatile);
2615 }
2617 void TemplateTable::putfield(int byte_no) {
2618 putfield_or_static(byte_no, false);
2619 }
2621 void TemplateTable::putstatic(int byte_no) {
2622 putfield_or_static(byte_no, true);
2623 }
2625 void TemplateTable::jvmti_post_fast_field_mod() {
2626 if (JvmtiExport::can_post_field_modification()) {
2627 // Check to see if a field modification watch has been set before
2628 // we take the time to call into the VM.
2629 Label L2;
2630 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2631 __ testl(c_rarg3, c_rarg3);
2632 __ jcc(Assembler::zero, L2);
2633 __ pop_ptr(rbx); // copy the object pointer from tos
2634 __ verify_oop(rbx);
2635 __ push_ptr(rbx); // put the object pointer back on tos
2636 // Save tos values before call_VM() clobbers them. Since we have
2637 // to do it for every data type, we use the saved values as the
2638 // jvalue object.
2639 switch (bytecode()) { // load values into the jvalue object
2640 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2641 case Bytecodes::_fast_bputfield: // fall through
2642 case Bytecodes::_fast_sputfield: // fall through
2643 case Bytecodes::_fast_cputfield: // fall through
2644 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2645 case Bytecodes::_fast_dputfield: __ push_d(); break;
2646 case Bytecodes::_fast_fputfield: __ push_f(); break;
2647 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2649 default:
2650 ShouldNotReachHere();
2651 }
2652 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2653 // access constant pool cache entry
2654 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2655 __ verify_oop(rbx);
2656 // rbx: object pointer copied above
2657 // c_rarg2: cache entry pointer
2658 // c_rarg3: jvalue object on the stack
2659 __ call_VM(noreg,
2660 CAST_FROM_FN_PTR(address,
2661 InterpreterRuntime::post_field_modification),
2662 rbx, c_rarg2, c_rarg3);
2664 switch (bytecode()) { // restore tos values
2665 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2666 case Bytecodes::_fast_bputfield: // fall through
2667 case Bytecodes::_fast_sputfield: // fall through
2668 case Bytecodes::_fast_cputfield: // fall through
2669 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2670 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2671 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2672 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2673 }
2674 __ bind(L2);
2675 }
2676 }
2678 void TemplateTable::fast_storefield(TosState state) {
2679 transition(state, vtos);
2681 ByteSize base = ConstantPoolCache::base_offset();
2683 jvmti_post_fast_field_mod();
2685 // access constant pool cache
2686 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2688 // test for volatile with rdx
2689 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2690 in_bytes(base +
2691 ConstantPoolCacheEntry::flags_offset())));
2693 // replace index with field offset from cache entry
2694 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2695 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2697 // [jk] not needed currently
2698 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2699 // Assembler::StoreStore));
2701 Label notVolatile;
2702 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2703 __ andl(rdx, 0x1);
2705 // Get object from stack
2706 pop_and_check_object(rcx);
2708 // field address
2709 const Address field(rcx, rbx, Address::times_1);
2711 // access field
2712 switch (bytecode()) {
2713 case Bytecodes::_fast_aputfield:
2714 do_oop_store(_masm, field, rax, _bs->kind(), false);
2715 break;
2716 case Bytecodes::_fast_lputfield:
2717 __ movq(field, rax);
2718 break;
2719 case Bytecodes::_fast_iputfield:
2720 __ movl(field, rax);
2721 break;
2722 case Bytecodes::_fast_bputfield:
2723 __ movb(field, rax);
2724 break;
2725 case Bytecodes::_fast_sputfield:
2726 // fall through
2727 case Bytecodes::_fast_cputfield:
2728 __ movw(field, rax);
2729 break;
2730 case Bytecodes::_fast_fputfield:
2731 __ movflt(field, xmm0);
2732 break;
2733 case Bytecodes::_fast_dputfield:
2734 __ movdbl(field, xmm0);
2735 break;
2736 default:
2737 ShouldNotReachHere();
2738 }
2740 // Check for volatile store
2741 __ testl(rdx, rdx);
2742 __ jcc(Assembler::zero, notVolatile);
2743 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2744 Assembler::StoreStore));
2745 __ bind(notVolatile);
2746 }
2749 void TemplateTable::fast_accessfield(TosState state) {
2750 transition(atos, state);
2752 // Do the JVMTI work here to avoid disturbing the register state below
2753 if (JvmtiExport::can_post_field_access()) {
2754 // Check to see if a field access watch has been set before we
2755 // take the time to call into the VM.
2756 Label L1;
2757 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2758 __ testl(rcx, rcx);
2759 __ jcc(Assembler::zero, L1);
2760 // access constant pool cache entry
2761 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2762 __ verify_oop(rax);
2763 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2764 __ mov(c_rarg1, rax);
2765 // c_rarg1: object pointer copied above
2766 // c_rarg2: cache entry pointer
2767 __ call_VM(noreg,
2768 CAST_FROM_FN_PTR(address,
2769 InterpreterRuntime::post_field_access),
2770 c_rarg1, c_rarg2);
2771 __ pop_ptr(rax); // restore object pointer
2772 __ bind(L1);
2773 }
2775 // access constant pool cache
2776 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2777 // replace index with field offset from cache entry
2778 // [jk] not needed currently
2779 // if (os::is_MP()) {
2780 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2781 // in_bytes(ConstantPoolCache::base_offset() +
2782 // ConstantPoolCacheEntry::flags_offset())));
2783 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2784 // __ andl(rdx, 0x1);
2785 // }
2786 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2787 in_bytes(ConstantPoolCache::base_offset() +
2788 ConstantPoolCacheEntry::f2_offset())));
2790 // rax: object
2791 __ verify_oop(rax);
2792 __ null_check(rax);
2793 Address field(rax, rbx, Address::times_1);
2795 // access field
2796 switch (bytecode()) {
2797 case Bytecodes::_fast_agetfield:
2798 __ load_heap_oop(rax, field);
2799 __ verify_oop(rax);
2800 break;
2801 case Bytecodes::_fast_lgetfield:
2802 __ movq(rax, field);
2803 break;
2804 case Bytecodes::_fast_igetfield:
2805 __ movl(rax, field);
2806 break;
2807 case Bytecodes::_fast_bgetfield:
2808 __ movsbl(rax, field);
2809 break;
2810 case Bytecodes::_fast_sgetfield:
2811 __ load_signed_short(rax, field);
2812 break;
2813 case Bytecodes::_fast_cgetfield:
2814 __ load_unsigned_short(rax, field);
2815 break;
2816 case Bytecodes::_fast_fgetfield:
2817 __ movflt(xmm0, field);
2818 break;
2819 case Bytecodes::_fast_dgetfield:
2820 __ movdbl(xmm0, field);
2821 break;
2822 default:
2823 ShouldNotReachHere();
2824 }
2825 // [jk] not needed currently
2826 // if (os::is_MP()) {
2827 // Label notVolatile;
2828 // __ testl(rdx, rdx);
2829 // __ jcc(Assembler::zero, notVolatile);
2830 // __ membar(Assembler::LoadLoad);
2831 // __ bind(notVolatile);
2832   // }
2833 }
2835 void TemplateTable::fast_xaccess(TosState state) {
2836 transition(vtos, state);
2838 // get receiver
2839 __ movptr(rax, aaddress(0));
2840 // access constant pool cache
2841 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2842 __ movptr(rbx,
2843 Address(rcx, rdx, Address::times_8,
2844 in_bytes(ConstantPoolCache::base_offset() +
2845 ConstantPoolCacheEntry::f2_offset())));
2846   // make sure the exception is reported in the correct bcp range (the
2847   // getfield is the next instruction)
2848 __ increment(r13);
2849 __ null_check(rax);
2850 switch (state) {
2851 case itos:
2852 __ movl(rax, Address(rax, rbx, Address::times_1));
2853 break;
2854 case atos:
2855 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2856 __ verify_oop(rax);
2857 break;
2858 case ftos:
2859 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2860 break;
2861 default:
2862 ShouldNotReachHere();
2863 }
2865 // [jk] not needed currently
2866 // if (os::is_MP()) {
2867 // Label notVolatile;
2868 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2869 // in_bytes(ConstantPoolCache::base_offset() +
2870 // ConstantPoolCacheEntry::flags_offset())));
2871 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2872 // __ testl(rdx, 0x1);
2873 // __ jcc(Assembler::zero, notVolatile);
2874 // __ membar(Assembler::LoadLoad);
2875 // __ bind(notVolatile);
2876 // }
2878 __ decrement(r13);
2879 }
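
// fast_xaccess implements the fused aload_0 + fast_xgetfield bytecodes
// (_fast_iaccess_0 and friends). Conceptually, as a sketch:
//
//   receiver = locals[0];           // the aload_0 half
//   bcp++;                          // report any NPE against the getfield
//   push(*(field_type*)((char*)receiver + f2_offset));
//   bcp--;                          // normal dispatch re-advances the bcp
//
// which is also why the cache entry is fetched at bcp offset 2 above: the
// field reference index sits after the two fused opcodes.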
2883 //-----------------------------------------------------------------------------
2884 // Calls
2886 void TemplateTable::count_calls(Register method, Register temp) {
2887 // implemented elsewhere
2888 ShouldNotReachHere();
2889 }
2891 void TemplateTable::prepare_invoke(int byte_no,
2892 Register method, // linked method (or i-klass)
2893 Register index, // itable index, MethodType, etc.
2894 Register recv, // if caller wants to see it
2895 Register flags // if caller wants to test it
2896 ) {
2897 // determine flags
2898 const Bytecodes::Code code = bytecode();
2899 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2900 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2901 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2902 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2903 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2904 const bool load_receiver = (recv != noreg);
2905 const bool save_flags = (flags != noreg);
2906 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2907 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2908 assert(flags == noreg || flags == rdx, "");
2909 assert(recv == noreg || recv == rcx, "");
2911 // setup registers & access constant pool cache
2912 if (recv == noreg) recv = rcx;
2913 if (flags == noreg) flags = rdx;
2914 assert_different_registers(method, index, recv, flags);
2916 // save 'interpreter return address'
2917 __ save_bcp();
2919 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2921 // maybe push appendix to arguments (just before return address)
2922 if (is_invokedynamic || is_invokehandle) {
2923 Label L_no_push;
2924 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2925 __ jcc(Assembler::zero, L_no_push);
2926 // Push the appendix as a trailing parameter.
2927 // This must be done before we get the receiver,
2928 // since the parameter_size includes it.
2929 __ push(rbx);
2930 __ mov(rbx, index);
2931 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2932 __ load_resolved_reference_at_index(index, rbx);
2933 __ pop(rbx);
2934 __ push(index); // push appendix (MethodType, CallSite, etc.)
2935 __ bind(L_no_push);
2936 }
2938 // load receiver if needed (after appendix is pushed so parameter size is correct)
2939 // Note: no return address pushed yet
2940 if (load_receiver) {
2941 __ movl(recv, flags);
2942 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2943 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2944 const int receiver_is_at_end = -1; // back off one slot to get receiver
2945 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2946 __ movptr(recv, recv_addr);
2947 __ verify_oop(recv);
2948 }
2950 if (save_flags) {
2951 __ movl(r13, flags);
2952 }
2954 // compute return type
2955 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2956 // Make sure we don't need to mask flags after the above shift
2957 ConstantPoolCacheEntry::verify_tos_state_shift();
2958 // load return address
2959 {
2960 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2961 (address)Interpreter::return_5_addrs_by_index_table() :
2962 (address)Interpreter::return_3_addrs_by_index_table();
2963 ExternalAddress table(table_addr);
2964 __ lea(rscratch1, table);
2965 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2966 }
2968 // push return address
2969 __ push(flags);
2971   // Restore the flags value (stashed in r13 above), then restore r13
2972   // itself, the bytecode pointer, for later null checks
2973 if (save_flags) {
2974 __ movl(flags, r13);
2975 __ restore_bcp();
2976 }
2977 }
2980 void TemplateTable::invokevirtual_helper(Register index,
2981 Register recv,
2982 Register flags) {
2983 // Uses temporary registers rax, rdx
2984 assert_different_registers(index, recv, rax, rdx);
2985 assert(index == rbx, "");
2986 assert(recv == rcx, "");
2988 // Test for an invoke of a final method
2989 Label notFinal;
2990 __ movl(rax, flags);
2991 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2992 __ jcc(Assembler::zero, notFinal);
2994 const Register method = index; // method must be rbx
2995 assert(method == rbx,
2996 "Method* must be rbx for interpreter calling convention");
2998 // do the call - the index is actually the method to call
2999 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3001 // It's final, need a null check here!
3002 __ null_check(recv);
3004 // profile this call
3005 __ profile_final_call(rax);
3007 __ jump_from_interpreted(method, rax);
3009 __ bind(notFinal);
3011 // get receiver klass
3012 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3013 __ load_klass(rax, recv);
3015 // profile this call
3016 __ profile_virtual_call(rax, r14, rdx);
3018 // get target Method* & entry point
3019 __ lookup_virtual_method(rax, index, method);
3020 __ jump_from_interpreted(method, rdx);
3021 }
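
// A C-like sketch of the two dispatch paths above (names illustrative;
// lookup_virtual_method does the actual vtable indexing):
//
//   if (is_vfinal(flags)) {
//     Method* m = (Method*)f2;           // f2 holds the Method* itself
//     null_check(recv);
//     dispatch(m);                       // monomorphic, no table walk
//   } else {
//     Klass*  k = recv->klass();         // implicit null check
//     Method* m = vtable_of(k)[(int)f2]; // f2 holds the vtable index
//     dispatch(m);
//   }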
3024 void TemplateTable::invokevirtual(int byte_no) {
3025 transition(vtos, vtos);
3026 assert(byte_no == f2_byte, "use this argument");
3027 prepare_invoke(byte_no,
3028 rbx, // method or vtable index
3029 noreg, // unused itable index
3030 rcx, rdx); // recv, flags
3032 // rbx: index
3033 // rcx: receiver
3034 // rdx: flags
3036 invokevirtual_helper(rbx, rcx, rdx);
3037 }
3040 void TemplateTable::invokespecial(int byte_no) {
3041 transition(vtos, vtos);
3042 assert(byte_no == f1_byte, "use this argument");
3043 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3044 rcx); // get receiver also for null check
3045 __ verify_oop(rcx);
3046 __ null_check(rcx);
3047 // do the call
3048 __ profile_call(rax);
3049 __ jump_from_interpreted(rbx, rax);
3050 }
3053 void TemplateTable::invokestatic(int byte_no) {
3054 transition(vtos, vtos);
3055 assert(byte_no == f1_byte, "use this argument");
3056 prepare_invoke(byte_no, rbx); // get f1 Method*
3057 // do the call
3058 __ profile_call(rax);
3059 __ jump_from_interpreted(rbx, rax);
3060 }
3062 void TemplateTable::fast_invokevfinal(int byte_no) {
3063 transition(vtos, vtos);
3064 assert(byte_no == f2_byte, "use this argument");
3065 __ stop("fast_invokevfinal not used on amd64");
3066 }
3068 void TemplateTable::invokeinterface(int byte_no) {
3069 transition(vtos, vtos);
3070 assert(byte_no == f1_byte, "use this argument");
3071 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3072 rcx, rdx); // recv, flags
3074 // rax: interface klass (from f1)
3075 // rbx: itable index (from f2)
3076 // rcx: receiver
3077 // rdx: flags
3079   // Special case of invokeinterface called for a virtual method of
3080   // java.lang.Object. See cpCacheOop.cpp for details.
3081   // This code isn't produced by javac, but could be produced by
3082   // another compliant Java compiler.
3083 Label notMethod;
3084 __ movl(r14, rdx);
3085 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3086 __ jcc(Assembler::zero, notMethod);
3088 invokevirtual_helper(rbx, rcx, rdx);
3089 __ bind(notMethod);
3091 // Get receiver klass into rdx - also a null check
3092 __ restore_locals(); // restore r14
3093 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3094 __ load_klass(rdx, rcx);
3096 // profile this call
3097 __ profile_virtual_call(rdx, r13, r14);
3099 Label no_such_interface, no_such_method;
3101 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3102 rdx, rax, rbx,
3103 // outputs: method, scan temp. reg
3104 rbx, r13,
3105 no_such_interface);
3107 // rbx: Method* to call
3108 // rcx: receiver
3109 // Check for abstract method error
3110 // Note: This should be done more efficiently via a throw_abstract_method_error
3111 // interpreter entry point and a conditional jump to it in case of a null
3112 // method.
3113 __ testptr(rbx, rbx);
3114 __ jcc(Assembler::zero, no_such_method);
3116 // do the call
3117 // rcx: receiver
3118 // rbx,: Method*
3119 __ jump_from_interpreted(rbx, rdx);
3120 __ should_not_reach_here();
3122 // exception handling code follows...
3123 // note: must restore interpreter registers to canonical
3124 // state for exception handling to work correctly!
3126 __ bind(no_such_method);
3127 // throw exception
3128 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3129 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3130 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3131 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3132 // the call_VM checks for exception, so we should never return here.
3133 __ should_not_reach_here();
3135 __ bind(no_such_interface);
3136 // throw exception
3137 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3138 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3139 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3140 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3141 InterpreterRuntime::throw_IncompatibleClassChangeError));
3142 // the call_VM checks for exception, so we should never return here.
3143 __ should_not_reach_here();
3144 }
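
// A sketch of the search lookup_interface_method performs above (names
// illustrative): the receiver's itable is scanned for the interface klass,
// then the method is fetched at the itable index:
//
//   for (entry = itable_start(recv_klass); ; entry++) {
//     if (entry->interface() == NULL) goto no_such_interface;  // ICCE
//     if (entry->interface() == iface) break;
//   }
//   Method* m = entry->method_table()[itable_index];
//   if (m == NULL) goto no_such_method;   // AbstractMethodError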
3147 void TemplateTable::invokehandle(int byte_no) {
3148 transition(vtos, vtos);
3149 assert(byte_no == f1_byte, "use this argument");
3150 const Register rbx_method = rbx;
3151 const Register rax_mtype = rax;
3152 const Register rcx_recv = rcx;
3153 const Register rdx_flags = rdx;
3155 if (!EnableInvokeDynamic) {
3156 // rewriter does not generate this bytecode
3157 __ should_not_reach_here();
3158 return;
3159 }
3161 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3162 __ verify_method_ptr(rbx_method);
3163 __ verify_oop(rcx_recv);
3164 __ null_check(rcx_recv);
3166 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3167 // rbx: MH.invokeExact_MT method (from f2)
3169 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3171 // FIXME: profile the LambdaForm also
3172 __ profile_final_call(rax);
3174 __ jump_from_interpreted(rbx_method, rdx);
3175 }
3178 void TemplateTable::invokedynamic(int byte_no) {
3179 transition(vtos, vtos);
3180 assert(byte_no == f1_byte, "use this argument");
3182 if (!EnableInvokeDynamic) {
3183 // We should not encounter this bytecode if !EnableInvokeDynamic.
3184 // The verifier will stop it. However, if we get past the verifier,
3185 // this will stop the thread in a reasonable way, without crashing the JVM.
3186 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3187 InterpreterRuntime::throw_IncompatibleClassChangeError));
3188 // the call_VM checks for exception, so we should never return here.
3189 __ should_not_reach_here();
3190 return;
3191 }
3193 const Register rbx_method = rbx;
3194 const Register rax_callsite = rax;
3196 prepare_invoke(byte_no, rbx_method, rax_callsite);
3198 // rax: CallSite object (from cpool->resolved_references[f1])
3199 // rbx: MH.linkToCallSite method (from f2)
3201 // Note: rax_callsite is already pushed by prepare_invoke
3203 // %%% should make a type profile for any invokedynamic that takes a ref argument
3204 // profile this call
3205 __ profile_call(r13);
3207 __ verify_oop(rax_callsite);
3209 __ jump_from_interpreted(rbx_method, rdx);
3210 }
3213 //-----------------------------------------------------------------------------
3214 // Allocation
3216 void TemplateTable::_new() {
3217 transition(vtos, atos);
3218 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3219 Label slow_case;
3220 Label done;
3221 Label initialize_header;
3222 Label initialize_object; // including clearing the fields
3223 Label allocate_shared;
3225 __ get_cpool_and_tags(rsi, rax);
3226 // Make sure the class we're about to instantiate has been resolved.
3227   // This is done before loading the InstanceKlass to be consistent with the
3228   // order in which the ConstantPool is updated (see ConstantPool::klass_at_put)
3229 const int tags_offset = Array<u1>::base_offset_in_bytes();
3230 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3231 JVM_CONSTANT_Class);
3232 __ jcc(Assembler::notEqual, slow_case);
3234 // get InstanceKlass
3235 __ movptr(rsi, Address(rsi, rdx,
3236 Address::times_8, sizeof(ConstantPool)));
3238 // make sure klass is initialized & doesn't have finalizer
3239 // make sure klass is fully initialized
3240 __ cmpb(Address(rsi,
3241 InstanceKlass::init_state_offset()),
3242 InstanceKlass::fully_initialized);
3243 __ jcc(Assembler::notEqual, slow_case);
3245 // get instance_size in InstanceKlass (scaled to a count of bytes)
3246 __ movl(rdx,
3247 Address(rsi,
3248 Klass::layout_helper_offset()));
3249 // test to see if it has a finalizer or is malformed in some way
3250 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3251 __ jcc(Assembler::notZero, slow_case);
3253 // Allocate the instance
3254 // 1) Try to allocate in the TLAB
3255 // 2) if fail and the object is large allocate in the shared Eden
3256 // 3) if the above fails (or is not applicable), go to a slow case
3257 // (creates a new TLAB, etc.)
3259 const bool allow_shared_alloc =
3260 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3262 if (UseTLAB) {
3263 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3264 __ lea(rbx, Address(rax, rdx, Address::times_1));
3265 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3266 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3267 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3268 if (ZeroTLAB) {
3269 // the fields have been already cleared
3270 __ jmp(initialize_header);
3271 } else {
3272 // initialize both the header and fields
3273 __ jmp(initialize_object);
3274 }
3275 }
3277 // Allocation in the shared Eden, if allowed.
3278 //
3279 // rdx: instance size in bytes
3280 if (allow_shared_alloc) {
3281 __ bind(allocate_shared);
3283 ExternalAddress top((address)Universe::heap()->top_addr());
3284 ExternalAddress end((address)Universe::heap()->end_addr());
3286 const Register RtopAddr = rscratch1;
3287 const Register RendAddr = rscratch2;
3289 __ lea(RtopAddr, top);
3290 __ lea(RendAddr, end);
3291 __ movptr(rax, Address(RtopAddr, 0));
3293 // For retries rax gets set by cmpxchgq
3294 Label retry;
3295 __ bind(retry);
3296 __ lea(rbx, Address(rax, rdx, Address::times_1));
3297 __ cmpptr(rbx, Address(RendAddr, 0));
3298 __ jcc(Assembler::above, slow_case);
3300     // Compare rax with the current top addr, and if still equal, store the
3301     // new top addr (in rbx) through the top addr pointer. Sets ZF if they
3302     // were equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3303 //
3304 // rax: object begin
3305 // rbx: object end
3306 // rdx: instance size in bytes
3307 if (os::is_MP()) {
3308 __ lock();
3309 }
3310 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3312     // if someone beat us to the allocation, try again, otherwise continue
3313 __ jcc(Assembler::notEqual, retry);
3315 __ incr_allocated_bytes(r15_thread, rdx, 0);
3316 }
3318 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3319 // The object is initialized before the header. If the object size is
3320 // zero, go directly to the header initialization.
3321 __ bind(initialize_object);
3322 __ decrementl(rdx, sizeof(oopDesc));
3323 __ jcc(Assembler::zero, initialize_header);
3325 // Initialize object fields
3326 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3327 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3328 {
3329 Label loop;
3330 __ bind(loop);
3331 __ movq(Address(rax, rdx, Address::times_8,
3332 sizeof(oopDesc) - oopSize),
3333 rcx);
3334 __ decrementl(rdx);
3335 __ jcc(Assembler::notZero, loop);
3336 }
3338 // initialize object header only.
3339 __ bind(initialize_header);
3340 if (UseBiasedLocking) {
3341 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3342 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3343 } else {
3344 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3345 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3346 }
3347 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3348 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3349 __ store_klass(rax, rsi); // store klass last
3351 {
3352 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3353 // Trigger dtrace event for fastpath
3354 __ push(atos); // save the return value
3355 __ call_VM_leaf(
3356 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3357 __ pop(atos); // restore the return value
3359 }
3360 __ jmp(done);
3361 }
3364 // slow case
3365 __ bind(slow_case);
3366 __ get_constant_pool(c_rarg1);
3367 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3368 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3369 __ verify_oop(rax);
3371 // continue
3372 __ bind(done);
3373 }
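
// The fast path above is plain bump-pointer allocation. As a C-like sketch
// (names illustrative; slow_case re-enters the VM to allocate and retry):
//
//   HeapWord* obj = thread->tlab_top();
//   HeapWord* end = obj + instance_size;
//   if (end > thread->tlab_end()) goto shared_eden_or_slow_case;
//   thread->set_tlab_top(end);
//   // shared-eden variant: lock cmpxchg of 'end' into *top_addr,
//   // retrying if another thread won the race
//   zero_fields(obj);
//   install_mark_word(obj);
//   install_klass(obj);       // store klass last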
3375 void TemplateTable::newarray() {
3376 transition(itos, atos);
3377 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3378 __ movl(c_rarg2, rax);
3379 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3380 c_rarg1, c_rarg2);
3381 }
3383 void TemplateTable::anewarray() {
3384 transition(itos, atos);
3385 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3386 __ get_constant_pool(c_rarg1);
3387 __ movl(c_rarg3, rax);
3388 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3389 c_rarg1, c_rarg2, c_rarg3);
3390 }
3392 void TemplateTable::arraylength() {
3393 transition(atos, itos);
3394 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3395 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3396 }
3398 void TemplateTable::checkcast() {
3399 transition(atos, atos);
3400 Label done, is_null, ok_is_subtype, quicked, resolved;
3401 __ testptr(rax, rax); // object is in rax
3402 __ jcc(Assembler::zero, is_null);
3404 // Get cpool & tags index
3405 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3406 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3407 // See if bytecode has already been quicked
3408 __ cmpb(Address(rdx, rbx,
3409 Address::times_1,
3410 Array<u1>::base_offset_in_bytes()),
3411 JVM_CONSTANT_Class);
3412 __ jcc(Assembler::equal, quicked);
3413 __ push(atos); // save receiver for result, and for GC
3414 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3415 // vm_result_2 has metadata result
3416 __ get_vm_result_2(rax, r15_thread);
3417 __ pop_ptr(rdx); // restore receiver
3418 __ jmpb(resolved);
3420 // Get superklass in rax and subklass in rbx
3421 __ bind(quicked);
3422 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3423 __ movptr(rax, Address(rcx, rbx,
3424 Address::times_8, sizeof(ConstantPool)));
3426 __ bind(resolved);
3427 __ load_klass(rbx, rdx);
3429 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3430 // Superklass in rax. Subklass in rbx.
3431 __ gen_subtype_check(rbx, ok_is_subtype);
3433 // Come here on failure
3434 __ push_ptr(rdx);
3435 // object is at TOS
3436 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3438 // Come here on success
3439 __ bind(ok_is_subtype);
3440 __ mov(rax, rdx); // Restore object in rdx
3442 // Collect counts on whether this check-cast sees NULLs a lot or not.
3443 if (ProfileInterpreter) {
3444 __ jmp(done);
3445 __ bind(is_null);
3446 __ profile_null_seen(rcx);
3447 } else {
3448 __ bind(is_null); // same as 'done'
3449 }
3450 __ bind(done);
3451 }
3453 void TemplateTable::instanceof() {
3454 transition(atos, itos);
3455 Label done, is_null, ok_is_subtype, quicked, resolved;
3456 __ testptr(rax, rax);
3457 __ jcc(Assembler::zero, is_null);
3459 // Get cpool & tags index
3460 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3461 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3462 // See if bytecode has already been quicked
3463 __ cmpb(Address(rdx, rbx,
3464 Address::times_1,
3465 Array<u1>::base_offset_in_bytes()),
3466 JVM_CONSTANT_Class);
3467 __ jcc(Assembler::equal, quicked);
3469 __ push(atos); // save receiver for result, and for GC
3470 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3471 // vm_result_2 has metadata result
3472 __ get_vm_result_2(rax, r15_thread);
3473 __ pop_ptr(rdx); // restore receiver
3474 __ verify_oop(rdx);
3475 __ load_klass(rdx, rdx);
3476 __ jmpb(resolved);
3478 // Get superklass in rax and subklass in rdx
3479 __ bind(quicked);
3480 __ load_klass(rdx, rax);
3481 __ movptr(rax, Address(rcx, rbx,
3482 Address::times_8, sizeof(ConstantPool)));
3484 __ bind(resolved);
3486 // Generate subtype check. Blows rcx, rdi
3487 // Superklass in rax. Subklass in rdx.
3488 __ gen_subtype_check(rdx, ok_is_subtype);
3490 // Come here on failure
3491 __ xorl(rax, rax);
3492 __ jmpb(done);
3493 // Come here on success
3494 __ bind(ok_is_subtype);
3495 __ movl(rax, 1);
3497 // Collect counts on whether this test sees NULLs a lot or not.
3498 if (ProfileInterpreter) {
3499 __ jmp(done);
3500 __ bind(is_null);
3501 __ profile_null_seen(rcx);
3502 } else {
3503 __ bind(is_null); // same as 'done'
3504 }
3505 __ bind(done);
3506 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3507 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3508 }
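
// checkcast and instanceof reduce to the same subtype test; only the NULL
// handling and the result differ. As a sketch:
//
//   instanceof:  push(obj != NULL && obj->klass()->is_subtype_of(K));
//   checkcast:   if (obj != NULL && !obj->klass()->is_subtype_of(K))
//                  throw ClassCastException;    // NULL always passes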
3510 //-----------------------------------------------------------------------------
3511 // Breakpoints
3512 void TemplateTable::_breakpoint() {
3513   // Note: We get here even if we are single stepping.
3514   // jbug insists on setting breakpoints at every bytecode
3515   // even if we are in single-step mode.
3517 transition(vtos, vtos);
3519 // get the unpatched byte code
3520 __ get_method(c_rarg1);
3521 __ call_VM(noreg,
3522 CAST_FROM_FN_PTR(address,
3523 InterpreterRuntime::get_original_bytecode_at),
3524 c_rarg1, r13);
3525 __ mov(rbx, rax);
3527 // post the breakpoint event
3528 __ get_method(c_rarg1);
3529 __ call_VM(noreg,
3530 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3531 c_rarg1, r13);
3533 // complete the execution of original bytecode
3534 __ dispatch_only_normal(vtos);
3535 }
3537 //-----------------------------------------------------------------------------
3538 // Exceptions
3540 void TemplateTable::athrow() {
3541 transition(atos, vtos);
3542 __ null_check(rax);
3543 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3544 }
3546 //-----------------------------------------------------------------------------
3547 // Synchronization
3548 //
3549 // Note: monitorenter & exit are symmetric routines, which is reflected
3550 // in the structure of the assembly code as well
3551 //
3552 // Stack layout:
3553 //
3554 // [expressions ] <--- rsp = expression stack top
3555 // ..
3556 // [expressions ]
3557 // [monitor entry] <--- monitor block top = expression stack bot
3558 // ..
3559 // [monitor entry]
3560 // [frame data ] <--- monitor block bot
3561 // ...
3562 // [saved rbp ] <--- rbp
3563 void TemplateTable::monitorenter() {
3564 transition(atos, vtos);
3566 // check for NULL object
3567 __ null_check(rax);
3569 const Address monitor_block_top(
3570 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3571 const Address monitor_block_bot(
3572 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3573 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3575 Label allocated;
3577 // initialize entry pointer
3578 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3580 // find a free slot in the monitor block (result in c_rarg1)
3581 {
3582 Label entry, loop, exit;
3583 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3584 // starting with top-most entry
3585 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3586 // of monitor block
3587 __ jmpb(entry);
3589 __ bind(loop);
3590 // check if current entry is used
3591 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3592 // if not used then remember entry in c_rarg1
3593 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3594 // check if current entry is for same object
3595 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3596 // if same object then stop searching
3597 __ jccb(Assembler::equal, exit);
3598 // otherwise advance to next entry
3599 __ addptr(c_rarg3, entry_size);
3600 __ bind(entry);
3601 // check if bottom reached
3602 __ cmpptr(c_rarg3, c_rarg2);
3603 // if not at bottom then check this entry
3604 __ jcc(Assembler::notEqual, loop);
3605 __ bind(exit);
3606 }
3608 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3609 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3611 // allocate one if there's no free slot
3612 {
3613 Label entry, loop;
3614 // 1. compute new pointers // rsp: old expression stack top
3615 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3616 __ subptr(rsp, entry_size); // move expression stack top
3617 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3618 __ mov(c_rarg3, rsp); // set start value for copy loop
3619 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3620 __ jmp(entry);
3621 // 2. move expression stack contents
3622 __ bind(loop);
3623 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3624 // word from old location
3625 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3626 __ addptr(c_rarg3, wordSize); // advance to next word
3627 __ bind(entry);
3628 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3629 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3630 // copy next word
3631 }
3633 // call run-time routine
3634 // c_rarg1: points to monitor entry
3635 __ bind(allocated);
3637   // Increment bcp to point to the next bytecode, so exception
3638   // handling for async exceptions works correctly.
3639   // The object has already been popped from the stack, so the
3640   // expression stack looks correct.
3641 __ increment(r13);
3643 // store object
3644 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3645 __ lock_object(c_rarg1);
3647 // check to make sure this monitor doesn't cause stack overflow after locking
3648 __ save_bcp(); // in case of exception
3649 __ generate_stack_overflow_check(0);
3651 // The bcp has already been incremented. Just need to dispatch to
3652 // next instruction.
3653 __ dispatch_next(vtos);
3654 }
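
// The slot search in monitorenter above, as a C-like sketch over the
// BasicObjectLock array that sits between the expression stack and the
// frame data (see the stack layout sketch before monitorenter; the helper
// names are illustrative):
//
//   BasicObjectLock* free = NULL;
//   for (e = monitor_top; e != monitor_bot; e++) {
//     if (e->obj() == NULL) free = e;    // remember a free slot
//     if (e->obj() == obj)  break;       // stop at an entry for obj
//   }
//   if (free == NULL) free = grow_monitor_block();  // slides the
//                                                   // expression stack down
//   free->set_obj(obj);
//   lock_object(free);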
3657 void TemplateTable::monitorexit() {
3658 transition(atos, vtos);
3660 // check for NULL object
3661 __ null_check(rax);
3663 const Address monitor_block_top(
3664 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3665 const Address monitor_block_bot(
3666 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3667 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3669 Label found;
3671 // find matching slot
3672 {
3673 Label entry, loop;
3674 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3675 // starting with top-most entry
3676 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3677 // of monitor block
3678 __ jmpb(entry);
3680 __ bind(loop);
3681 // check if current entry is for same object
3682 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3683 // if same object then stop searching
3684 __ jcc(Assembler::equal, found);
3685 // otherwise advance to next entry
3686 __ addptr(c_rarg1, entry_size);
3687 __ bind(entry);
3688 // check if bottom reached
3689 __ cmpptr(c_rarg1, c_rarg2);
3690 // if not at bottom then check this entry
3691 __ jcc(Assembler::notEqual, loop);
3692 }
3694   // Error handling: unlocking was not block-structured
3695 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3696 InterpreterRuntime::throw_illegal_monitor_state_exception));
3697 __ should_not_reach_here();
3699 // call run-time routine
3700   // c_rarg1: points to monitor entry
3701 __ bind(found);
3702 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3703 __ unlock_object(c_rarg1);
3704 __ pop_ptr(rax); // discard object
3705 }
3708 // Wide instructions
3709 void TemplateTable::wide() {
3710 transition(vtos, vtos);
3711 __ load_unsigned_byte(rbx, at_bcp(1));
3712 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3713 __ jmp(Address(rscratch1, rbx, Address::times_8));
3714 // Note: the r13 increment step is part of the individual wide
3715 // bytecode implementations
3716 }
3719 // Multi arrays
3720 void TemplateTable::multianewarray() {
3721 transition(vtos, atos);
3722 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3723 // last dim is on top of stack; we want address of first one:
3724 // first_addr = last_addr + (ndims - 1) * wordSize
3725 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3726 call_VM(rax,
3727 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3728 c_rarg1);
3729 __ load_unsigned_byte(rbx, at_bcp(3));
3730 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3731 }
3732 #endif // !CC_INTERP