/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

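// The Register variants below expect the index to have been negated
// already (see locals_index() and locals_index_wide()): locals live at
// decreasing addresses from r14, so the negated index scaled by times_8
// yields the slot's negative byte offset.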
static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// Accessors for the top of the Java expression stack, which may differ
// from rsp (it does not for category 1 values).
static inline Address at_tos   () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
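//
// Which barrier to emit depends on the collector: the G1 variants need a
// SATB pre-barrier (recording the old value) around the store and a
// post-barrier afterwards, while the card-table schemes only dirty the
// card covering the store address after the store.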
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8 /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

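// Rewrite ("quicken") the current bytecode in the instruction stream to a
// faster variant, so that future executions dispatch straight to the fast
// template. bc holds the fast bytecode to patch in; if JVMTI breakpoints
// may be present we go through the runtime, which tracks the original
// bytecode, instead of patching the stream directly.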
void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {
  if (!RewriteBytecodes) {
    return;
  }
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
  __ jcc(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

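// sipush reads a big-endian 16-bit immediate: load it zero-extended, byte
// swap it into the upper half of the register, then shift arithmetically
// back down so the operand ends up sign-extended.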
void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }

  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;   // same as cache
  const Register array_klass_temp = rdx; // same as index
  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
  __ jcc(Assembler::notEqual, L_done);
  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
  __ jcc(Assembler::notEqual, L_throw_exception);
  __ xorptr(rax, rax);
  __ jmp(L_done);

  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  If the next bytecode is _fast_iload
    // (and thus neither an iload nor a caload), this iload completes an
    // iload pair, so rewrite it to _fast_iload2.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);       // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         sizeof(oopDesc) +
                         objArrayKlass::element_klass_offset_in_bytes()));
  // Compute the element address (array + index*oopSize + header) into a
  // single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

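// In the two-operand templates below, the right operand arrives in tos
// (rax) and the left operand is popped off the expression stack. The
// shift cases first move the count to rcx because the x86 variable-shift
// instructions take their count in cl.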
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

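// idiv/irem go through corrected_idivl, which special-cases
// min_int / -1 (the one in-range combination the hardware idiv
// instruction faults on) to produce the Java-mandated result; the
// quotient lands in rax and the remainder in rdx.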
void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}

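// In the SSE arithmetic below, the commutative add and mul read their
// second operand directly off the expression stack (at_rsp) and then pop
// it, while sub, div and rem must shuffle through xmm1 to reload the
// earlier-pushed operand into xmm0, since SSE instructions overwrite
// their left operand.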
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have only 32-bit alignment guarantees
// on x86, not the 16 bytes SSE needs.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffers for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

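// The pools are twice the required size so that the 16-byte aligned
// quadword double_quadword() selects is guaranteed to lie inside the
// buffer. Each mask has only the sign bit of each lane set, so xorps/xorpd
// against it negates a float/double without touching exponent or mantissa.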
void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

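// For f2i/f2l/d2i/d2l below, cvttss2si/cvttsd2si produce the "integer
// indefinite" value (0x80000000 resp. 0x8000000000000000) when the input
// is NaN or out of range; we compare against that value and fall back to
// the SharedRuntime helper, which implements the Java semantics (NaN -> 0,
// saturation on overflow).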
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

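// lcmp leaves -1, 0 or 1 in rax: rax is preset to -1 for the less-than
// case; otherwise setb(notEqual) materializes 0 for equal and 1 for
// greater.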
void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}

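// unordered_result encodes the fcmpl/fcmpg (dcmpl/dcmpg) distinction: a
// negative value means an unordered comparison (a NaN operand, signalled
// by the parity flag after ucomiss/ucomisd) yields -1, otherwise it
// yields +1.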
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

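// branch() handles jsr as well as normal conditional and unconditional
// branches. For backward branches it bumps the backedge counter (in the
// MDO when profiling, otherwise in the methodOop) and, when
// UseOnStackReplacement is enabled and the counter overflows, asks the
// compiler for an OSR nmethod and jumps into it.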
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in methodOop
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, which value does not get reset on
          // the call to frequency_counter_overflow().  To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method.  First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // rax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision.  These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}


void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

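// tableswitch operands, 4-byte aligned after the opcode and stored
// big-endian: default offset, low, high, followed by (high - low + 1)
// jump offsets.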
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}
1922 void TemplateTable::fast_binaryswitch() {
1923 transition(itos, vtos);
1924 // Implementation using the following core algorithm:
1925 //
1926 // int binary_search(int key, LookupswitchPair* array, int n) {
1927 // // Binary search according to "Methodik des Programmierens" by
1928 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1929 // int i = 0;
1930 // int j = n;
1931 // while (i+1 < j) {
1932 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1933 // // with Q: for all i: 0 <= i < n: key < a[i]
1934 // // where a stands for the array and assuming that the (nonexistent)
1935 // // element a[n] is infinitely big.
1936 // int h = (i + j) >> 1;
1937 // // i < h < j
1938 // if (key < array[h].fast_match()) {
1939 // j = h;
1940 // } else {
1941 // i = h;
1942 // }
1943 // }
1944 // // R: a[i] <= key < a[i+1] or Q
1945 // // (i.e., if key is within array, i is the correct index)
1946 // return i;
1947 // }
1949 // Register allocation
1950 const Register key = rax; // already set (tosca)
1951 const Register array = rbx;
1952 const Register i = rcx;
1953 const Register j = rdx;
1954 const Register h = rdi;
1955 const Register temp = rsi;
1957 // Find array start
1958 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1959 // get rid of this
1960 // instruction (change
1961 // offsets below)
1962 __ andptr(array, -BytesPerInt);
1964 // Initialize i & j
1965 __ xorl(i, i); // i = 0;
1966 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1968 // Convert j into native byte ordering
1969 __ bswapl(j);
1971 // And start
1972 Label entry;
1973 __ jmp(entry);
1975 // binary search loop
1976 {
1977 Label loop;
1978 __ bind(loop);
1979 // int h = (i + j) >> 1;
1980 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1981 __ sarl(h, 1); // h = (i + j) >> 1;
1982 // if (key < array[h].fast_match()) {
1983 // j = h;
1984 // } else {
1985 // i = h;
1986 // }
1987 // Convert array[h].match to native byte-ordering before compare
1988 __ movl(temp, Address(array, h, Address::times_8));
1989 __ bswapl(temp);
1990 __ cmpl(key, temp);
1991 // j = h if (key < array[h].fast_match())
1992 __ cmovl(Assembler::less, j, h);
1993 // i = h if (key >= array[h].fast_match())
1994 __ cmovl(Assembler::greaterEqual, i, h);
1995 // while (i+1 < j)
1996 __ bind(entry);
1997 __ leal(h, Address(i, 1)); // i+1
1998 __ cmpl(h, j); // i+1 < j
1999 __ jcc(Assembler::less, loop);
2000 }
2002 // end of binary search, result index is i (must check again!)
2003 Label default_case;
2004 // Convert array[i].match to native byte-ordering before compare
2005 __ movl(temp, Address(array, i, Address::times_8));
2006 __ bswapl(temp);
2007 __ cmpl(key, temp);
2008 __ jcc(Assembler::notEqual, default_case);
2010 // entry found -> j = offset
2011 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2012 __ profile_switch_case(i, key, array);
2013 __ bswapl(j);
2014 __ movl2ptr(j, j);
2015 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2016 __ addptr(r13, j);
2017 __ dispatch_only(vtos);
2019 // default case -> j = default offset
2020 __ bind(default_case);
2021 __ profile_switch_default(i);
2022 __ movl(j, Address(array, -2 * BytesPerInt));
2023 __ bswapl(j);
2024 __ movl2ptr(j, j);
2025 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2026 __ addptr(r13, j);
2027 __ dispatch_only(vtos);
2028 }
2031 void TemplateTable::_return(TosState state) {
2032 transition(state, state);
2033 assert(_desc->calls_vm(),
2034 "inconsistent calls_vm information"); // call in remove_activation
2036 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2037 assert(state == vtos, "only valid state");
2038 __ movptr(c_rarg1, aaddress(0));
2039 __ load_klass(rdi, c_rarg1);
2040 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2041 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2042 Label skip_register_finalizer;
2043 __ jcc(Assembler::zero, skip_register_finalizer);
2045 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2047 __ bind(skip_register_finalizer);
2048 }
2050 __ remove_activation(state, r13);
2051 __ jmp(r13);
2052 }
2054 // ----------------------------------------------------------------------------
2055 // Volatile variables demand their effects be made known to all CPUs
2056 // in order. Store buffers on most chips allow reads & writes to
2057 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2058 // without some kind of memory barrier (i.e., it's not sufficient that
2059 // the interpreter does not reorder volatile references, the hardware
2060 // also must not reorder them).
2061 //
2062 // According to the new Java Memory Model (JMM):
2063 // (1) All volatiles are serialized with respect to each other. ALSO reads &
2064 // writes act as acquire & release, so:
2065 // (2) A read cannot let unrelated NON-volatile memory refs that
2066 // happen after the read float up to before the read. It's OK for
2067 // non-volatile memory refs that happen before the volatile read to
2068 // float down below it.
2069 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2070 // memory refs that happen BEFORE the write float down to after the
2071 // write. It's OK for non-volatile memory refs that happen after the
2072 // volatile write to float up before it.
2073 //
2074 // We only put in barriers around volatile refs (they are expensive),
2075 // not _between_ memory refs (that would require us to track the
2076 // flavor of the previous memory refs). Requirements (2) and (3)
2077 // require some barriers before volatile stores and after volatile
2078 // loads. These nearly cover requirement (1) but miss the
2079 // volatile-store-volatile-load case. This final case is placed after
2080 // volatile-stores although it could just as well go before
2081 // volatile-loads.
2082 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2083 order_constraint) {
2084 // Helper function to insert a memory barrier; callers do the is-volatile test
2085 if (os::is_MP()) { // Not needed on single CPU
2086 __ membar(order_constraint);
2087 }
2088 }
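// On x86 only the StoreLoad constraint needs an actual instruction: loads
// are already ordered with loads and stores with stores, so membar()
// typically emits a fence (or a locked no-op) just for StoreLoad. The
// volatile-store paths below therefore boil down to (sketch):
//
//   <store to field>                             // the volatile write
//   if (field is volatile)
//     volatile_barrier(StoreLoad | StoreStore);  // see putfield_or_static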
2090 void TemplateTable::resolve_cache_and_index(int byte_no,
2091 Register result,
2092 Register Rcache,
2093 Register index,
2094 size_t index_size) {
2095 const Register temp = rbx;
2096 assert_different_registers(result, Rcache, index, temp);
2098 Label resolved;
2099 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2100 if (byte_no == f1_oop) {
2101 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2102 // This kind of CP cache entry does not need to match the flags byte, because
2103 // there is a 1-1 relation between bytecode type and CP entry type.
2104 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2105 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2106 __ testptr(result, result);
2107 __ jcc(Assembler::notEqual, resolved);
2108 } else {
2109 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2110 assert(result == noreg, ""); //else change code for setting result
2111 const int shift_count = (1 + byte_no) * BitsPerByte;
2112 __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2113 __ shrl(temp, shift_count);
2114 // have we resolved this bytecode?
2115 __ andl(temp, 0xFF);
2116 __ cmpl(temp, (int) bytecode());
2117 __ jcc(Assembler::equal, resolved);
2118 }
2120 // resolve first time through
2121 address entry;
2122 switch (bytecode()) {
2123 case Bytecodes::_getstatic:
2124 case Bytecodes::_putstatic:
2125 case Bytecodes::_getfield:
2126 case Bytecodes::_putfield:
2127 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2128 break;
2129 case Bytecodes::_invokevirtual:
2130 case Bytecodes::_invokespecial:
2131 case Bytecodes::_invokestatic:
2132 case Bytecodes::_invokeinterface:
2133 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2134 break;
2135 case Bytecodes::_invokedynamic:
2136 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2137 break;
2138 case Bytecodes::_fast_aldc:
2139 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2140 break;
2141 case Bytecodes::_fast_aldc_w:
2142 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2143 break;
2144 default:
2145 ShouldNotReachHere();
2146 break;
2147 }
2148 __ movl(temp, (int) bytecode());
2149 __ call_VM(noreg, entry, temp);
2151 // Update registers with resolved info
2152 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2153 if (result != noreg)
2154 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2155 __ bind(resolved);
2156 }
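// In outline, resolve_cache_and_index() performs a lazy, one-time
// resolution (sketch; names are illustrative, not interpreter API):
//
//   ConstantPoolCacheEntry* e = cache_entry_at_bcp(1);
//   if (!e->is_resolved_for(bytecode())) {   // tested via f1 or the
//     call InterpreterRuntime::resolve_*();  // indices byte, as above
//     e = cache_entry_at_bcp(1);             // reload after the VM call
//   }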
2158 // The Rcache and index registers must be set before the call
2159 void TemplateTable::load_field_cp_cache_entry(Register obj,
2160 Register cache,
2161 Register index,
2162 Register off,
2163 Register flags,
2164 bool is_static = false) {
2165 assert_different_registers(cache, index, flags, off);
2167 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2168 // Field offset
2169 __ movptr(off, Address(cache, index, Address::times_8,
2170 in_bytes(cp_base_offset +
2171 ConstantPoolCacheEntry::f2_offset())));
2172 // Flags
2173 __ movl(flags, Address(cache, index, Address::times_8,
2174 in_bytes(cp_base_offset +
2175 ConstantPoolCacheEntry::flags_offset())));
2177 // klass overwrite register
2178 if (is_static) {
2179 __ movptr(obj, Address(cache, index, Address::times_8,
2180 in_bytes(cp_base_offset +
2181 ConstantPoolCacheEntry::f1_offset())));
2182 }
2183 }
2185 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2186 Register method,
2187 Register itable_index,
2188 Register flags,
2189 bool is_invokevirtual,
2190 bool is_invokevfinal, /*unused*/
2191 bool is_invokedynamic) {
2192 // setup registers
2193 const Register cache = rcx;
2194 const Register index = rdx;
2195 assert_different_registers(method, flags);
2196 assert_different_registers(method, cache, index);
2197 assert_different_registers(itable_index, flags);
2198 assert_different_registers(itable_index, cache, index);
2199 // determine constant pool cache field offsets
2200 const int method_offset = in_bytes(
2201 constantPoolCacheOopDesc::base_offset() +
2202 (is_invokevirtual
2203 ? ConstantPoolCacheEntry::f2_offset()
2204 : ConstantPoolCacheEntry::f1_offset()));
2205 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2206 ConstantPoolCacheEntry::flags_offset());
2207 // access constant pool cache fields
2208 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2209 ConstantPoolCacheEntry::f2_offset());
2211 if (byte_no == f1_oop) {
2212 // Resolved f1_oop goes directly into 'method' register.
2213 assert(is_invokedynamic, "");
2214 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2215 } else {
2216 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2217 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2218 }
2219 if (itable_index != noreg) {
2220 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2221 }
2222 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2223 }
2226 // The cache and index registers are expected to be set before the call.
2227 // Correct values of the cache and index registers are preserved.
2228 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2229 bool is_static, bool has_tos) {
2230 // do the JVMTI work here to avoid disturbing the register state below
2231 // We use c_rarg registers here because we want to use the register used in
2232 // the call to the VM
2233 if (JvmtiExport::can_post_field_access()) {
2234 // Check to see if a field access watch has been set before we
2235 // take the time to call into the VM.
2236 Label L1;
2237 assert_different_registers(cache, index, rax);
2238 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2239 __ testl(rax, rax);
2240 __ jcc(Assembler::zero, L1);
2242 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2244 // cache entry pointer
2245 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
2246 __ shll(c_rarg3, LogBytesPerWord);
2247 __ addptr(c_rarg2, c_rarg3);
2248 if (is_static) {
2249 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2250 } else {
2251 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2252 __ verify_oop(c_rarg1);
2253 }
2254 // c_rarg1: object pointer or NULL
2255 // c_rarg2: cache entry pointer
2256 // c_rarg3: jvalue object on the stack
2257 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2258 InterpreterRuntime::post_field_access),
2259 c_rarg1, c_rarg2, c_rarg3);
2260 __ get_cache_and_index_at_bcp(cache, index, 1);
2261 __ bind(L1);
2262 }
2263 }
2265 void TemplateTable::pop_and_check_object(Register r) {
2266 __ pop_ptr(r);
2267 __ null_check(r); // for field access must check obj.
2268 __ verify_oop(r);
2269 }
2271 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2272 transition(vtos, vtos);
2274 const Register cache = rcx;
2275 const Register index = rdx;
2276 const Register obj = c_rarg3;
2277 const Register off = rbx;
2278 const Register flags = rax;
2279 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2281 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2282 jvmti_post_field_access(cache, index, is_static, false);
2283 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2285 if (!is_static) {
2286 // obj is on the stack
2287 pop_and_check_object(obj);
2288 }
2290 const Address field(obj, off, Address::times_1);
2292 Label Done, notByte, notInt, notShort, notChar,
2293 notLong, notFloat, notObj, notDouble;
2295 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2296 assert(btos == 0, "change code, btos != 0");
2298 __ andl(flags, 0x0F);
2299 __ jcc(Assembler::notZero, notByte);
2300 // btos
2301 __ load_signed_byte(rax, field);
2302 __ push(btos);
2303 // Rewrite bytecode to be faster
2304 if (!is_static) {
2305 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2306 }
2307 __ jmp(Done);
2309 __ bind(notByte);
2310 __ cmpl(flags, atos);
2311 __ jcc(Assembler::notEqual, notObj);
2312 // atos
2313 __ load_heap_oop(rax, field);
2314 __ push(atos);
2315 if (!is_static) {
2316 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2317 }
2318 __ jmp(Done);
2320 __ bind(notObj);
2321 __ cmpl(flags, itos);
2322 __ jcc(Assembler::notEqual, notInt);
2323 // itos
2324 __ movl(rax, field);
2325 __ push(itos);
2326 // Rewrite bytecode to be faster
2327 if (!is_static) {
2328 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2329 }
2330 __ jmp(Done);
2332 __ bind(notInt);
2333 __ cmpl(flags, ctos);
2334 __ jcc(Assembler::notEqual, notChar);
2335 // ctos
2336 __ load_unsigned_short(rax, field);
2337 __ push(ctos);
2338 // Rewrite bytecode to be faster
2339 if (!is_static) {
2340 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2341 }
2342 __ jmp(Done);
2344 __ bind(notChar);
2345 __ cmpl(flags, stos);
2346 __ jcc(Assembler::notEqual, notShort);
2347 // stos
2348 __ load_signed_short(rax, field);
2349 __ push(stos);
2350 // Rewrite bytecode to be faster
2351 if (!is_static) {
2352 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2353 }
2354 __ jmp(Done);
2356 __ bind(notShort);
2357 __ cmpl(flags, ltos);
2358 __ jcc(Assembler::notEqual, notLong);
2359 // ltos
2360 __ movq(rax, field);
2361 __ push(ltos);
2362 // Rewrite bytecode to be faster
2363 if (!is_static) {
2364 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2365 }
2366 __ jmp(Done);
2368 __ bind(notLong);
2369 __ cmpl(flags, ftos);
2370 __ jcc(Assembler::notEqual, notFloat);
2371 // ftos
2372 __ movflt(xmm0, field);
2373 __ push(ftos);
2374 // Rewrite bytecode to be faster
2375 if (!is_static) {
2376 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2377 }
2378 __ jmp(Done);
2380 __ bind(notFloat);
2381 #ifdef ASSERT
2382 __ cmpl(flags, dtos);
2383 __ jcc(Assembler::notEqual, notDouble);
2384 #endif
2385 // dtos
2386 __ movdbl(xmm0, field);
2387 __ push(dtos);
2388 // Rewrite bytecode to be faster
2389 if (!is_static) {
2390 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2391 }
2392 #ifdef ASSERT
2393 __ jmp(Done);
2395 __ bind(notDouble);
2396 __ stop("Bad state");
2397 #endif
2399 __ bind(Done);
2400 // [jk] not needed currently
2401 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2402 // Assembler::LoadStore));
2403 }
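// The chain of compares above is in effect a switch on the tos state
// encoded in the flags word (sketch):
//
//   switch ((flags >> tosBits) & 0x0f) {
//     case btos: load_signed_byte(rax, field);  push(btos); break;
//     case atos: load_heap_oop(rax, field);     push(atos); break;
//     case itos: movl(rax, field);              push(itos); break;
//     ...                                       // ctos, stos, ltos, ftos, dtos
//   }
//
// plus a patch_bytecode() per case for the non-static variants.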
2406 void TemplateTable::getfield(int byte_no) {
2407 getfield_or_static(byte_no, false);
2408 }
2410 void TemplateTable::getstatic(int byte_no) {
2411 getfield_or_static(byte_no, true);
2412 }
2414 // The cache and index registers are expected to be set before the call.
2415 // The function may destroy various registers, just not the cache and index registers.
2416 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2417 transition(vtos, vtos);
2419 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2421 if (JvmtiExport::can_post_field_modification()) {
2422 // Check to see if a field modification watch has been set before
2423 // we take the time to call into the VM.
2424 Label L1;
2425 assert_different_registers(cache, index, rax);
2426 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2427 __ testl(rax, rax);
2428 __ jcc(Assembler::zero, L1);
2430 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2432 if (is_static) {
2433 // Life is simple. Null out the object pointer.
2434 __ xorl(c_rarg1, c_rarg1);
2435 } else {
2436 // Life is harder. The stack holds the value on top, followed by
2437 // the object. We don't know the size of the value, though; it
2438 // could be one or two words depending on its type. As a result,
2439 // we must find the type to determine where the object is.
2440 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2441 Address::times_8,
2442 in_bytes(cp_base_offset +
2443 ConstantPoolCacheEntry::flags_offset())));
2444 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
2445 // Make sure we don't need to mask rcx for tosBits after the
2446 // above shift
2447 ConstantPoolCacheEntry::verify_tosBits();
2448 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2449 __ cmpl(c_rarg3, ltos);
2450 __ cmovptr(Assembler::equal,
2451 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2452 __ cmpl(c_rarg3, dtos);
2453 __ cmovptr(Assembler::equal,
2454 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2455 }
2456 // cache entry pointer
2457 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2458 __ shll(rscratch1, LogBytesPerWord);
2459 __ addptr(c_rarg2, rscratch1);
2460 // object (tos)
2461 __ mov(c_rarg3, rsp);
2462 // c_rarg1: object pointer set up above (NULL if static)
2463 // c_rarg2: cache entry pointer
2464 // c_rarg3: jvalue object on the stack
2465 __ call_VM(noreg,
2466 CAST_FROM_FN_PTR(address,
2467 InterpreterRuntime::post_field_modification),
2468 c_rarg1, c_rarg2, c_rarg3);
2469 __ get_cache_and_index_at_bcp(cache, index, 1);
2470 __ bind(L1);
2471 }
2472 }
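// The two cmovs above just pick the object slot under a one- or two-word
// value (sketch):
//
//   obj = (type == ltos || type == dtos) ? at_tos_p2()   // two-word jvalue
//                                        : at_tos_p1();  // one-word jvalue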
2474 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2475 transition(vtos, vtos);
2477 const Register cache = rcx;
2478 const Register index = rdx;
2479 const Register obj = rcx;
2480 const Register off = rbx;
2481 const Register flags = rax;
2482 const Register bc = c_rarg3;
2484 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2485 jvmti_post_field_mod(cache, index, is_static);
2486 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2488 // [jk] not needed currently
2489 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2490 // Assembler::StoreStore));
2492 Label notVolatile, Done;
2493 __ movl(rdx, flags);
2494 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2495 __ andl(rdx, 0x1);
2497 // field address
2498 const Address field(obj, off, Address::times_1);
2500 Label notByte, notInt, notShort, notChar,
2501 notLong, notFloat, notObj, notDouble;
2503 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2505 assert(btos == 0, "change code, btos != 0");
2506 __ andl(flags, 0x0f);
2507 __ jcc(Assembler::notZero, notByte);
2508 // btos
2509 __ pop(btos);
2510 if (!is_static) pop_and_check_object(obj);
2511 __ movb(field, rax);
2512 if (!is_static) {
2513 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
2514 }
2515 __ jmp(Done);
2517 __ bind(notByte);
2518 __ cmpl(flags, atos);
2519 __ jcc(Assembler::notEqual, notObj);
2520 // atos
2521 __ pop(atos);
2522 if (!is_static) pop_and_check_object(obj);
2524 // Store into the field
2525 do_oop_store(_masm, field, rax, _bs->kind(), false);
2527 if (!is_static) {
2528 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
2529 }
2530 __ jmp(Done);
2532 __ bind(notObj);
2533 __ cmpl(flags, itos);
2534 __ jcc(Assembler::notEqual, notInt);
2535 // itos
2536 __ pop(itos);
2537 if (!is_static) pop_and_check_object(obj);
2538 __ movl(field, rax);
2539 if (!is_static) {
2540 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
2541 }
2542 __ jmp(Done);
2544 __ bind(notInt);
2545 __ cmpl(flags, ctos);
2546 __ jcc(Assembler::notEqual, notChar);
2547 // ctos
2548 __ pop(ctos);
2549 if (!is_static) pop_and_check_object(obj);
2550 __ movw(field, rax);
2551 if (!is_static) {
2552 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
2553 }
2554 __ jmp(Done);
2556 __ bind(notChar);
2557 __ cmpl(flags, stos);
2558 __ jcc(Assembler::notEqual, notShort);
2559 // stos
2560 __ pop(stos);
2561 if (!is_static) pop_and_check_object(obj);
2562 __ movw(field, rax);
2563 if (!is_static) {
2564 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
2565 }
2566 __ jmp(Done);
2568 __ bind(notShort);
2569 __ cmpl(flags, ltos);
2570 __ jcc(Assembler::notEqual, notLong);
2571 // ltos
2572 __ pop(ltos);
2573 if (!is_static) pop_and_check_object(obj);
2574 __ movq(field, rax);
2575 if (!is_static) {
2576 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
2577 }
2578 __ jmp(Done);
2580 __ bind(notLong);
2581 __ cmpl(flags, ftos);
2582 __ jcc(Assembler::notEqual, notFloat);
2583 // ftos
2584 __ pop(ftos);
2585 if (!is_static) pop_and_check_object(obj);
2586 __ movflt(field, xmm0);
2587 if (!is_static) {
2588 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
2589 }
2590 __ jmp(Done);
2592 __ bind(notFloat);
2593 #ifdef ASSERT
2594 __ cmpl(flags, dtos);
2595 __ jcc(Assembler::notEqual, notDouble);
2596 #endif
2597 // dtos
2598 __ pop(dtos);
2599 if (!is_static) pop_and_check_object(obj);
2600 __ movdbl(field, xmm0);
2601 if (!is_static) {
2602 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
2603 }
2605 #ifdef ASSERT
2606 __ jmp(Done);
2608 __ bind(notDouble);
2609 __ stop("Bad state");
2610 #endif
2612 __ bind(Done);
2613 // Check for volatile store
2614 __ testl(rdx, rdx);
2615 __ jcc(Assembler::zero, notVolatile);
2616 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2617 Assembler::StoreStore));
2619 __ bind(notVolatile);
2620 }
2622 void TemplateTable::putfield(int byte_no) {
2623 putfield_or_static(byte_no, false);
2624 }
2626 void TemplateTable::putstatic(int byte_no) {
2627 putfield_or_static(byte_no, true);
2628 }
2630 void TemplateTable::jvmti_post_fast_field_mod() {
2631 if (JvmtiExport::can_post_field_modification()) {
2632 // Check to see if a field modification watch has been set before
2633 // we take the time to call into the VM.
2634 Label L2;
2635 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2636 __ testl(c_rarg3, c_rarg3);
2637 __ jcc(Assembler::zero, L2);
2638 __ pop_ptr(rbx); // copy the object pointer from tos
2639 __ verify_oop(rbx);
2640 __ push_ptr(rbx); // put the object pointer back on tos
2641 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2642 __ mov(c_rarg3, rsp);
2643 const Address field(c_rarg3, 0);
2645 switch (bytecode()) { // load values into the jvalue object
2646 case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
2647 case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
2648 case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
2649 case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
2650 case Bytecodes::_fast_sputfield: // fall through
2651 case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
2652 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
2653 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
2654 default:
2655 ShouldNotReachHere();
2656 }
2658 // Save rax because call_VM() will clobber it, then use it for
2659 // JVMTI purposes
2660 __ push(rax);
2661 // access constant pool cache entry
2662 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2663 __ verify_oop(rbx);
2664 // rbx: object pointer copied above
2665 // c_rarg2: cache entry pointer
2666 // c_rarg3: jvalue object on the stack
2667 __ call_VM(noreg,
2668 CAST_FROM_FN_PTR(address,
2669 InterpreterRuntime::post_field_modification),
2670 rbx, c_rarg2, c_rarg3);
2671 __ pop(rax); // restore lower value
2672 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2673 __ bind(L2);
2674 }
2675 }
2677 void TemplateTable::fast_storefield(TosState state) {
2678 transition(state, vtos);
2680 ByteSize base = constantPoolCacheOopDesc::base_offset();
2682 jvmti_post_fast_field_mod();
2684 // access constant pool cache
2685 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2687 // test for volatile with rdx
2688 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2689 in_bytes(base +
2690 ConstantPoolCacheEntry::flags_offset())));
2692 // replace index with field offset from cache entry
2693 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2694 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2696 // [jk] not needed currently
2697 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2698 // Assembler::StoreStore));
2700 Label notVolatile;
2701 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2702 __ andl(rdx, 0x1);
2704 // Get object from stack
2705 pop_and_check_object(rcx);
2707 // field address
2708 const Address field(rcx, rbx, Address::times_1);
2710 // access field
2711 switch (bytecode()) {
2712 case Bytecodes::_fast_aputfield:
2713 do_oop_store(_masm, field, rax, _bs->kind(), false);
2714 break;
2715 case Bytecodes::_fast_lputfield:
2716 __ movq(field, rax);
2717 break;
2718 case Bytecodes::_fast_iputfield:
2719 __ movl(field, rax);
2720 break;
2721 case Bytecodes::_fast_bputfield:
2722 __ movb(field, rax);
2723 break;
2724 case Bytecodes::_fast_sputfield:
2725 // fall through
2726 case Bytecodes::_fast_cputfield:
2727 __ movw(field, rax);
2728 break;
2729 case Bytecodes::_fast_fputfield:
2730 __ movflt(field, xmm0);
2731 break;
2732 case Bytecodes::_fast_dputfield:
2733 __ movdbl(field, xmm0);
2734 break;
2735 default:
2736 ShouldNotReachHere();
2737 }
2739 // Check for volatile store
2740 __ testl(rdx, rdx);
2741 __ jcc(Assembler::zero, notVolatile);
2742 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2743 Assembler::StoreStore));
2744 __ bind(notVolatile);
2745 }
2748 void TemplateTable::fast_accessfield(TosState state) {
2749 transition(atos, state);
2751 // Do the JVMTI work here to avoid disturbing the register state below
2752 if (JvmtiExport::can_post_field_access()) {
2753 // Check to see if a field access watch has been set before we
2754 // take the time to call into the VM.
2755 Label L1;
2756 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2757 __ testl(rcx, rcx);
2758 __ jcc(Assembler::zero, L1);
2759 // access constant pool cache entry
2760 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2761 __ verify_oop(rax);
2762 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2763 __ mov(c_rarg1, rax);
2764 // c_rarg1: object pointer copied above
2765 // c_rarg2: cache entry pointer
2766 __ call_VM(noreg,
2767 CAST_FROM_FN_PTR(address,
2768 InterpreterRuntime::post_field_access),
2769 c_rarg1, c_rarg2);
2770 __ pop_ptr(rax); // restore object pointer
2771 __ bind(L1);
2772 }
2774 // access constant pool cache
2775 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2776 // replace index with field offset from cache entry
2777 // [jk] not needed currently
2778 // if (os::is_MP()) {
2779 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2780 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2781 // ConstantPoolCacheEntry::flags_offset())));
2782 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2783 // __ andl(rdx, 0x1);
2784 // }
2785 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2786 in_bytes(constantPoolCacheOopDesc::base_offset() +
2787 ConstantPoolCacheEntry::f2_offset())));
2789 // rax: object
2790 __ verify_oop(rax);
2791 __ null_check(rax);
2792 Address field(rax, rbx, Address::times_1);
2794 // access field
2795 switch (bytecode()) {
2796 case Bytecodes::_fast_agetfield:
2797 __ load_heap_oop(rax, field);
2798 __ verify_oop(rax);
2799 break;
2800 case Bytecodes::_fast_lgetfield:
2801 __ movq(rax, field);
2802 break;
2803 case Bytecodes::_fast_igetfield:
2804 __ movl(rax, field);
2805 break;
2806 case Bytecodes::_fast_bgetfield:
2807 __ movsbl(rax, field);
2808 break;
2809 case Bytecodes::_fast_sgetfield:
2810 __ load_signed_short(rax, field);
2811 break;
2812 case Bytecodes::_fast_cgetfield:
2813 __ load_unsigned_short(rax, field);
2814 break;
2815 case Bytecodes::_fast_fgetfield:
2816 __ movflt(xmm0, field);
2817 break;
2818 case Bytecodes::_fast_dgetfield:
2819 __ movdbl(xmm0, field);
2820 break;
2821 default:
2822 ShouldNotReachHere();
2823 }
2824 // [jk] not needed currently
2825 // if (os::is_MP()) {
2826 // Label notVolatile;
2827 // __ testl(rdx, rdx);
2828 // __ jcc(Assembler::zero, notVolatile);
2829 // __ membar(Assembler::LoadLoad);
2830 // __ bind(notVolatile);
2831 //};
2832 }
2834 void TemplateTable::fast_xaccess(TosState state) {
2835 transition(vtos, state);
2837 // get receiver
2838 __ movptr(rax, aaddress(0));
2839 // access constant pool cache
2840 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2841 __ movptr(rbx,
2842 Address(rcx, rdx, Address::times_8,
2843 in_bytes(constantPoolCacheOopDesc::base_offset() +
2844 ConstantPoolCacheEntry::f2_offset())));
2845 // make sure exception is reported in correct bcp range (getfield is
2846 // next instruction)
2847 __ increment(r13);
2848 __ null_check(rax);
2849 switch (state) {
2850 case itos:
2851 __ movl(rax, Address(rax, rbx, Address::times_1));
2852 break;
2853 case atos:
2854 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2855 __ verify_oop(rax);
2856 break;
2857 case ftos:
2858 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2859 break;
2860 default:
2861 ShouldNotReachHere();
2862 }
2864 // [jk] not needed currently
2865 // if (os::is_MP()) {
2866 // Label notVolatile;
2867 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2868 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2869 // ConstantPoolCacheEntry::flags_offset())));
2870 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2871 // __ testl(rdx, 0x1);
2872 // __ jcc(Assembler::zero, notVolatile);
2873 // __ membar(Assembler::LoadLoad);
2874 // __ bind(notVolatile);
2875 // }
2877 __ decrement(r13);
2878 }
2882 //-----------------------------------------------------------------------------
2883 // Calls
2885 void TemplateTable::count_calls(Register method, Register temp) {
2886 // implemented elsewhere
2887 ShouldNotReachHere();
2888 }
2890 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2891 // determine flags
2892 Bytecodes::Code code = bytecode();
2893 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2894 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2895 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2896 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2897 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2898 const bool receiver_null_check = is_invokespecial;
2899 const bool save_flags = is_invokeinterface || is_invokevirtual;
2900 // setup registers & access constant pool cache
2901 const Register recv = rcx;
2902 const Register flags = rdx;
2903 assert_different_registers(method, index, recv, flags);
2905 // save 'interpreter return address'
2906 __ save_bcp();
2908 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2910 // load receiver if needed (note: no return address pushed yet)
2911 if (load_receiver) {
2912 assert(!is_invokedynamic, "");
2913 __ movl(recv, flags);
2914 __ andl(recv, 0xFF);
2915 Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
2916 __ movptr(recv, recv_addr);
2917 __ verify_oop(recv);
2918 }
2920 // do null check if needed
2921 if (receiver_null_check) {
2922 __ null_check(recv);
2923 }
2925 if (save_flags) {
2926 __ movl(r13, flags);
2927 }
2929 // compute return type
2930 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2931 // Make sure we don't need to mask flags for tosBits after the above shift
2932 ConstantPoolCacheEntry::verify_tosBits();
2933 // load return address
2934 {
2935 address table_addr;
2936 if (is_invokeinterface || is_invokedynamic)
2937 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2938 else
2939 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2940 ExternalAddress table(table_addr);
2941 __ lea(rscratch1, table);
2942 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2943 }
2945 // push return address
2946 __ push(flags);
2948 // Restore the flag field (saved in r13 above), and restore r13 as the
2949 // bytecode pointer for later null checks
2950 if (save_flags) {
2951 __ movl(flags, r13);
2952 __ restore_bcp();
2953 }
2954 }
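// Note on the table selection above: the "3" and "5" refer to the bytecode
// length; invokeinterface and invokedynamic are 5 bytes long, the other
// invokes 3, so the chosen return entry advances bcp by that amount. The
// table itself is indexed by the return type's tos state.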
2957 void TemplateTable::invokevirtual_helper(Register index,
2958 Register recv,
2959 Register flags) {
2960 // Uses temporary registers rax, rdx
2961 assert_different_registers(index, recv, rax, rdx);
2963 // Test for an invoke of a final method
2964 Label notFinal;
2965 __ movl(rax, flags);
2966 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2967 __ jcc(Assembler::zero, notFinal);
2969 const Register method = index; // method must be rbx
2970 assert(method == rbx,
2971 "methodOop must be rbx for interpreter calling convention");
2973 // do the call - the index is actually the method to call
2974 __ verify_oop(method);
2976 // It's final, need a null check here!
2977 __ null_check(recv);
2979 // profile this call
2980 __ profile_final_call(rax);
2982 __ jump_from_interpreted(method, rax);
2984 __ bind(notFinal);
2986 // get receiver klass
2987 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2988 __ load_klass(rax, recv);
2990 __ verify_oop(rax);
2992 // profile this call
2993 __ profile_virtual_call(rax, r14, rdx);
2995 // get target methodOop & entry point
2996 const int base = instanceKlass::vtable_start_offset() * wordSize;
2997 assert(vtableEntry::size() * wordSize == 8,
2998 "adjust the scaling in the code below");
2999 __ movptr(method, Address(rax, index,
3000 Address::times_8,
3001 base + vtableEntry::method_offset_in_bytes()));
3002 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
3003 __ jump_from_interpreted(method, rdx);
3004 }
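// The notFinal path above is an ordinary vtable dispatch; in C++-like
// terms (sketch; vtable_method_at() is an illustrative accessor, not the
// real klassVtable API):
//
//   klassOop k  = receiver->klass();
//   methodOop m = vtable_method_at(k, index);  // k + base + index * 8
//   jump to m's interpreter entry point;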
3007 void TemplateTable::invokevirtual(int byte_no) {
3008 transition(vtos, vtos);
3009 assert(byte_no == f2_byte, "use this argument");
3010 prepare_invoke(rbx, noreg, byte_no);
3012 // rbx: index
3013 // rcx: receiver
3014 // rdx: flags
3016 invokevirtual_helper(rbx, rcx, rdx);
3017 }
3020 void TemplateTable::invokespecial(int byte_no) {
3021 transition(vtos, vtos);
3022 assert(byte_no == f1_byte, "use this argument");
3023 prepare_invoke(rbx, noreg, byte_no);
3024 // do the call
3025 __ verify_oop(rbx);
3026 __ profile_call(rax);
3027 __ jump_from_interpreted(rbx, rax);
3028 }
3031 void TemplateTable::invokestatic(int byte_no) {
3032 transition(vtos, vtos);
3033 assert(byte_no == f1_byte, "use this argument");
3034 prepare_invoke(rbx, noreg, byte_no);
3035 // do the call
3036 __ verify_oop(rbx);
3037 __ profile_call(rax);
3038 __ jump_from_interpreted(rbx, rax);
3039 }
3041 void TemplateTable::fast_invokevfinal(int byte_no) {
3042 transition(vtos, vtos);
3043 assert(byte_no == f2_byte, "use this argument");
3044 __ stop("fast_invokevfinal not used on amd64");
3045 }
3047 void TemplateTable::invokeinterface(int byte_no) {
3048 transition(vtos, vtos);
3049 assert(byte_no == f1_byte, "use this argument");
3050 prepare_invoke(rax, rbx, byte_no);
3052 // rax: Interface
3053 // rbx: index
3054 // rcx: receiver
3055 // rdx: flags
3057 // Special case of invokeinterface called for virtual method of
3058 // java.lang.Object. See cpCacheOop.cpp for details.
3059 // This code isn't produced by javac, but could be produced by
3060 // another compliant Java compiler.
3061 Label notMethod;
3062 __ movl(r14, rdx);
3063 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
3064 __ jcc(Assembler::zero, notMethod);
3066 invokevirtual_helper(rbx, rcx, rdx);
3067 __ bind(notMethod);
3069 // Get receiver klass into rdx - also a null check
3070 __ restore_locals(); // restore r14
3071 __ load_klass(rdx, rcx);
3072 __ verify_oop(rdx);
3074 // profile this call
3075 __ profile_virtual_call(rdx, r13, r14);
3077 Label no_such_interface, no_such_method;
3079 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3080 rdx, rax, rbx,
3081 // outputs: method, scan temp. reg
3082 rbx, r13,
3083 no_such_interface);
3085 // rbx: methodOop to call
3086 // rcx: receiver
3087 // Check for abstract method error
3088 // Note: This should be done more efficiently via a throw_abstract_method_error
3089 // interpreter entry point and a conditional jump to it in case of a null
3090 // method.
3091 __ testptr(rbx, rbx);
3092 __ jcc(Assembler::zero, no_such_method);
3094 // do the call
3095 // rcx: receiver
3096 // rbx: methodOop
3097 __ jump_from_interpreted(rbx, rdx);
3098 __ should_not_reach_here();
3100 // exception handling code follows...
3101 // note: must restore interpreter registers to canonical
3102 // state for exception handling to work correctly!
3104 __ bind(no_such_method);
3105 // throw exception
3106 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3107 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3108 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3109 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3110 // the call_VM checks for exception, so we should never return here.
3111 __ should_not_reach_here();
3113 __ bind(no_such_interface);
3114 // throw exception
3115 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3116 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3117 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3118 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3119 InterpreterRuntime::throw_IncompatibleClassChangeError));
3120 // the call_VM checks for exception, so we should never return here.
3121 __ should_not_reach_here();
3122 return;
3123 }
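// lookup_interface_method scans the receiver klass's itable; in outline
// (sketch; names are illustrative):
//
//   for (itableOffsetEntry* e = itable_begin(rdx); ; e++) {
//     if (e->interface() == NULL) goto no_such_interface; // end of itable
//     if (e->interface() == rax)  break;                  // interface found
//   }
//   rbx = method_table_at(e->offset())[itable_index];     // may be NULL
//
// A NULL method slot means the class does not implement the selected
// method, hence the testptr/no_such_method path above.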
3125 void TemplateTable::invokedynamic(int byte_no) {
3126 transition(vtos, vtos);
3127 assert(byte_no == f1_oop, "use this argument");
3129 if (!EnableInvokeDynamic) {
3130 // We should not encounter this bytecode if !EnableInvokeDynamic.
3131 // The verifier will stop it. However, if we get past the verifier,
3132 // this will stop the thread in a reasonable way, without crashing the JVM.
3133 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3134 InterpreterRuntime::throw_IncompatibleClassChangeError));
3135 // the call_VM checks for exception, so we should never return here.
3136 __ should_not_reach_here();
3137 return;
3138 }
3140 assert(byte_no == f1_oop, "use this argument");
3141 prepare_invoke(rax, rbx, byte_no);
3143 // rax: CallSite object (f1)
3144 // rbx: unused (f2)
3145 // rcx: receiver address
3146 // rdx: flags (unused)
3148 Register rax_callsite = rax;
3149 Register rcx_method_handle = rcx;
3151 if (ProfileInterpreter) {
3152 // %%% should make a type profile for any invokedynamic that takes a ref argument
3153 // profile this call
3154 __ profile_call(r13);
3155 }
3157 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
3158 __ null_check(rcx_method_handle);
3159 __ prepare_to_jump_from_interpreted();
3160 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3161 }
3164 //-----------------------------------------------------------------------------
3165 // Allocation
3167 void TemplateTable::_new() {
3168 transition(vtos, atos);
3169 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3170 Label slow_case;
3171 Label done;
3172 Label initialize_header;
3173 Label initialize_object; // including clearing the fields
3174 Label allocate_shared;
3176 __ get_cpool_and_tags(rsi, rax);
3177 // Make sure the class we're about to instantiate has been resolved.
3178 // This is done before loading the instanceKlass to be consistent with the
3179 // order in which the constant pool is updated (see constantPoolOopDesc::klass_at_put).
3180 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3181 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3182 JVM_CONSTANT_Class);
3183 __ jcc(Assembler::notEqual, slow_case);
3185 // get instanceKlass
3186 __ movptr(rsi, Address(rsi, rdx,
3187 Address::times_8, sizeof(constantPoolOopDesc)));
3189 // make sure klass is initialized & doesn't have finalizer
3190 // make sure klass is fully initialized
3191 __ cmpl(Address(rsi,
3192 instanceKlass::init_state_offset_in_bytes() +
3193 sizeof(oopDesc)),
3194 instanceKlass::fully_initialized);
3195 __ jcc(Assembler::notEqual, slow_case);
3197 // get instance_size in instanceKlass (scaled to a count of bytes)
3198 __ movl(rdx,
3199 Address(rsi,
3200 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3201 // test to see if it has a finalizer or is malformed in some way
3202 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3203 __ jcc(Assembler::notZero, slow_case);
3205 // Allocate the instance
3206 // 1) Try to allocate in the TLAB
3207 // 2) if that fails and the object is large, allocate in the shared Eden
3208 // 3) if the above fails (or is not applicable), go to a slow case
3209 // (creates a new TLAB, etc.); the TLAB fast path (1) is sketched below
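// In C terms, the TLAB fast path (1) is roughly (sketch; accessor names
// are illustrative):
//
//   HeapWord* obj = thread->tlab_top();
//   HeapWord* end = obj + size_in_bytes;
//   if (end > thread->tlab_end()) goto shared_eden_or_slow_case;
//   thread->set_tlab_top(end);  // bump pointer; no atomics needed, the
//                               // TLAB is thread-private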
3211 const bool allow_shared_alloc =
3212 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3214 if (UseTLAB) {
3215 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3216 __ lea(rbx, Address(rax, rdx, Address::times_1));
3217 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3218 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3219 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3220 if (ZeroTLAB) {
3221 // the fields have been already cleared
3222 __ jmp(initialize_header);
3223 } else {
3224 // initialize both the header and fields
3225 __ jmp(initialize_object);
3226 }
3227 }
3229 // Allocation in the shared Eden, if allowed.
3230 //
3231 // rdx: instance size in bytes
3232 if (allow_shared_alloc) {
3233 __ bind(allocate_shared);
3235 ExternalAddress top((address)Universe::heap()->top_addr());
3236 ExternalAddress end((address)Universe::heap()->end_addr());
3238 const Register RtopAddr = rscratch1;
3239 const Register RendAddr = rscratch2;
3241 __ lea(RtopAddr, top);
3242 __ lea(RendAddr, end);
3243 __ movptr(rax, Address(RtopAddr, 0));
3245 // For retries rax gets set by cmpxchgq
3246 Label retry;
3247 __ bind(retry);
3248 __ lea(rbx, Address(rax, rdx, Address::times_1));
3249 __ cmpptr(rbx, Address(RendAddr, 0));
3250 __ jcc(Assembler::above, slow_case);
3252 // Compare rax with the top addr, and if still equal, store the new
3253 // top addr in rbx at the address of the top addr pointer. Sets ZF if it was
3254 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3255 //
3256 // rax: object begin
3257 // rbx: object end
3258 // rdx: instance size in bytes
3259 if (os::is_MP()) {
3260 __ lock();
3261 }
3262 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3264 // if someone beat us on the allocation, try again, otherwise continue
3265 __ jcc(Assembler::notEqual, retry);
3267 __ incr_allocated_bytes(r15_thread, rdx, 0);
3268 }
3270 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3271 // The object is initialized before the header. If the object size is
3272 // zero, go directly to the header initialization.
3273 __ bind(initialize_object);
3274 __ decrementl(rdx, sizeof(oopDesc));
3275 __ jcc(Assembler::zero, initialize_header);
3277 // Initialize object fields
3278 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3279 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3280 {
3281 Label loop;
3282 __ bind(loop);
3283 __ movq(Address(rax, rdx, Address::times_8,
3284 sizeof(oopDesc) - oopSize),
3285 rcx);
3286 __ decrementl(rdx);
3287 __ jcc(Assembler::notZero, loop);
3288 }
3290 // initialize object header only.
3291 __ bind(initialize_header);
3292 if (UseBiasedLocking) {
3293 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3294 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3295 } else {
3296 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3297 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3298 }
3299 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3300 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3301 __ store_klass(rax, rsi); // store klass last
3303 {
3304 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3305 // Trigger dtrace event for fastpath
3306 __ push(atos); // save the return value
3307 __ call_VM_leaf(
3308 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3309 __ pop(atos); // restore the return value
3311 }
3312 __ jmp(done);
3313 }
3316 // slow case
3317 __ bind(slow_case);
3318 __ get_constant_pool(c_rarg1);
3319 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3320 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3321 __ verify_oop(rax);
3323 // continue
3324 __ bind(done);
3325 }
3327 void TemplateTable::newarray() {
3328 transition(itos, atos);
3329 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3330 __ movl(c_rarg2, rax);
3331 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3332 c_rarg1, c_rarg2);
3333 }
3335 void TemplateTable::anewarray() {
3336 transition(itos, atos);
3337 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3338 __ get_constant_pool(c_rarg1);
3339 __ movl(c_rarg3, rax);
3340 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3341 c_rarg1, c_rarg2, c_rarg3);
3342 }
3344 void TemplateTable::arraylength() {
3345 transition(atos, itos);
3346 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3347 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3348 }
3350 void TemplateTable::checkcast() {
3351 transition(atos, atos);
3352 Label done, is_null, ok_is_subtype, quicked, resolved;
3353 __ testptr(rax, rax); // object is in rax
3354 __ jcc(Assembler::zero, is_null);
3356 // Get cpool & tags index
3357 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3358 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3359 // See if bytecode has already been quicked
3360 __ cmpb(Address(rdx, rbx,
3361 Address::times_1,
3362 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3363 JVM_CONSTANT_Class);
3364 __ jcc(Assembler::equal, quicked);
3365 __ push(atos); // save receiver for result, and for GC
3366 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3367 __ pop_ptr(rdx); // restore receiver
3368 __ jmpb(resolved);
3370 // Get superklass in rax and subklass in rbx
3371 __ bind(quicked);
3372 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3373 __ movptr(rax, Address(rcx, rbx,
3374 Address::times_8, sizeof(constantPoolOopDesc)));
3376 __ bind(resolved);
3377 __ load_klass(rbx, rdx);
3379 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3380 // Superklass in rax. Subklass in rbx.
3381 __ gen_subtype_check(rbx, ok_is_subtype);
3383 // Come here on failure
3384 __ push_ptr(rdx);
3385 // object is at TOS
3386 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3388 // Come here on success
3389 __ bind(ok_is_subtype);
3390 __ mov(rax, rdx); // Restore object in rdx
3392 // Collect counts on whether this check-cast sees NULLs a lot or not.
3393 if (ProfileInterpreter) {
3394 __ jmp(done);
3395 __ bind(is_null);
3396 __ profile_null_seen(rcx);
3397 } else {
3398 __ bind(is_null); // same as 'done'
3399 }
3400 __ bind(done);
3401 }
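// Net effect of checkcast (sketch):
//
//   if (obj != NULL && !obj->klass()->is_subtype_of(resolved_klass))
//     throw ClassCastException;
//   // obj stays on TOS either way; NULL passes without a klass check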
3403 void TemplateTable::instanceof() {
3404 transition(atos, itos);
3405 Label done, is_null, ok_is_subtype, quicked, resolved;
3406 __ testptr(rax, rax);
3407 __ jcc(Assembler::zero, is_null);
3409 // Get cpool & tags index
3410 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3411 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3412 // See if bytecode has already been quicked
3413 __ cmpb(Address(rdx, rbx,
3414 Address::times_1,
3415 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3416 JVM_CONSTANT_Class);
3417 __ jcc(Assembler::equal, quicked);
3419 __ push(atos); // save receiver for result, and for GC
3420 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3421 __ pop_ptr(rdx); // restore receiver
3422 __ verify_oop(rdx);
3423 __ load_klass(rdx, rdx);
3424 __ jmpb(resolved);
3426 // Get superklass in rax and subklass in rdx
3427 __ bind(quicked);
3428 __ load_klass(rdx, rax);
3429 __ movptr(rax, Address(rcx, rbx,
3430 Address::times_8, sizeof(constantPoolOopDesc)));
3432 __ bind(resolved);
3434 // Generate subtype check. Blows rcx, rdi
3435 // Superklass in rax. Subklass in rdx.
3436 __ gen_subtype_check(rdx, ok_is_subtype);
3438 // Come here on failure
3439 __ xorl(rax, rax);
3440 __ jmpb(done);
3441 // Come here on success
3442 __ bind(ok_is_subtype);
3443 __ movl(rax, 1);
3445 // Collect counts on whether this test sees NULLs a lot or not.
3446 if (ProfileInterpreter) {
3447 __ jmp(done);
3448 __ bind(is_null);
3449 __ profile_null_seen(rcx);
3450 } else {
3451 __ bind(is_null); // same as 'done'
3452 }
3453 __ bind(done);
3454 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3455 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3456 }
3458 //-----------------------------------------------------------------------------
3459 // Breakpoints
3460 void TemplateTable::_breakpoint() {
3461 // Note: we get here even if we are single stepping.
3462 // jbug insists on setting breakpoints at every bytecode
3463 // even if we are in single-step mode.
3465 transition(vtos, vtos);
3467 // get the unpatched byte code
3468 __ get_method(c_rarg1);
3469 __ call_VM(noreg,
3470 CAST_FROM_FN_PTR(address,
3471 InterpreterRuntime::get_original_bytecode_at),
3472 c_rarg1, r13);
3473 __ mov(rbx, rax);
3475 // post the breakpoint event
3476 __ get_method(c_rarg1);
3477 __ call_VM(noreg,
3478 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3479 c_rarg1, r13);
3481 // complete the execution of original bytecode
3482 __ dispatch_only_normal(vtos);
3483 }
3485 //-----------------------------------------------------------------------------
3486 // Exceptions
3488 void TemplateTable::athrow() {
3489 transition(atos, vtos);
3490 __ null_check(rax);
3491 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3492 }
3494 //-----------------------------------------------------------------------------
3495 // Synchronization
3496 //
3497 // Note: monitorenter & exit are symmetric routines, which is reflected
3498 // in the assembly code structure as well
3499 //
3500 // Stack layout:
3501 //
3502 // [expressions ] <--- rsp = expression stack top
3503 // ..
3504 // [expressions ]
3505 // [monitor entry] <--- monitor block top = expression stack bot
3506 // ..
3507 // [monitor entry]
3508 // [frame data ] <--- monitor block bot
3509 // ...
3510 // [saved rbp ] <--- rbp
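// The free-slot search below is, in C terms (sketch):
//
//   BasicObjectLock* free = NULL;
//   for (BasicObjectLock* e = top; e != bot; e = next(e)) {
//     if (e->obj() == NULL) free = e;  // remember a free slot
//     if (e->obj() == obj)  break;     // same object: stop searching
//   }
//   if (free == NULL) grow the block;  // second loop below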
3511 void TemplateTable::monitorenter() {
3512 transition(atos, vtos);
3514 // check for NULL object
3515 __ null_check(rax);
3517 const Address monitor_block_top(
3518 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3519 const Address monitor_block_bot(
3520 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3521 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3523 Label allocated;
3525 // initialize entry pointer
3526 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3528 // find a free slot in the monitor block (result in c_rarg1)
3529 {
3530 Label entry, loop, exit;
3531 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3532 // starting with top-most entry
3533 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3534 // of monitor block
3535 __ jmpb(entry);
3537 __ bind(loop);
3538 // check if current entry is used
3539 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3540 // if not used then remember entry in c_rarg1
3541 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3542 // check if current entry is for same object
3543 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3544 // if same object then stop searching
3545 __ jccb(Assembler::equal, exit);
3546 // otherwise advance to next entry
3547 __ addptr(c_rarg3, entry_size);
3548 __ bind(entry);
3549 // check if bottom reached
3550 __ cmpptr(c_rarg3, c_rarg2);
3551 // if not at bottom then check this entry
3552 __ jcc(Assembler::notEqual, loop);
3553 __ bind(exit);
3554 }
3556 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3557 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3559 // allocate one if there's no free slot
3560 {
3561 Label entry, loop;
3562 // 1. compute new pointers // rsp: old expression stack top
3563 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3564 __ subptr(rsp, entry_size); // move expression stack top
3565 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3566 __ mov(c_rarg3, rsp); // set start value for copy loop
3567 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3568 __ jmp(entry);
3569 // 2. move expression stack contents
3570 __ bind(loop);
3571 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3572 // word from old location
3573 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3574 __ addptr(c_rarg3, wordSize); // advance to next word
3575 __ bind(entry);
3576 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3577 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3578 // copy next word
3579 }
3581 // call run-time routine
3582 // c_rarg1: points to monitor entry
3583 __ bind(allocated);
3585 // Increment bcp to point to the next bytecode, so exception
3586 // handling for async exceptions works correctly.
3587 // The object has already been popped from the stack, so the
3588 // expression stack looks correct.
3589 __ increment(r13);
3591 // store object
3592 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3593 __ lock_object(c_rarg1);
3595 // check to make sure this monitor doesn't cause stack overflow after locking
3596 __ save_bcp(); // in case of exception
3597 __ generate_stack_overflow_check(0);
3599 // The bcp has already been incremented. Just need to dispatch to
3600 // next instruction.
3601 __ dispatch_next(vtos);
3602 }
3605 void TemplateTable::monitorexit() {
3606 transition(atos, vtos);
3608 // check for NULL object
3609 __ null_check(rax);
3611 const Address monitor_block_top(
3612 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3613 const Address monitor_block_bot(
3614 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3615 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3617 Label found;
3619 // find matching slot
3620 {
3621 Label entry, loop;
3622 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3623 // starting with top-most entry
3624 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3625 // of monitor block
3626 __ jmpb(entry);
3628 __ bind(loop);
3629 // check if current entry is for same object
3630 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3631 // if same object then stop searching
3632 __ jcc(Assembler::equal, found);
3633 // otherwise advance to next entry
3634 __ addptr(c_rarg1, entry_size);
3635 __ bind(entry);
3636 // check if bottom reached
3637 __ cmpptr(c_rarg1, c_rarg2);
3638 // if not at bottom then check this entry
3639 __ jcc(Assembler::notEqual, loop);
3640 }
3642 // Error handling: unlocking was not block-structured
3643 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3644 InterpreterRuntime::throw_illegal_monitor_state_exception));
3645 __ should_not_reach_here();
3647 // call run-time routine
3648 // rsi: points to monitor entry
3649 __ bind(found);
3650 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3651 __ unlock_object(c_rarg1);
3652 __ pop_ptr(rax); // discard object
3653 }
3656 // Wide instructions
3657 void TemplateTable::wide() {
3658 transition(vtos, vtos);
3659 __ load_unsigned_byte(rbx, at_bcp(1));
3660 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3661 __ jmp(Address(rscratch1, rbx, Address::times_8));
3662 // Note: the r13 increment step is part of the individual wide
3663 // bytecode implementations
3664 }
3667 // Multi arrays
3668 void TemplateTable::multianewarray() {
3669 transition(vtos, atos);
3670 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3671 // last dim is on top of stack; we want address of first one:
3672 // first_addr = last_addr + (ndims - 1) * wordSize
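// e.g. ndims == 3: dims at rsp, rsp+8, rsp+16; c_rarg1 = rsp + 2*wordSize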
3673 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3674 call_VM(rax,
3675 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3676 c_rarg1);
3677 __ load_unsigned_byte(rbx, at_bcp(3));
3678 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3679 }
3680 #endif // !CC_INTERP