Tue, 29 Jun 2010 10:34:00 -0700
6964774: Adjust optimization flags setting
Summary: Adjust performance flags settings.
Reviewed-by: never, phh
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_64.cpp.incl"

#ifndef CC_INTERP

#define __ _masm->
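
// _masm is the InterpreterMacroAssembler that the template generators emit
// code into. Fixed register conventions used throughout this file: r13
// holds the bytecode pointer (bcp) and r14 the locals base (see the
// register summaries in branch() below).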

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables
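// Locals are addressed relative to the locals base in r14. Category-2
// values (long/double) occupy two slots; laddress/daddress below address
// them through slot n + 1.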

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may differ from rsp
// (it doesn't for category 1 values).
static inline Address at_tos () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
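
// Note: j_not returns the negated condition; the conditional-branch
// templates jump on it to the not-taken path and fall through into the
// taken path (see if_0cmp and friends below).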

// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
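// The barrier parameter selects the write-barrier flavor: G1 needs a
// pre-barrier (enqueueing the old value for SATB marking) as well as a
// post-barrier, while the card-table collectors only mark a card after
// the store.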
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, val, r8, rbx);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default      :
      ShouldNotReachHere();
  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {
  if (!RewriteBytecodes) {
    return;
  }
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
  __ jcc(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
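
// Note on bytecode quickening: once the movb above has rewritten the opcode
// at the current bcp, subsequent executions of this bytecode dispatch
// directly to the faster template.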

// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
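  // The operand is big-endian in the bytecode stream: the 16-bit load
  // below fetches it byte-swapped, bswapl moves the correctly ordered
  // bytes into the high half of rax, and the arithmetic shift brings
  // them back down with sign extension.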
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableMethodHandles) {
    // We should not encounter this bytecode if !EnableMethodHandles.
    // The verifier will stop it. However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
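  // Locals live below the locals base in r14, so the index is negated
  // here and then scaled by wordSize in iaddress(Register) et al.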
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
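  // On the normal (in-bounds) path rbx now holds a copy of the index;
  // laload below relies on this.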
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
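  // rbx holds the index copy that index_check left behind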
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         sizeof(oopDesc) +
                         objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}
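
// sastore is identical to castore: both store a 16-bit value, and T_SHORT
// and T_CHAR arrays share the same element size and base offset.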
void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
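
// Note for the shift cases in iop2 below: x86 variable shifts take the
// count in cl, which is why the count is moved into rcx before shifting.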
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffers for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];
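// Each pool is twice the needed size so that double_quadword (called with
// &pool[1]) can always carve a 16-byte aligned 128-bit operand out of it,
// wherever the array happens to be placed.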

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
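
  // The x86 truncating conversions write the "integer indefinite" value
  // (0x80000000, or 0x8000000000000000 for 64-bit results) when the input
  // is NaN or out of range; the cases below compare against that sentinel
  // and call into the runtime to produce the result Java semantics require.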
  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
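
// unordered_result encodes the fcmpl/fcmpg distinction: it selects whether
// an unordered compare (NaN operand, signalled by the parity flag after
// ucomiss/ucomisd) produces -1 or +1.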
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // increment counter
    __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
    __ incrementl(rax, InvocationCounter::count_increment); // increment counter
    __ movl(Address(rcx, be_offset), rax);                  // store counter

    __ movl(rax, Address(rcx, inv_offset));            // load invocation counter
    __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
    __ addl(rax, Address(rcx, be_offset));             // add both counters

    if (ProfileInterpreter) {
      // Test to see if we should create a method data oop
      __ cmp32(rax,
               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, dispatch);

      // if no method data exists, go to profile method
      __ test_method_data_pointer(rax, profile_method);

      if (UseOnStackReplacement) {
        // check for overflow against ebx which is the MDO taken count
        __ cmp32(rbx,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::below, dispatch);

        // When ProfileInterpreter is on, the backedge_count comes
        // from the methodDataOop, whose value does not get reset on
        // the call to frequency_counter_overflow(). To avoid
        // excessive calls to the overflow routine while the method is
        // being compiled, add a second test to make sure the overflow
        // function is called only once every overflow_frequency.
        const int overflow_frequency = 1024;
        __ andl(rbx, overflow_frequency - 1);
        __ jcc(Assembler::zero, backedge_counter_overflow);

      }
    } else {
      if (UseOnStackReplacement) {
        // check for overflow against eax, which is the sum of the
        // counters
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::profile_method), r13);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx,
                             in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr   = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
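
// The tableswitch operands start at the next 4-byte boundary after the
// opcode and are stored big-endian, hence the alignment of rbx and the
// byte swaps below.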
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key   = rax; // already set (tosca)
  const Register array = rbx;
  const Register i     = rcx;
  const Register j     = rdx;
  const Register h     = rdi;
  const Register temp  = rsi;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ cmovl(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmovl(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);
}
1985 void TemplateTable::_return(TosState state) {
1986 transition(state, state);
1987 assert(_desc->calls_vm(),
1988 "inconsistent calls_vm information"); // call in remove_activation
1990 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1991 assert(state == vtos, "only valid state");
1992 __ movptr(c_rarg1, aaddress(0));
1993 __ load_klass(rdi, c_rarg1);
1994 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1995 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
1996 Label skip_register_finalizer;
1997 __ jcc(Assembler::zero, skip_register_finalizer);
1999 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2001 __ bind(skip_register_finalizer);
2002 }
2004 __ remove_activation(state, r13);
2005 __ jmp(r13);
2006 }
2008 // ----------------------------------------------------------------------------
2009 // Volatile variables demand their effects be made known to all CPUs
2010 // in order. Store buffers on most chips allow reads & writes to
2011 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2012 // without some kind of memory barrier (i.e., it's not sufficient that
2013 // the interpreter does not reorder volatile references, the hardware
2014 // also must not reorder them).
2015 //
2016 // According to the new Java Memory Model (JMM):
2017 // (1) All volatiles are serialized wrt each other. ALSO reads &
2018 // writes act as acquire & release, so:
2019 // (2) A read cannot let unrelated NON-volatile memory refs that
2020 // happen after the read float up to before the read. It's OK for
2021 // non-volatile memory refs that happen before the volatile read to
2022 // float down below it.
2023 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2024 // memory refs that happen BEFORE the write float down to after the
2025 // write. It's OK for non-volatile memory refs that happen after the
2026 // volatile write to float up before it.
2027 //
2028 // We only put in barriers around volatile refs (they are expensive),
2029 // not _between_ memory refs (that would require us to track the
2030 // flavor of the previous memory refs). Requirements (2) and (3)
2031 // require some barriers before volatile stores and after volatile
2032 // loads. These nearly cover requirement (1) but miss the
2033 // volatile-store-volatile-load case. This final case is placed after
2034 // volatile-stores although it could just as well go before
2035 // volatile-loads.
2036 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2037 order_constraint) {
2038 // Helper function to insert an is-volatile test and memory barrier
2039 if (os::is_MP()) { // Not needed on single CPU
2040 __ membar(order_constraint);
2041 }
2042 }
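// In this file the only barrier actually emitted is (see
// putfield_or_static and fast_storefield below):
//
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
//
// after a volatile store. On x86 the LoadLoad, LoadStore and StoreStore
// orderings demanded by (2) and (3) already hold in hardware, so only
// the volatile-store-volatile-load case needs code.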
2044 void TemplateTable::resolve_cache_and_index(int byte_no,
2045 Register result,
2046 Register Rcache,
2047 Register index,
2048 size_t index_size) {
2049 const Register temp = rbx;
2050 assert_different_registers(result, Rcache, index, temp);
2052 Label resolved;
2053 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2054 if (byte_no == f1_oop) {
2055 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2056 // This kind of CP cache entry does not need to match the flags byte, because
2057 // there is a 1-1 relation between bytecode type and CP entry type.
2058 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2059 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2060 __ testptr(result, result);
2061 __ jcc(Assembler::notEqual, resolved);
2062 } else {
2063 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2064 assert(result == noreg, ""); //else change code for setting result
2065 const int shift_count = (1 + byte_no) * BitsPerByte;
2066 __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2067 __ shrl(temp, shift_count);
2068 // have we resolved this bytecode?
2069 __ andl(temp, 0xFF);
2070 __ cmpl(temp, (int) bytecode());
2071 __ jcc(Assembler::equal, resolved);
2072 }
2074 // resolve first time through
2075 address entry;
2076 switch (bytecode()) {
2077 case Bytecodes::_getstatic:
2078 case Bytecodes::_putstatic:
2079 case Bytecodes::_getfield:
2080 case Bytecodes::_putfield:
2081 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2082 break;
2083 case Bytecodes::_invokevirtual:
2084 case Bytecodes::_invokespecial:
2085 case Bytecodes::_invokestatic:
2086 case Bytecodes::_invokeinterface:
2087 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2088 break;
2089 case Bytecodes::_invokedynamic:
2090 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2091 break;
2092 case Bytecodes::_fast_aldc:
2093 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2094 break;
2095 case Bytecodes::_fast_aldc_w:
2096 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2097 break;
2098 default:
2099 ShouldNotReachHere();
2100 break;
2101 }
2102 __ movl(temp, (int) bytecode());
2103 __ call_VM(noreg, entry, temp);
2105 // Update registers with resolved info
2106 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2107 if (result != noreg)
2108 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2109 __ bind(resolved);
2110 }
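// Worked example of the resolution check above (assuming f1_byte == 1
// and f2_byte == 2 as defined in templateTable.hpp): for byte_no ==
// f1_byte the resolved bytecode is cached in bits 16..23 of the indices
// word, so shift_count == 16 and the shrl/andl pair extracts exactly
// that byte for comparison against bytecode().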
2112 // The Rcache and index registers must be set before the call
2113 void TemplateTable::load_field_cp_cache_entry(Register obj,
2114 Register cache,
2115 Register index,
2116 Register off,
2117 Register flags,
2118 bool is_static = false) {
2119 assert_different_registers(cache, index, flags, off);
2121 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2122 // Field offset
2123 __ movptr(off, Address(cache, index, Address::times_8,
2124 in_bytes(cp_base_offset +
2125 ConstantPoolCacheEntry::f2_offset())));
2126 // Flags
2127 __ movl(flags, Address(cache, index, Address::times_8,
2128 in_bytes(cp_base_offset +
2129 ConstantPoolCacheEntry::flags_offset())));
2131 // klass overwrite register
2132 if (is_static) {
2133 __ movptr(obj, Address(cache, index, Address::times_8,
2134 in_bytes(cp_base_offset +
2135 ConstantPoolCacheEntry::f1_offset())));
2136 }
2137 }
2139 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2140 Register method,
2141 Register itable_index,
2142 Register flags,
2143 bool is_invokevirtual,
2144 bool is_invokevfinal, /*unused*/
2145 bool is_invokedynamic) {
2146 // setup registers
2147 const Register cache = rcx;
2148 const Register index = rdx;
2149 assert_different_registers(method, flags);
2150 assert_different_registers(method, cache, index);
2151 assert_different_registers(itable_index, flags);
2152 assert_different_registers(itable_index, cache, index);
2153 // determine constant pool cache field offsets
2154 const int method_offset = in_bytes(
2155 constantPoolCacheOopDesc::base_offset() +
2156 (is_invokevirtual
2157 ? ConstantPoolCacheEntry::f2_offset()
2158 : ConstantPoolCacheEntry::f1_offset()));
2159 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2160 ConstantPoolCacheEntry::flags_offset());
2161 // access constant pool cache fields
2162 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2163 ConstantPoolCacheEntry::f2_offset());
2165 if (byte_no == f1_oop) {
2166 // Resolved f1_oop goes directly into 'method' register.
2167 assert(is_invokedynamic, "");
2168 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2169 } else {
2170 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2171 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2172 }
2173 if (itable_index != noreg) {
2174 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2175 }
2176 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2177 }
2180 // The cache and index registers are expected to be set before the call.
2181 // Correct values of the cache and index registers are preserved.
2182 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2183 bool is_static, bool has_tos) {
2184 // do the JVMTI work here to avoid disturbing the register state below
2185 // We use the c_rarg registers here because they are the argument
2186 // registers for the call into the VM
2187 if (JvmtiExport::can_post_field_access()) {
2188 // Check to see if a field access watch has been set before we
2189 // take the time to call into the VM.
2190 Label L1;
2191 assert_different_registers(cache, index, rax);
2192 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2193 __ testl(rax, rax);
2194 __ jcc(Assembler::zero, L1);
2196 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2198 // cache entry pointer
2199 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
2200 __ shll(c_rarg3, LogBytesPerWord);
2201 __ addptr(c_rarg2, c_rarg3);
2202 if (is_static) {
2203 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2204 } else {
2205 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2206 __ verify_oop(c_rarg1);
2207 }
2208 // c_rarg1: object pointer or NULL
2209 // c_rarg2: cache entry pointer
2210 // c_rarg3: jvalue object on the stack
2211 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2212 InterpreterRuntime::post_field_access),
2213 c_rarg1, c_rarg2, c_rarg3);
2214 __ get_cache_and_index_at_bcp(cache, index, 1);
2215 __ bind(L1);
2216 }
2217 }
2219 void TemplateTable::pop_and_check_object(Register r) {
2220 __ pop_ptr(r);
2221 __ null_check(r); // for field access must check obj.
2222 __ verify_oop(r);
2223 }
2225 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2226 transition(vtos, vtos);
2228 const Register cache = rcx;
2229 const Register index = rdx;
2230 const Register obj = c_rarg3;
2231 const Register off = rbx;
2232 const Register flags = rax;
2233 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2235 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2236 jvmti_post_field_access(cache, index, is_static, false);
2237 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2239 if (!is_static) {
2240 // obj is on the stack
2241 pop_and_check_object(obj);
2242 }
2244 const Address field(obj, off, Address::times_1);
2246 Label Done, notByte, notInt, notShort, notChar,
2247 notLong, notFloat, notObj, notDouble;
2249 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2250 assert(btos == 0, "change code, btos != 0");
2252 __ andl(flags, 0x0F);
2253 __ jcc(Assembler::notZero, notByte);
2254 // btos
2255 __ load_signed_byte(rax, field);
2256 __ push(btos);
2257 // Rewrite bytecode to be faster
2258 if (!is_static) {
2259 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2260 }
2261 __ jmp(Done);
2263 __ bind(notByte);
2264 __ cmpl(flags, atos);
2265 __ jcc(Assembler::notEqual, notObj);
2266 // atos
2267 __ load_heap_oop(rax, field);
2268 __ push(atos);
2269 if (!is_static) {
2270 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2271 }
2272 __ jmp(Done);
2274 __ bind(notObj);
2275 __ cmpl(flags, itos);
2276 __ jcc(Assembler::notEqual, notInt);
2277 // itos
2278 __ movl(rax, field);
2279 __ push(itos);
2280 // Rewrite bytecode to be faster
2281 if (!is_static) {
2282 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2283 }
2284 __ jmp(Done);
2286 __ bind(notInt);
2287 __ cmpl(flags, ctos);
2288 __ jcc(Assembler::notEqual, notChar);
2289 // ctos
2290 __ load_unsigned_short(rax, field);
2291 __ push(ctos);
2292 // Rewrite bytecode to be faster
2293 if (!is_static) {
2294 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2295 }
2296 __ jmp(Done);
2298 __ bind(notChar);
2299 __ cmpl(flags, stos);
2300 __ jcc(Assembler::notEqual, notShort);
2301 // stos
2302 __ load_signed_short(rax, field);
2303 __ push(stos);
2304 // Rewrite bytecode to be faster
2305 if (!is_static) {
2306 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2307 }
2308 __ jmp(Done);
2310 __ bind(notShort);
2311 __ cmpl(flags, ltos);
2312 __ jcc(Assembler::notEqual, notLong);
2313 // ltos
2314 __ movq(rax, field);
2315 __ push(ltos);
2316 // Rewrite bytecode to be faster
2317 if (!is_static) {
2318 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2319 }
2320 __ jmp(Done);
2322 __ bind(notLong);
2323 __ cmpl(flags, ftos);
2324 __ jcc(Assembler::notEqual, notFloat);
2325 // ftos
2326 __ movflt(xmm0, field);
2327 __ push(ftos);
2328 // Rewrite bytecode to be faster
2329 if (!is_static) {
2330 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2331 }
2332 __ jmp(Done);
2334 __ bind(notFloat);
2335 #ifdef ASSERT
2336 __ cmpl(flags, dtos);
2337 __ jcc(Assembler::notEqual, notDouble);
2338 #endif
2339 // dtos
2340 __ movdbl(xmm0, field);
2341 __ push(dtos);
2342 // Rewrite bytecode to be faster
2343 if (!is_static) {
2344 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2345 }
2346 #ifdef ASSERT
2347 __ jmp(Done);
2349 __ bind(notDouble);
2350 __ stop("Bad state");
2351 #endif
2353 __ bind(Done);
2354 // [jk] not needed currently
2355 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2356 // Assembler::LoadStore));
2357 }
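// The test chain above amounts to the following (sketch only; the
// load_* helpers stand in for the masm loads used above):
//
//   switch (tos) {                                  // tos == flags >> tosBits
//     case btos: rax  = load_signed_byte(field);    break;
//     case atos: rax  = load_heap_oop(field);       break;
//     case itos: rax  = load_int(field);            break;
//     case ctos: rax  = load_unsigned_short(field); break;
//     case stos: rax  = load_signed_short(field);   break;
//     case ltos: rax  = load_long(field);           break;
//     case ftos: xmm0 = load_float(field);          break;
//     case dtos: xmm0 = load_double(field);         break;
//   }
//   push(tos);  // and, when !is_static, patch to the matching _fast_ getfield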
2360 void TemplateTable::getfield(int byte_no) {
2361 getfield_or_static(byte_no, false);
2362 }
2364 void TemplateTable::getstatic(int byte_no) {
2365 getfield_or_static(byte_no, true);
2366 }
2368 // The cache and index registers are expected to be set before the call.
2369 // The function may destroy various registers, just not the cache and index registers.
2370 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2371 transition(vtos, vtos);
2373 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2375 if (JvmtiExport::can_post_field_modification()) {
2376 // Check to see if a field modification watch has been set before
2377 // we take the time to call into the VM.
2378 Label L1;
2379 assert_different_registers(cache, index, rax);
2380 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2381 __ testl(rax, rax);
2382 __ jcc(Assembler::zero, L1);
2384 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2386 if (is_static) {
2387 // Life is simple. Null out the object pointer.
2388 __ xorl(c_rarg1, c_rarg1);
2389 } else {
2390 // Life is harder. The stack holds the value on top, followed by
2391 // the object. We don't know the size of the value, though; it
2392 // could be one or two words depending on its type. As a result,
2393 // we must find the type to determine where the object is.
2394 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2395 Address::times_8,
2396 in_bytes(cp_base_offset +
2397 ConstantPoolCacheEntry::flags_offset())));
2398 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
2399 // Make sure we don't need to mask rcx for tosBits after the
2400 // above shift
2401 ConstantPoolCacheEntry::verify_tosBits();
2402 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2403 __ cmpl(c_rarg3, ltos);
2404 __ cmovptr(Assembler::equal,
2405 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2406 __ cmpl(c_rarg3, dtos);
2407 __ cmovptr(Assembler::equal,
2408 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2409 }
2410 // cache entry pointer
2411 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2412 __ shll(rscratch1, LogBytesPerWord);
2413 __ addptr(c_rarg2, rscratch1);
2414 // object (tos)
2415 __ mov(c_rarg3, rsp);
2416 // c_rarg1: object pointer set up above (NULL if static)
2417 // c_rarg2: cache entry pointer
2418 // c_rarg3: jvalue object on the stack
2419 __ call_VM(noreg,
2420 CAST_FROM_FN_PTR(address,
2421 InterpreterRuntime::post_field_modification),
2422 c_rarg1, c_rarg2, c_rarg3);
2423 __ get_cache_and_index_at_bcp(cache, index, 1);
2424 __ bind(L1);
2425 }
2426 }
2428 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2429 transition(vtos, vtos);
2431 const Register cache = rcx;
2432 const Register index = rdx;
2433 const Register obj = rcx;
2434 const Register off = rbx;
2435 const Register flags = rax;
2436 const Register bc = c_rarg3;
2438 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2439 jvmti_post_field_mod(cache, index, is_static);
2440 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2442 // [jk] not needed currently
2443 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2444 // Assembler::StoreStore));
2446 Label notVolatile, Done;
2447 __ movl(rdx, flags);
2448 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2449 __ andl(rdx, 0x1);
2451 // field address
2452 const Address field(obj, off, Address::times_1);
2454 Label notByte, notInt, notShort, notChar,
2455 notLong, notFloat, notObj, notDouble;
2457 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2459 assert(btos == 0, "change code, btos != 0");
2460 __ andl(flags, 0x0f);
2461 __ jcc(Assembler::notZero, notByte);
2462 // btos
2463 __ pop(btos);
2464 if (!is_static) pop_and_check_object(obj);
2465 __ movb(field, rax);
2466 if (!is_static) {
2467 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
2468 }
2469 __ jmp(Done);
2471 __ bind(notByte);
2472 __ cmpl(flags, atos);
2473 __ jcc(Assembler::notEqual, notObj);
2474 // atos
2475 __ pop(atos);
2476 if (!is_static) pop_and_check_object(obj);
2478 // Store into the field
2479 do_oop_store(_masm, field, rax, _bs->kind(), false);
2481 if (!is_static) {
2482 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
2483 }
2484 __ jmp(Done);
2486 __ bind(notObj);
2487 __ cmpl(flags, itos);
2488 __ jcc(Assembler::notEqual, notInt);
2489 // itos
2490 __ pop(itos);
2491 if (!is_static) pop_and_check_object(obj);
2492 __ movl(field, rax);
2493 if (!is_static) {
2494 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
2495 }
2496 __ jmp(Done);
2498 __ bind(notInt);
2499 __ cmpl(flags, ctos);
2500 __ jcc(Assembler::notEqual, notChar);
2501 // ctos
2502 __ pop(ctos);
2503 if (!is_static) pop_and_check_object(obj);
2504 __ movw(field, rax);
2505 if (!is_static) {
2506 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
2507 }
2508 __ jmp(Done);
2510 __ bind(notChar);
2511 __ cmpl(flags, stos);
2512 __ jcc(Assembler::notEqual, notShort);
2513 // stos
2514 __ pop(stos);
2515 if (!is_static) pop_and_check_object(obj);
2516 __ movw(field, rax);
2517 if (!is_static) {
2518 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
2519 }
2520 __ jmp(Done);
2522 __ bind(notShort);
2523 __ cmpl(flags, ltos);
2524 __ jcc(Assembler::notEqual, notLong);
2525 // ltos
2526 __ pop(ltos);
2527 if (!is_static) pop_and_check_object(obj);
2528 __ movq(field, rax);
2529 if (!is_static) {
2530 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
2531 }
2532 __ jmp(Done);
2534 __ bind(notLong);
2535 __ cmpl(flags, ftos);
2536 __ jcc(Assembler::notEqual, notFloat);
2537 // ftos
2538 __ pop(ftos);
2539 if (!is_static) pop_and_check_object(obj);
2540 __ movflt(field, xmm0);
2541 if (!is_static) {
2542 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
2543 }
2544 __ jmp(Done);
2546 __ bind(notFloat);
2547 #ifdef ASSERT
2548 __ cmpl(flags, dtos);
2549 __ jcc(Assembler::notEqual, notDouble);
2550 #endif
2551 // dtos
2552 __ pop(dtos);
2553 if (!is_static) pop_and_check_object(obj);
2554 __ movdbl(field, xmm0);
2555 if (!is_static) {
2556 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
2557 }
2559 #ifdef ASSERT
2560 __ jmp(Done);
2562 __ bind(notDouble);
2563 __ stop("Bad state");
2564 #endif
2566 __ bind(Done);
2567 // Check for volatile store
2568 __ testl(rdx, rdx);
2569 __ jcc(Assembler::zero, notVolatile);
2570 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2571 Assembler::StoreStore));
2573 __ bind(notVolatile);
2574 }
2576 void TemplateTable::putfield(int byte_no) {
2577 putfield_or_static(byte_no, false);
2578 }
2580 void TemplateTable::putstatic(int byte_no) {
2581 putfield_or_static(byte_no, true);
2582 }
2584 void TemplateTable::jvmti_post_fast_field_mod() {
2585 if (JvmtiExport::can_post_field_modification()) {
2586 // Check to see if a field modification watch has been set before
2587 // we take the time to call into the VM.
2588 Label L2;
2589 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2590 __ testl(c_rarg3, c_rarg3);
2591 __ jcc(Assembler::zero, L2);
2592 __ pop_ptr(rbx); // copy the object pointer from tos
2593 __ verify_oop(rbx);
2594 __ push_ptr(rbx); // put the object pointer back on tos
2595 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2596 __ mov(c_rarg3, rsp);
2597 const Address field(c_rarg3, 0);
2599 switch (bytecode()) { // load values into the jvalue object
2600 case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
2601 case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
2602 case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
2603 case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
2604 case Bytecodes::_fast_sputfield: // fall through
2605 case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
2606 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
2607 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
2608 default:
2609 ShouldNotReachHere();
2610 }
2612 // Save rax because call_VM() will clobber it, then use it for
2613 // JVMTI purposes
2614 __ push(rax);
2615 // access constant pool cache entry
2616 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2617 __ verify_oop(rbx);
2618 // rbx: object pointer copied above
2619 // c_rarg2: cache entry pointer
2620 // c_rarg3: jvalue object on the stack
2621 __ call_VM(noreg,
2622 CAST_FROM_FN_PTR(address,
2623 InterpreterRuntime::post_field_modification),
2624 rbx, c_rarg2, c_rarg3);
2625 __ pop(rax); // restore lower value
2626 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2627 __ bind(L2);
2628 }
2629 }
2631 void TemplateTable::fast_storefield(TosState state) {
2632 transition(state, vtos);
2634 ByteSize base = constantPoolCacheOopDesc::base_offset();
2636 jvmti_post_fast_field_mod();
2638 // access constant pool cache
2639 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2641 // test for volatile with rdx
2642 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2643 in_bytes(base +
2644 ConstantPoolCacheEntry::flags_offset())));
2646 // replace index with field offset from cache entry
2647 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2648 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2650 // [jk] not needed currently
2651 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2652 // Assembler::StoreStore));
2654 Label notVolatile;
2655 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2656 __ andl(rdx, 0x1);
2658 // Get object from stack
2659 pop_and_check_object(rcx);
2661 // field address
2662 const Address field(rcx, rbx, Address::times_1);
2664 // access field
2665 switch (bytecode()) {
2666 case Bytecodes::_fast_aputfield:
2667 do_oop_store(_masm, field, rax, _bs->kind(), false);
2668 break;
2669 case Bytecodes::_fast_lputfield:
2670 __ movq(field, rax);
2671 break;
2672 case Bytecodes::_fast_iputfield:
2673 __ movl(field, rax);
2674 break;
2675 case Bytecodes::_fast_bputfield:
2676 __ movb(field, rax);
2677 break;
2678 case Bytecodes::_fast_sputfield:
2679 // fall through
2680 case Bytecodes::_fast_cputfield:
2681 __ movw(field, rax);
2682 break;
2683 case Bytecodes::_fast_fputfield:
2684 __ movflt(field, xmm0);
2685 break;
2686 case Bytecodes::_fast_dputfield:
2687 __ movdbl(field, xmm0);
2688 break;
2689 default:
2690 ShouldNotReachHere();
2691 }
2693 // Check for volatile store
2694 __ testl(rdx, rdx);
2695 __ jcc(Assembler::zero, notVolatile);
2696 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2697 Assembler::StoreStore));
2698 __ bind(notVolatile);
2699 }
2702 void TemplateTable::fast_accessfield(TosState state) {
2703 transition(atos, state);
2705 // Do the JVMTI work here to avoid disturbing the register state below
2706 if (JvmtiExport::can_post_field_access()) {
2707 // Check to see if a field access watch has been set before we
2708 // take the time to call into the VM.
2709 Label L1;
2710 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2711 __ testl(rcx, rcx);
2712 __ jcc(Assembler::zero, L1);
2713 // access constant pool cache entry
2714 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2715 __ verify_oop(rax);
2716 __ mov(r12, rax); // save object pointer before call_VM() clobbers it
2717 __ mov(c_rarg1, rax);
2718 // c_rarg1: object pointer copied above
2719 // c_rarg2: cache entry pointer
2720 __ call_VM(noreg,
2721 CAST_FROM_FN_PTR(address,
2722 InterpreterRuntime::post_field_access),
2723 c_rarg1, c_rarg2);
2724 __ mov(rax, r12); // restore object pointer
2725 __ reinit_heapbase();
2726 __ bind(L1);
2727 }
2729 // access constant pool cache
2730 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2731 // replace index with field offset from cache entry
2732 // [jk] not needed currently
2733 // if (os::is_MP()) {
2734 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2735 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2736 // ConstantPoolCacheEntry::flags_offset())));
2737 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2738 // __ andl(rdx, 0x1);
2739 // }
2740 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2741 in_bytes(constantPoolCacheOopDesc::base_offset() +
2742 ConstantPoolCacheEntry::f2_offset())));
2744 // rax: object
2745 __ verify_oop(rax);
2746 __ null_check(rax);
2747 Address field(rax, rbx, Address::times_1);
2749 // access field
2750 switch (bytecode()) {
2751 case Bytecodes::_fast_agetfield:
2752 __ load_heap_oop(rax, field);
2753 __ verify_oop(rax);
2754 break;
2755 case Bytecodes::_fast_lgetfield:
2756 __ movq(rax, field);
2757 break;
2758 case Bytecodes::_fast_igetfield:
2759 __ movl(rax, field);
2760 break;
2761 case Bytecodes::_fast_bgetfield:
2762 __ movsbl(rax, field);
2763 break;
2764 case Bytecodes::_fast_sgetfield:
2765 __ load_signed_short(rax, field);
2766 break;
2767 case Bytecodes::_fast_cgetfield:
2768 __ load_unsigned_short(rax, field);
2769 break;
2770 case Bytecodes::_fast_fgetfield:
2771 __ movflt(xmm0, field);
2772 break;
2773 case Bytecodes::_fast_dgetfield:
2774 __ movdbl(xmm0, field);
2775 break;
2776 default:
2777 ShouldNotReachHere();
2778 }
2779 // [jk] not needed currently
2780 // if (os::is_MP()) {
2781 // Label notVolatile;
2782 // __ testl(rdx, rdx);
2783 // __ jcc(Assembler::zero, notVolatile);
2784 // __ membar(Assembler::LoadLoad);
2785 // __ bind(notVolatile);
2786 //};
2787 }
2789 void TemplateTable::fast_xaccess(TosState state) {
2790 transition(vtos, state);
2792 // get receiver
2793 __ movptr(rax, aaddress(0));
2794 // access constant pool cache
2795 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2796 __ movptr(rbx,
2797 Address(rcx, rdx, Address::times_8,
2798 in_bytes(constantPoolCacheOopDesc::base_offset() +
2799 ConstantPoolCacheEntry::f2_offset())));
2800 // make sure exception is reported in correct bcp range (getfield is
2801 // next instruction)
2802 __ increment(r13);
2803 __ null_check(rax);
2804 switch (state) {
2805 case itos:
2806 __ movl(rax, Address(rax, rbx, Address::times_1));
2807 break;
2808 case atos:
2809 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2810 __ verify_oop(rax);
2811 break;
2812 case ftos:
2813 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2814 break;
2815 default:
2816 ShouldNotReachHere();
2817 }
2819 // [jk] not needed currently
2820 // if (os::is_MP()) {
2821 // Label notVolatile;
2822 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2823 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2824 // ConstantPoolCacheEntry::flags_offset())));
2825 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2826 // __ testl(rdx, 0x1);
2827 // __ jcc(Assembler::zero, notVolatile);
2828 // __ membar(Assembler::LoadLoad);
2829 // __ bind(notVolatile);
2830 // }
2832 __ decrement(r13);
2833 }
2837 //-----------------------------------------------------------------------------
2838 // Calls
2840 void TemplateTable::count_calls(Register method, Register temp) {
2841 // implemented elsewhere
2842 ShouldNotReachHere();
2843 }
2845 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2846 // determine flags
2847 Bytecodes::Code code = bytecode();
2848 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2849 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2850 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2851 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2852 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2853 const bool receiver_null_check = is_invokespecial;
2854 const bool save_flags = is_invokeinterface || is_invokevirtual;
2855 // setup registers & access constant pool cache
2856 const Register recv = rcx;
2857 const Register flags = rdx;
2858 assert_different_registers(method, index, recv, flags);
2860 // save 'interpreter return address'
2861 __ save_bcp();
2863 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2865 // load receiver if needed (note: no return address pushed yet)
2866 if (load_receiver) {
2867 assert(!is_invokedynamic, "");
2868 __ movl(recv, flags);
2869 __ andl(recv, 0xFF);
2870 Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
2871 __ movptr(recv, recv_addr);
2872 __ verify_oop(recv);
2873 }
2875 // do null check if needed
2876 if (receiver_null_check) {
2877 __ null_check(recv);
2878 }
2880 if (save_flags) {
2881 __ movl(r13, flags);
2882 }
2884 // compute return type
2885 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2886 // Make sure we don't need to mask flags for tosBits after the above shift
2887 ConstantPoolCacheEntry::verify_tosBits();
2888 // load return address
2889 {
2890 address table_addr;
2891 if (is_invokeinterface || is_invokedynamic)
2892 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2893 else
2894 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2895 ExternalAddress table(table_addr);
2896 __ lea(rscratch1, table);
2897 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2898 }
2900 // push return address
2901 __ push(flags);
2903 // Restore the flag field from its copy in r13, then restore r13
2904 // itself (the bytecode pointer) for later null checks
2905 if (save_flags) {
2906 __ movl(flags, r13);
2907 __ restore_bcp();
2908 }
2909 }
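// The "return address" pushed above is selected per result TosState
// from an interpreter table (sketch): return_5 tables serve the 5-byte
// invokeinterface/invokedynamic bytecodes, return_3 the 3-byte invokes,
// so the chosen entry also knows how far to advance the bcp on return:
//
//   address ret = ((address*) table_addr)[result_tos_state];  // the movptr above
//   push(ret);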
2912 void TemplateTable::invokevirtual_helper(Register index,
2913 Register recv,
2914 Register flags) {
2915 // Uses temporary registers rax, rdx
2916 assert_different_registers(index, recv, rax, rdx);
2917 // Test for an invoke of a final method
2918 Label notFinal;
2919 __ movl(rax, flags);
2920 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2921 __ jcc(Assembler::zero, notFinal);
2923 const Register method = index; // method must be rbx
2924 assert(method == rbx,
2925 "methodOop must be rbx for interpreter calling convention");
2927 // do the call - the index is actually the method to call
2928 __ verify_oop(method);
2930 // It's final, need a null check here!
2931 __ null_check(recv);
2933 // profile this call
2934 __ profile_final_call(rax);
2936 __ jump_from_interpreted(method, rax);
2938 __ bind(notFinal);
2940 // get receiver klass
2941 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2942 __ load_klass(rax, recv);
2944 __ verify_oop(rax);
2946 // profile this call
2947 __ profile_virtual_call(rax, r14, rdx);
2949 // get target methodOop & entry point
2950 const int base = instanceKlass::vtable_start_offset() * wordSize;
2951 assert(vtableEntry::size() * wordSize == 8,
2952 "adjust the scaling in the code below");
2953 __ movptr(method, Address(rax, index,
2954 Address::times_8,
2955 base + vtableEntry::method_offset_in_bytes()));
2956 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
2957 __ jump_from_interpreted(method, rdx);
2958 }
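// The vtable dispatch above corresponds to (sketch; accessor names are
// illustrative, not the real klassVtable API):
//
//   methodOop m = recv_klass->vtable_entry(index)->method();
//   goto m->interpreter_entry();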
2961 void TemplateTable::invokevirtual(int byte_no) {
2962 transition(vtos, vtos);
2963 assert(byte_no == f2_byte, "use this argument");
2964 prepare_invoke(rbx, noreg, byte_no);
2966 // rbx: index
2967 // rcx: receiver
2968 // rdx: flags
2970 invokevirtual_helper(rbx, rcx, rdx);
2971 }
2974 void TemplateTable::invokespecial(int byte_no) {
2975 transition(vtos, vtos);
2976 assert(byte_no == f1_byte, "use this argument");
2977 prepare_invoke(rbx, noreg, byte_no);
2978 // do the call
2979 __ verify_oop(rbx);
2980 __ profile_call(rax);
2981 __ jump_from_interpreted(rbx, rax);
2982 }
2985 void TemplateTable::invokestatic(int byte_no) {
2986 transition(vtos, vtos);
2987 assert(byte_no == f1_byte, "use this argument");
2988 prepare_invoke(rbx, noreg, byte_no);
2989 // do the call
2990 __ verify_oop(rbx);
2991 __ profile_call(rax);
2992 __ jump_from_interpreted(rbx, rax);
2993 }
2995 void TemplateTable::fast_invokevfinal(int byte_no) {
2996 transition(vtos, vtos);
2997 assert(byte_no == f2_byte, "use this argument");
2998 __ stop("fast_invokevfinal not used on amd64");
2999 }
3001 void TemplateTable::invokeinterface(int byte_no) {
3002 transition(vtos, vtos);
3003 assert(byte_no == f1_byte, "use this argument");
3004 prepare_invoke(rax, rbx, byte_no);
3006 // rax: Interface
3007 // rbx: index
3008 // rcx: receiver
3009 // rdx: flags
3011 // Special case of invokeinterface called for virtual method of
3012 // java.lang.Object. See cpCacheOop.cpp for details.
3013 // This code isn't produced by javac, but could be produced by
3014 // another compliant Java compiler.
3015 Label notMethod;
3016 __ movl(r14, rdx);
3017 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
3018 __ jcc(Assembler::zero, notMethod);
3020 invokevirtual_helper(rbx, rcx, rdx);
3021 __ bind(notMethod);
3023 // Get receiver klass into rdx - also a null check
3024 __ restore_locals(); // restore r14
3025 __ load_klass(rdx, rcx);
3026 __ verify_oop(rdx);
3028 // profile this call
3029 __ profile_virtual_call(rdx, r13, r14);
3031 Label no_such_interface, no_such_method;
3033 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3034 rdx, rax, rbx,
3035 // outputs: method, scan temp. reg
3036 rbx, r13,
3037 no_such_interface);
3039 // rbx: methodOop to call
3040 // rcx: receiver
3041 // Check for abstract method error
3042 // Note: This should be done more efficiently via a throw_abstract_method_error
3043 // interpreter entry point and a conditional jump to it in case of a null
3044 // method.
3045 __ testptr(rbx, rbx);
3046 __ jcc(Assembler::zero, no_such_method);
3048 // do the call
3049 // rcx: receiver
3050 // rbx: methodOop
3051 __ jump_from_interpreted(rbx, rdx);
3052 __ should_not_reach_here();
3054 // exception handling code follows...
3055 // note: must restore interpreter registers to canonical
3056 // state for exception handling to work correctly!
3058 __ bind(no_such_method);
3059 // throw exception
3060 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3061 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3062 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3063 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3064 // the call_VM checks for exception, so we should never return here.
3065 __ should_not_reach_here();
3067 __ bind(no_such_interface);
3068 // throw exception
3069 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3070 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3071 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3072 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3073 InterpreterRuntime::throw_IncompatibleClassChangeError));
3074 // the call_VM checks for exception, so we should never return here.
3075 __ should_not_reach_here();
3076 return;
3077 }
3079 void TemplateTable::invokedynamic(int byte_no) {
3080 transition(vtos, vtos);
3081 assert(byte_no == f1_oop, "use this argument");
3083 if (!EnableInvokeDynamic) {
3084 // We should not encounter this bytecode if !EnableInvokeDynamic.
3085 // The verifier will stop it. However, if we get past the verifier,
3086 // this will stop the thread in a reasonable way, without crashing the JVM.
3087 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3088 InterpreterRuntime::throw_IncompatibleClassChangeError));
3089 // the call_VM checks for exception, so we should never return here.
3090 __ should_not_reach_here();
3091 return;
3092 }
3094 assert(byte_no == f1_oop, "use this argument");
3095 prepare_invoke(rax, rbx, byte_no);
3097 // rax: CallSite object (f1)
3098 // rbx: unused (f2)
3099 // rcx: receiver address
3100 // rdx: flags (unused)
3102 if (ProfileInterpreter) {
3103 Label L;
3104 // %%% should make a type profile for any invokedynamic that takes a ref argument
3105 // profile this call
3106 __ profile_call(r13);
3107 }
3109 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3110 __ null_check(rcx);
3111 __ prepare_to_jump_from_interpreted();
3112 __ jump_to_method_handle_entry(rcx, rdx);
3113 }
3116 //-----------------------------------------------------------------------------
3117 // Allocation
3119 void TemplateTable::_new() {
3120 transition(vtos, atos);
3121 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3122 Label slow_case;
3123 Label done;
3124 Label initialize_header;
3125 Label initialize_object; // including clearing the fields
3126 Label allocate_shared;
3128 __ get_cpool_and_tags(rsi, rax);
3129 // get instanceKlass
3130 __ movptr(rsi, Address(rsi, rdx,
3131 Address::times_8, sizeof(constantPoolOopDesc)));
3133 // make sure the class we're about to instantiate has been
3134 // resolved. Note: slow_case does a pop of stack, which is why we
3135 // loaded class/pushed above
3136 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3137 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3138 JVM_CONSTANT_Class);
3139 __ jcc(Assembler::notEqual, slow_case);
3141 // make sure klass is initialized & doesn't have finalizer
3142 // make sure klass is fully initialized
3143 __ cmpl(Address(rsi,
3144 instanceKlass::init_state_offset_in_bytes() +
3145 sizeof(oopDesc)),
3146 instanceKlass::fully_initialized);
3147 __ jcc(Assembler::notEqual, slow_case);
3149 // get instance_size in instanceKlass (scaled to a count of bytes)
3150 __ movl(rdx,
3151 Address(rsi,
3152 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3153 // test to see if it has a finalizer or is malformed in some way
3154 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3155 __ jcc(Assembler::notZero, slow_case);
3157 // Allocate the instance
3158 // 1) Try to allocate in the TLAB
3159 // 2) if fail and the object is large allocate in the shared Eden
3160 // 3) if the above fails (or is not applicable), go to a slow case
3161 // (creates a new TLAB, etc.)
3163 const bool allow_shared_alloc =
3164 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3166 if (UseTLAB) {
3167 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3168 __ lea(rbx, Address(rax, rdx, Address::times_1));
3169 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3170 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3171 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3172 if (ZeroTLAB) {
3173 // the fields have been already cleared
3174 __ jmp(initialize_header);
3175 } else {
3176 // initialize both the header and fields
3177 __ jmp(initialize_object);
3178 }
3179 }
3181 // Allocation in the shared Eden, if allowed.
3182 //
3183 // rdx: instance size in bytes
3184 if (allow_shared_alloc) {
3185 __ bind(allocate_shared);
3187 ExternalAddress top((address)Universe::heap()->top_addr());
3188 ExternalAddress end((address)Universe::heap()->end_addr());
3190 const Register RtopAddr = rscratch1;
3191 const Register RendAddr = rscratch2;
3193 __ lea(RtopAddr, top);
3194 __ lea(RendAddr, end);
3195 __ movptr(rax, Address(RtopAddr, 0));
3197 // For retries rax gets set by cmpxchgq
3198 Label retry;
3199 __ bind(retry);
3200 __ lea(rbx, Address(rax, rdx, Address::times_1));
3201 __ cmpptr(rbx, Address(RendAddr, 0));
3202 __ jcc(Assembler::above, slow_case);
3204 // Compare rax with the top addr, and if still equal, store the new
3205 // top addr (in rbx) at the address of the top addr pointer. Sets ZF
3206 // if it was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3207 //
3208 // rax: object begin
3209 // rbx: object end
3210 // rdx: instance size in bytes
3211 if (os::is_MP()) {
3212 __ lock();
3213 }
3214 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3216 // if someone beat us on the allocation, try again, otherwise continue
3217 __ jcc(Assembler::notEqual, retry);
3218 }
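// The shared-eden path above is a classic CAS loop (sketch; top_addr
// and end_addr are the Universe::heap() cells held in rscratch1/2):
//
//   obj = *top_addr;                           // rax
//   do {
//     new_top = obj + size_in_bytes;           // rbx
//     if (new_top > *end_addr) goto slow_case;
//   } while (!cas(top_addr, &obj, new_top));   // failure reloads obj (rax)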
3220 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3221 // The object is initialized before the header. If the object size is
3222 // zero, go directly to the header initialization.
3223 __ bind(initialize_object);
3224 __ decrementl(rdx, sizeof(oopDesc));
3225 __ jcc(Assembler::zero, initialize_header);
3227 // Initialize object fields
3228 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3229 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3230 {
3231 Label loop;
3232 __ bind(loop);
3233 __ movq(Address(rax, rdx, Address::times_8,
3234 sizeof(oopDesc) - oopSize),
3235 rcx);
3236 __ decrementl(rdx);
3237 __ jcc(Assembler::notZero, loop);
3238 }
3240 // initialize object header only.
3241 __ bind(initialize_header);
3242 if (UseBiasedLocking) {
3243 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3244 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3245 } else {
3246 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3247 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3248 }
3249 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3250 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3251 __ store_klass(rax, rsi); // store klass last
3253 {
3254 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3255 // Trigger dtrace event for fastpath
3256 __ push(atos); // save the return value
3257 __ call_VM_leaf(
3258 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3259 __ pop(atos); // restore the return value
3261 }
3262 __ jmp(done);
3263 }
3266 // slow case
3267 __ bind(slow_case);
3268 __ get_constant_pool(c_rarg1);
3269 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3270 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3271 __ verify_oop(rax);
3273 // continue
3274 __ bind(done);
3275 }
3277 void TemplateTable::newarray() {
3278 transition(itos, atos);
3279 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3280 __ movl(c_rarg2, rax);
3281 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3282 c_rarg1, c_rarg2);
3283 }
3285 void TemplateTable::anewarray() {
3286 transition(itos, atos);
3287 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3288 __ get_constant_pool(c_rarg1);
3289 __ movl(c_rarg3, rax);
3290 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3291 c_rarg1, c_rarg2, c_rarg3);
3292 }
3294 void TemplateTable::arraylength() {
3295 transition(atos, itos);
3296 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3297 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3298 }
3300 void TemplateTable::checkcast() {
3301 transition(atos, atos);
3302 Label done, is_null, ok_is_subtype, quicked, resolved;
3303 __ testptr(rax, rax); // object is in rax
3304 __ jcc(Assembler::zero, is_null);
3306 // Get cpool & tags index
3307 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3308 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3309 // See if bytecode has already been quicked
3310 __ cmpb(Address(rdx, rbx,
3311 Address::times_1,
3312 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3313 JVM_CONSTANT_Class);
3314 __ jcc(Assembler::equal, quicked);
3315 __ push(atos); // save receiver for result, and for GC
3316 __ mov(r12, rcx); // save rcx XXX
3317 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3318 __ movq(rcx, r12); // restore rcx XXX
3319 __ reinit_heapbase();
3320 __ pop_ptr(rdx); // restore receiver
3321 __ jmpb(resolved);
3323 // Get superklass in rax and subklass in rbx
3324 __ bind(quicked);
3325 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3326 __ movptr(rax, Address(rcx, rbx,
3327 Address::times_8, sizeof(constantPoolOopDesc)));
3329 __ bind(resolved);
3330 __ load_klass(rbx, rdx);
3332 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3333 // Superklass in rax. Subklass in rbx.
3334 __ gen_subtype_check(rbx, ok_is_subtype);
3336 // Come here on failure
3337 __ push_ptr(rdx);
3338 // object is at TOS
3339 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3341 // Come here on success
3342 __ bind(ok_is_subtype);
3343 __ mov(rax, rdx); // Restore object in rdx
3345 // Collect counts on whether this check-cast sees NULLs a lot or not.
3346 if (ProfileInterpreter) {
3347 __ jmp(done);
3348 __ bind(is_null);
3349 __ profile_null_seen(rcx);
3350 } else {
3351 __ bind(is_null); // same as 'done'
3352 }
3353 __ bind(done);
3354 }
3356 void TemplateTable::instanceof() {
3357 transition(atos, itos);
3358 Label done, is_null, ok_is_subtype, quicked, resolved;
3359 __ testptr(rax, rax);
3360 __ jcc(Assembler::zero, is_null);
3362 // Get cpool & tags index
3363 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3364 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3365 // See if bytecode has already been quicked
3366 __ cmpb(Address(rdx, rbx,
3367 Address::times_1,
3368 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3369 JVM_CONSTANT_Class);
3370 __ jcc(Assembler::equal, quicked);
3372 __ push(atos); // save receiver for result, and for GC
3373 __ mov(r12, rcx); // save rcx
3374 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3375 __ movq(rcx, r12); // restore rcx
3376 __ reinit_heapbase();
3377 __ pop_ptr(rdx); // restore receiver
3378 __ load_klass(rdx, rdx);
3379 __ jmpb(resolved);
3381 // Get superklass in rax and subklass in rdx
3382 __ bind(quicked);
3383 __ load_klass(rdx, rax);
3384 __ movptr(rax, Address(rcx, rbx,
3385 Address::times_8, sizeof(constantPoolOopDesc)));
3387 __ bind(resolved);
3389 // Generate subtype check. Blows rcx, rdi
3390 // Superklass in rax. Subklass in rdx.
3391 __ gen_subtype_check(rdx, ok_is_subtype);
3393 // Come here on failure
3394 __ xorl(rax, rax);
3395 __ jmpb(done);
3396 // Come here on success
3397 __ bind(ok_is_subtype);
3398 __ movl(rax, 1);
3400 // Collect counts on whether this test sees NULLs a lot or not.
3401 if (ProfileInterpreter) {
3402 __ jmp(done);
3403 __ bind(is_null);
3404 __ profile_null_seen(rcx);
3405 } else {
3406 __ bind(is_null); // same as 'done'
3407 }
3408 __ bind(done);
3409 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3410 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3411 }
3413 //-----------------------------------------------------------------------------
3414 // Breakpoints
3415 void TemplateTable::_breakpoint() {
3416 // Note: We get here even if we are single stepping...
3417 // jbug insists on setting breakpoints at every bytecode
3418 // even if we are in single step mode.
3420 transition(vtos, vtos);
3422 // get the unpatched byte code
3423 __ get_method(c_rarg1);
3424 __ call_VM(noreg,
3425 CAST_FROM_FN_PTR(address,
3426 InterpreterRuntime::get_original_bytecode_at),
3427 c_rarg1, r13);
3428 __ mov(rbx, rax);
3430 // post the breakpoint event
3431 __ get_method(c_rarg1);
3432 __ call_VM(noreg,
3433 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3434 c_rarg1, r13);
3436 // complete the execution of original bytecode
3437 __ dispatch_only_normal(vtos);
3438 }
3440 //-----------------------------------------------------------------------------
3441 // Exceptions
3443 void TemplateTable::athrow() {
3444 transition(atos, vtos);
3445 __ null_check(rax);
3446 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3447 }
3449 //-----------------------------------------------------------------------------
3450 // Synchronization
3451 //
3452 // Note: monitorenter & exit are symmetric routines, which is reflected
3453 // in the assembly code structure as well
3454 //
3455 // Stack layout:
3456 //
3457 // [expressions ] <--- rsp = expression stack top
3458 // ..
3459 // [expressions ]
3460 // [monitor entry] <--- monitor block top = expression stack bot
3461 // ..
3462 // [monitor entry]
3463 // [frame data ] <--- monitor block bot
3464 // ...
3465 // [saved rbp ] <--- rbp
3466 void TemplateTable::monitorenter() {
3467 transition(atos, vtos);
3469 // check for NULL object
3470 __ null_check(rax);
3472 const Address monitor_block_top(
3473 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3474 const Address monitor_block_bot(
3475 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3476 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3478 Label allocated;
3480 // initialize entry pointer
3481 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3483 // find a free slot in the monitor block (result in c_rarg1)
3484 {
3485 Label entry, loop, exit;
3486 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3487 // starting with top-most entry
3488 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3489 // of monitor block
3490 __ jmpb(entry);
3492 __ bind(loop);
3493 // check if current entry is used
3494 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3495 // if not used then remember entry in c_rarg1
3496 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3497 // check if current entry is for same object
3498 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3499 // if same object then stop searching
3500 __ jccb(Assembler::equal, exit);
3501 // otherwise advance to next entry
3502 __ addptr(c_rarg3, entry_size);
3503 __ bind(entry);
3504 // check if bottom reached
3505 __ cmpptr(c_rarg3, c_rarg2);
3506 // if not at bottom then check this entry
3507 __ jcc(Assembler::notEqual, loop);
3508 __ bind(exit);
3509 }
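// The slot search above, in C (sketch):
//
//   BasicObjectLock* free = NULL;
//   for (cur = top; cur != bot; cur = next(cur)) {
//     if (cur->obj() == NULL)    free = cur;   // remember free slot (cmov)
//     if (cur->obj() == lock_obj) break;       // same object: stop searching
//   }
//   // c_rarg1 == free; NULL means a new slot must be allocated below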
3511 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3512 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3514 // allocate one if there's no free slot
3515 {
3516 Label entry, loop;
3517 // 1. compute new pointers // rsp: old expression stack top
3518 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3519 __ subptr(rsp, entry_size); // move expression stack top
3520 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3521 __ mov(c_rarg3, rsp); // set start value for copy loop
3522 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3523 __ jmp(entry);
3524 // 2. move expression stack contents
3525 __ bind(loop);
3526 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3527 // word from old location
3528 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3529 __ addptr(c_rarg3, wordSize); // advance to next word
3530 __ bind(entry);
3531 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3532 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3533 // copy next word
3534 }
3536 // call run-time routine
3537 // c_rarg1: points to monitor entry
3538 __ bind(allocated);
3540 // Increment bcp to point to the next bytecode, so exception
3541 // handling for async. exceptions works correctly.
3542 // The object has already been popped from the stack, so the
3543 // expression stack looks correct.
3544 __ increment(r13);
3546 // store object
3547 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3548 __ lock_object(c_rarg1);
3550 // check to make sure this monitor doesn't cause stack overflow after locking
3551 __ save_bcp(); // in case of exception
3552 __ generate_stack_overflow_check(0);
3554 // The bcp has already been incremented. Just need to dispatch to
3555 // next instruction.
3556 __ dispatch_next(vtos);
3557 }
3560 void TemplateTable::monitorexit() {
3561 transition(atos, vtos);
3563 // check for NULL object
3564 __ null_check(rax);
3566 const Address monitor_block_top(
3567 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3568 const Address monitor_block_bot(
3569 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3570 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3572 Label found;
3574 // find matching slot
3575 {
3576 Label entry, loop;
3577 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3578 // starting with top-most entry
3579 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3580 // of monitor block
3581 __ jmpb(entry);
3583 __ bind(loop);
3584 // check if current entry is for same object
3585 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3586 // if same object then stop searching
3587 __ jcc(Assembler::equal, found);
3588 // otherwise advance to next entry
3589 __ addptr(c_rarg1, entry_size);
3590 __ bind(entry);
3591 // check if bottom reached
3592 __ cmpptr(c_rarg1, c_rarg2);
3593 // if not at bottom then check this entry
3594 __ jcc(Assembler::notEqual, loop);
3595 }
3597 // Error handling: unlocking was not block-structured (no matching slot found)
3598 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3599 InterpreterRuntime::throw_illegal_monitor_state_exception));
3600 __ should_not_reach_here();
3602 // call run-time routine
3603 // c_rarg1: points to monitor entry
3604 __ bind(found);
3605 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3606 __ unlock_object(c_rarg1);
3607 __ pop_ptr(rax); // discard object
3608 }
3611 // Wide instructions
3612 void TemplateTable::wide() {
3613 transition(vtos, vtos);
3614 __ load_unsigned_byte(rbx, at_bcp(1));
3615 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3616 __ jmp(Address(rscratch1, rbx, Address::times_8));
3617 // Note: the r13 increment step is part of the individual wide
3618 // bytecode implementations
3619 }
3622 // Multi arrays
3623 void TemplateTable::multianewarray() {
3624 transition(vtos, atos);
3625 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3626 // last dim is on top of stack; we want address of first one:
3627 // first_addr = last_addr + (ndims - 1) * wordSize
3628 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
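// e.g. with ndims == 3 (in rax): c_rarg1 = rsp + 3*8 - 8 = rsp + 16,
// the address of the first dimension; the last dimension sits at rsp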
3629 call_VM(rax,
3630 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3631 c_rarg1);
3632 __ load_unsigned_byte(rbx, at_bcp(3));
3633 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3634 }
3635 #endif // !CC_INTERP