Sat, 29 Sep 2012 06:40:00 -0400
8000213: NPG: Should have renamed arrayKlass and typeArrayKlass
Summary: Capitalize these metadata types (and objArrayKlass)
Reviewed-by: stefank, twisti, kvn
1 /*
2 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodData.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
38 #ifndef CC_INTERP
40 #define __ _masm->
42 // Platform-dependent initialization
44 void TemplateTable::pd_initialize() {
45 // No amd64 specific initialization
46 }
48 // Address computation: local variables
50 static inline Address iaddress(int n) {
51 return Address(r14, Interpreter::local_offset_in_bytes(n));
52 }
54 static inline Address laddress(int n) {
55 return iaddress(n + 1);
56 }
58 static inline Address faddress(int n) {
59 return iaddress(n);
60 }
62 static inline Address daddress(int n) {
63 return laddress(n);
64 }
66 static inline Address aaddress(int n) {
67 return iaddress(n);
68 }
70 static inline Address iaddress(Register r) {
71 return Address(r14, r, Address::times_8);
72 }
74 static inline Address laddress(Register r) {
75 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
76 }
78 static inline Address faddress(Register r) {
79 return iaddress(r);
80 }
82 static inline Address daddress(Register r) {
83 return laddress(r);
84 }
86 static inline Address aaddress(Register r) {
87 return iaddress(r);
88 }
90 static inline Address at_rsp() {
91 return Address(rsp, 0);
92 }
94 // At the top of the Java expression stack, which may be different from
95 // rsp(). It isn't different for category-1 values.
96 static inline Address at_tos () {
97 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
98 }
100 static inline Address at_tos_p1() {
101 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
102 }
104 static inline Address at_tos_p2() {
105 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
106 }
108 static inline Address at_tos_p3() {
109 return Address(rsp, Interpreter::expr_offset_in_bytes(3));
110 }
112 // Condition conversion
113 static Assembler::Condition j_not(TemplateTable::Condition cc) {
114 switch (cc) {
115 case TemplateTable::equal : return Assembler::notEqual;
116 case TemplateTable::not_equal : return Assembler::equal;
117 case TemplateTable::less : return Assembler::greaterEqual;
118 case TemplateTable::less_equal : return Assembler::greater;
119 case TemplateTable::greater : return Assembler::lessEqual;
120 case TemplateTable::greater_equal: return Assembler::less;
121 }
122 ShouldNotReachHere();
123 return Assembler::zero;
124 }
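// For example, if_icmplt maps to TemplateTable::less, for which j_not
// yields Assembler::greaterEqual: the generated jcc then branches to
// not_taken when the comparison fails, letting the taken (branch) case
// fall through.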
127 // Miscellaneous helper routines
128 // Store an oop (or NULL) at the address described by obj.
129 // If val == noreg, this means store a NULL.
131 static void do_oop_store(InterpreterMacroAssembler* _masm,
132 Address obj,
133 Register val,
134 BarrierSet::Name barrier,
135 bool precise) {
136 assert(val == noreg || val == rax, "parameter is just for looks");
137 switch (barrier) {
138 #ifndef SERIALGC
139 case BarrierSet::G1SATBCT:
140 case BarrierSet::G1SATBCTLogging:
141 {
142 // flatten object address if needed
143 if (obj.index() == noreg && obj.disp() == 0) {
144 if (obj.base() != rdx) {
145 __ movq(rdx, obj.base());
146 }
147 } else {
148 __ leaq(rdx, obj);
149 }
150 __ g1_write_barrier_pre(rdx /* obj */,
151 rbx /* pre_val */,
152 r15_thread /* thread */,
153 r8 /* tmp */,
154 val != noreg /* tosca_live */,
155 false /* expand_call */);
156 if (val == noreg) {
157 __ store_heap_oop_null(Address(rdx, 0));
158 } else {
159 __ store_heap_oop(Address(rdx, 0), val);
160 __ g1_write_barrier_post(rdx /* store_adr */,
161 val /* new_val */,
162 r15_thread /* thread */,
163 r8 /* tmp */,
164 rbx /* tmp2 */);
165 }
167 }
168 break;
169 #endif // SERIALGC
170 case BarrierSet::CardTableModRef:
171 case BarrierSet::CardTableExtension:
172 {
173 if (val == noreg) {
174 __ store_heap_oop_null(obj);
175 } else {
176 __ store_heap_oop(obj, val);
177 // flatten object address if needed
178 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
179 __ store_check(obj.base());
180 } else {
181 __ leaq(rdx, obj);
182 __ store_check(rdx);
183 }
184 }
185 }
186 break;
187 case BarrierSet::ModRef:
188 case BarrierSet::Other:
189 if (val == noreg) {
190 __ store_heap_oop_null(obj);
191 } else {
192 __ store_heap_oop(obj, val);
193 }
194 break;
195 default :
196 ShouldNotReachHere();
198 }
199 }
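// A typical use, as in aastore below, is a precise store into an array
// element:
//   do_oop_store(_masm, element_address, rax, _bs->kind(), true);
// Field stores generally pass precise == false, which allows the card
// mark to use the object start rather than the exact field address.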
201 Address TemplateTable::at_bcp(int offset) {
202 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
203 return Address(r13, offset);
204 }
206 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
207 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
208 int byte_no) {
209 if (!RewriteBytecodes) return;
210 Label L_patch_done;
212 switch (bc) {
213 case Bytecodes::_fast_aputfield:
214 case Bytecodes::_fast_bputfield:
215 case Bytecodes::_fast_cputfield:
216 case Bytecodes::_fast_dputfield:
217 case Bytecodes::_fast_fputfield:
218 case Bytecodes::_fast_iputfield:
219 case Bytecodes::_fast_lputfield:
220 case Bytecodes::_fast_sputfield:
221 {
222 // We skip bytecode quickening for putfield instructions when
223 // the put_code written to the constant pool cache is zero.
224 // This is required so that every execution of this instruction
225 // calls out to InterpreterRuntime::resolve_get_put to do
226 // additional, required work.
227 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
228 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
229 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
230 __ movl(bc_reg, bc);
231 __ cmpl(temp_reg, (int) 0);
232 __ jcc(Assembler::zero, L_patch_done); // don't patch
233 }
234 break;
235 default:
236 assert(byte_no == -1, "sanity");
237 // the pair bytecodes have already done the load.
238 if (load_bc_into_bc_reg) {
239 __ movl(bc_reg, bc);
240 }
241 }
243 if (JvmtiExport::can_post_breakpoint()) {
244 Label L_fast_patch;
245 // if a breakpoint is present we can't rewrite the stream directly
246 __ movzbl(temp_reg, at_bcp(0));
247 __ cmpl(temp_reg, Bytecodes::_breakpoint);
248 __ jcc(Assembler::notEqual, L_fast_patch);
249 __ get_method(temp_reg);
250 // Let breakpoint table handling rewrite to quicker bytecode
251 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
252 #ifndef ASSERT
253 __ jmpb(L_patch_done);
254 #else
255 __ jmp(L_patch_done);
256 #endif
257 __ bind(L_fast_patch);
258 }
260 #ifdef ASSERT
261 Label L_okay;
262 __ load_unsigned_byte(temp_reg, at_bcp(0));
263 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
264 __ jcc(Assembler::equal, L_okay);
265 __ cmpl(temp_reg, bc_reg);
266 __ jcc(Assembler::equal, L_okay);
267 __ stop("patching the wrong bytecode");
268 __ bind(L_okay);
269 #endif
271 // patch bytecode
272 __ movb(at_bcp(0), bc_reg);
273 __ bind(L_patch_done);
274 }
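// Quickening example: once a putfield has been resolved, the stream
//   putfield #12          (two operand bytes)
// is patched in place to
//   fast_iputfield #12
// so later executions dispatch directly to the fast template without
// re-checking resolution. (#12 is an arbitrary constant-pool index.)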
277 // Individual instructions
279 void TemplateTable::nop() {
280 transition(vtos, vtos);
281 // nothing to do
282 }
284 void TemplateTable::shouldnotreachhere() {
285 transition(vtos, vtos);
286 __ stop("shouldnotreachhere bytecode");
287 }
289 void TemplateTable::aconst_null() {
290 transition(vtos, atos);
291 __ xorl(rax, rax);
292 }
294 void TemplateTable::iconst(int value) {
295 transition(vtos, itos);
296 if (value == 0) {
297 __ xorl(rax, rax);
298 } else {
299 __ movl(rax, value);
300 }
301 }
303 void TemplateTable::lconst(int value) {
304 transition(vtos, ltos);
305 if (value == 0) {
306 __ xorl(rax, rax);
307 } else {
308 __ movl(rax, value);
309 }
310 }
312 void TemplateTable::fconst(int value) {
313 transition(vtos, ftos);
314 static float one = 1.0f, two = 2.0f;
315 switch (value) {
316 case 0:
317 __ xorps(xmm0, xmm0);
318 break;
319 case 1:
320 __ movflt(xmm0, ExternalAddress((address) &one));
321 break;
322 case 2:
323 __ movflt(xmm0, ExternalAddress((address) &two));
324 break;
325 default:
326 ShouldNotReachHere();
327 break;
328 }
329 }
331 void TemplateTable::dconst(int value) {
332 transition(vtos, dtos);
333 static double one = 1.0;
334 switch (value) {
335 case 0:
336 __ xorpd(xmm0, xmm0);
337 break;
338 case 1:
339 __ movdbl(xmm0, ExternalAddress((address) &one));
340 break;
341 default:
342 ShouldNotReachHere();
343 break;
344 }
345 }
347 void TemplateTable::bipush() {
348 transition(vtos, itos);
349 __ load_signed_byte(rax, at_bcp(1));
350 }
352 void TemplateTable::sipush() {
353 transition(vtos, itos);
354 __ load_unsigned_short(rax, at_bcp(1));
355 __ bswapl(rax);
356 __ sarl(rax, 16);
357 }
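// Example: for the stream { sipush, 0x12, 0x34 } the little-endian
// 16-bit load yields 0x3412, bswapl turns that into 0x12340000, and the
// arithmetic right shift by 16 leaves the sign-extended operand 0x1234
// in rax.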
359 void TemplateTable::ldc(bool wide) {
360 transition(vtos, vtos);
361 Label call_ldc, notFloat, notClass, Done;
363 if (wide) {
364 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
365 } else {
366 __ load_unsigned_byte(rbx, at_bcp(1));
367 }
369 __ get_cpool_and_tags(rcx, rax);
370 const int base_offset = ConstantPool::header_size() * wordSize;
371 const int tags_offset = Array<u1>::base_offset_in_bytes();
373 // get type
374 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
376 // unresolved class - get the resolved class
377 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
378 __ jccb(Assembler::equal, call_ldc);
380 // unresolved class in error state - call into runtime to throw the error
381 // from the first resolution attempt
382 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
383 __ jccb(Assembler::equal, call_ldc);
385 // resolved class - need to call vm to get java mirror of the class
386 __ cmpl(rdx, JVM_CONSTANT_Class);
387 __ jcc(Assembler::notEqual, notClass);
389 __ bind(call_ldc);
390 __ movl(c_rarg1, wide);
391 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
392 __ push_ptr(rax);
393 __ verify_oop(rax);
394 __ jmp(Done);
396 __ bind(notClass);
397 __ cmpl(rdx, JVM_CONSTANT_Float);
398 __ jccb(Assembler::notEqual, notFloat);
399 // ftos
400 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
401 __ push_f();
402 __ jmp(Done);
404 __ bind(notFloat);
405 #ifdef ASSERT
406 {
407 Label L;
408 __ cmpl(rdx, JVM_CONSTANT_Integer);
409 __ jcc(Assembler::equal, L);
410 // String and Object are rewritten to fast_aldc
411 __ stop("unexpected tag type in ldc");
412 __ bind(L);
413 }
414 #endif
415 // itos JVM_CONSTANT_Integer only
416 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
417 __ push_i(rax);
418 __ bind(Done);
419 }
421 // Fast path for caching oop constants.
422 void TemplateTable::fast_aldc(bool wide) {
423 transition(vtos, atos);
425 Register result = rax;
426 Register tmp = rdx;
427 int index_size = wide ? sizeof(u2) : sizeof(u1);
429 Label resolved;
431 // We are resolved if the resolved reference cache entry contains a
432 // non-null object (String, MethodType, etc.)
433 assert_different_registers(result, tmp);
434 __ get_cache_index_at_bcp(tmp, 1, index_size);
435 __ load_resolved_reference_at_index(result, tmp);
436 __ testl(result, result);
437 __ jcc(Assembler::notZero, resolved);
439 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
441 // first time invocation - must resolve first
442 __ movl(tmp, (int)bytecode());
443 __ call_VM(result, entry, tmp);
445 __ bind(resolved);
447 if (VerifyOops) {
448 __ verify_oop(result);
449 }
450 }
452 void TemplateTable::ldc2_w() {
453 transition(vtos, vtos);
454 Label Long, Done;
455 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
457 __ get_cpool_and_tags(rcx, rax);
458 const int base_offset = ConstantPool::header_size() * wordSize;
459 const int tags_offset = Array<u1>::base_offset_in_bytes();
461 // get type
462 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
463 JVM_CONSTANT_Double);
464 __ jccb(Assembler::notEqual, Long);
465 // dtos
466 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
467 __ push_d();
468 __ jmpb(Done);
470 __ bind(Long);
471 // ltos
472 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
473 __ push_l();
475 __ bind(Done);
476 }
478 void TemplateTable::locals_index(Register reg, int offset) {
479 __ load_unsigned_byte(reg, at_bcp(offset));
480 __ negptr(reg);
481 }
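// The index is negated because locals live below the locals pointer:
// local slot n is at r14 - n*8, and iaddress(Register) above scales the
// (already negated) index by times_8 off r14.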
483 void TemplateTable::iload() {
484 transition(vtos, itos);
485 if (RewriteFrequentPairs) {
486 Label rewrite, done;
487 const Register bc = c_rarg3;
488 assert(rbx != bc, "register damaged");
490 // get next byte
491 __ load_unsigned_byte(rbx,
492 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
493 // If the next bytecode is _iload, wait to rewrite: we only want to
494 // rewrite the last two iloads in a pair. If it is _fast_iload, the
495 // following iload has already been rewritten, so this iload and the
496 // next form an iload pair.
497 __ cmpl(rbx, Bytecodes::_iload);
498 __ jcc(Assembler::equal, done);
500 __ cmpl(rbx, Bytecodes::_fast_iload);
501 __ movl(bc, Bytecodes::_fast_iload2);
502 __ jccb(Assembler::equal, rewrite);
504 // if _caload, rewrite to fast_icaload
505 __ cmpl(rbx, Bytecodes::_caload);
506 __ movl(bc, Bytecodes::_fast_icaload);
507 __ jccb(Assembler::equal, rewrite);
509 // else rewrite to _fast_iload so this iload doesn't check again.
510 __ movl(bc, Bytecodes::_fast_iload);
512 // rewrite
513 // bc: fast bytecode
514 __ bind(rewrite);
515 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
516 __ bind(done);
517 }
519 // Get the local value into tos
520 locals_index(rbx);
521 __ movl(rax, iaddress(rbx));
522 }
524 void TemplateTable::fast_iload2() {
525 transition(vtos, itos);
526 locals_index(rbx);
527 __ movl(rax, iaddress(rbx));
528 __ push(itos);
529 locals_index(rbx, 3);
530 __ movl(rax, iaddress(rbx));
531 }
533 void TemplateTable::fast_iload() {
534 transition(vtos, itos);
535 locals_index(rbx);
536 __ movl(rax, iaddress(rbx));
537 }
539 void TemplateTable::lload() {
540 transition(vtos, ltos);
541 locals_index(rbx);
542 __ movq(rax, laddress(rbx));
543 }
545 void TemplateTable::fload() {
546 transition(vtos, ftos);
547 locals_index(rbx);
548 __ movflt(xmm0, faddress(rbx));
549 }
551 void TemplateTable::dload() {
552 transition(vtos, dtos);
553 locals_index(rbx);
554 __ movdbl(xmm0, daddress(rbx));
555 }
557 void TemplateTable::aload() {
558 transition(vtos, atos);
559 locals_index(rbx);
560 __ movptr(rax, aaddress(rbx));
561 }
563 void TemplateTable::locals_index_wide(Register reg) {
564 __ movl(reg, at_bcp(2));
565 __ bswapl(reg);
566 __ shrl(reg, 16);
567 __ negptr(reg);
568 }
570 void TemplateTable::wide_iload() {
571 transition(vtos, itos);
572 locals_index_wide(rbx);
573 __ movl(rax, iaddress(rbx));
574 }
576 void TemplateTable::wide_lload() {
577 transition(vtos, ltos);
578 locals_index_wide(rbx);
579 __ movq(rax, laddress(rbx));
580 }
582 void TemplateTable::wide_fload() {
583 transition(vtos, ftos);
584 locals_index_wide(rbx);
585 __ movflt(xmm0, faddress(rbx));
586 }
588 void TemplateTable::wide_dload() {
589 transition(vtos, dtos);
590 locals_index_wide(rbx);
591 __ movdbl(xmm0, daddress(rbx));
592 }
594 void TemplateTable::wide_aload() {
595 transition(vtos, atos);
596 locals_index_wide(rbx);
597 __ movptr(rax, aaddress(rbx));
598 }
600 void TemplateTable::index_check(Register array, Register index) {
601 // destroys rbx
602 // check array
603 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
604 // sign extend index for use by indexed load
605 __ movl2ptr(index, index);
606 // check index
607 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
608 if (index != rbx) {
609 // ??? convention: move aberrant index into ebx for exception message
610 assert(rbx != array, "different registers");
611 __ movl(rbx, index);
612 }
613 __ jump_cc(Assembler::aboveEqual,
614 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
615 }
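// Note that the branch uses aboveEqual, an unsigned comparison: after
// sign extension a negative index looks like a huge unsigned value, so
// index < 0 and index >= length both take the out-of-bounds path with a
// single test.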
617 void TemplateTable::iaload() {
618 transition(itos, itos);
619 __ pop_ptr(rdx);
620 // eax: index
621 // rdx: array
622 index_check(rdx, rax); // kills rbx
623 __ movl(rax, Address(rdx, rax,
624 Address::times_4,
625 arrayOopDesc::base_offset_in_bytes(T_INT)));
626 }
628 void TemplateTable::laload() {
629 transition(itos, ltos);
630 __ pop_ptr(rdx);
631 // eax: index
632 // rdx: array
633 index_check(rdx, rax); // kills rbx
634 __ movq(rax, Address(rdx, rbx,
635 Address::times_8,
636 arrayOopDesc::base_offset_in_bytes(T_LONG)));
637 }
639 void TemplateTable::faload() {
640 transition(itos, ftos);
641 __ pop_ptr(rdx);
642 // eax: index
643 // rdx: array
644 index_check(rdx, rax); // kills rbx
645 __ movflt(xmm0, Address(rdx, rax,
646 Address::times_4,
647 arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
648 }
650 void TemplateTable::daload() {
651 transition(itos, dtos);
652 __ pop_ptr(rdx);
653 // eax: index
654 // rdx: array
655 index_check(rdx, rax); // kills rbx
656 __ movdbl(xmm0, Address(rdx, rax,
657 Address::times_8,
658 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
659 }
661 void TemplateTable::aaload() {
662 transition(itos, atos);
663 __ pop_ptr(rdx);
664 // eax: index
665 // rdx: array
666 index_check(rdx, rax); // kills rbx
667 __ load_heap_oop(rax, Address(rdx, rax,
668 UseCompressedOops ? Address::times_4 : Address::times_8,
669 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
670 }
672 void TemplateTable::baload() {
673 transition(itos, itos);
674 __ pop_ptr(rdx);
675 // eax: index
676 // rdx: array
677 index_check(rdx, rax); // kills rbx
678 __ load_signed_byte(rax,
679 Address(rdx, rax,
680 Address::times_1,
681 arrayOopDesc::base_offset_in_bytes(T_BYTE)));
682 }
684 void TemplateTable::caload() {
685 transition(itos, itos);
686 __ pop_ptr(rdx);
687 // eax: index
688 // rdx: array
689 index_check(rdx, rax); // kills rbx
690 __ load_unsigned_short(rax,
691 Address(rdx, rax,
692 Address::times_2,
693 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
694 }
696 // iload followed by caload frequent pair
697 void TemplateTable::fast_icaload() {
698 transition(vtos, itos);
699 // load index out of locals
700 locals_index(rbx);
701 __ movl(rax, iaddress(rbx));
703 // eax: index
704 // rdx: array
705 __ pop_ptr(rdx);
706 index_check(rdx, rax); // kills rbx
707 __ load_unsigned_short(rax,
708 Address(rdx, rax,
709 Address::times_2,
710 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
711 }
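// Example: the frequent pair
//   iload #n ; caload
// is rewritten by iload() above into a single fast_icaload, saving one
// dispatch; the local index is re-read from the original iload operand.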
713 void TemplateTable::saload() {
714 transition(itos, itos);
715 __ pop_ptr(rdx);
716 // eax: index
717 // rdx: array
718 index_check(rdx, rax); // kills rbx
719 __ load_signed_short(rax,
720 Address(rdx, rax,
721 Address::times_2,
722 arrayOopDesc::base_offset_in_bytes(T_SHORT)));
723 }
725 void TemplateTable::iload(int n) {
726 transition(vtos, itos);
727 __ movl(rax, iaddress(n));
728 }
730 void TemplateTable::lload(int n) {
731 transition(vtos, ltos);
732 __ movq(rax, laddress(n));
733 }
735 void TemplateTable::fload(int n) {
736 transition(vtos, ftos);
737 __ movflt(xmm0, faddress(n));
738 }
740 void TemplateTable::dload(int n) {
741 transition(vtos, dtos);
742 __ movdbl(xmm0, daddress(n));
743 }
745 void TemplateTable::aload(int n) {
746 transition(vtos, atos);
747 __ movptr(rax, aaddress(n));
748 }
750 void TemplateTable::aload_0() {
751 transition(vtos, atos);
752 // According to bytecode histograms, the pairs:
753 //
754 // _aload_0, _fast_igetfield
755 // _aload_0, _fast_agetfield
756 // _aload_0, _fast_fgetfield
757 //
758 // occur frequently. If RewriteFrequentPairs is set, the (slow)
759 // _aload_0 bytecode checks if the next bytecode is either
760 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
761 // rewrites the current bytecode into a pair bytecode; otherwise it
762 // rewrites the current bytecode into _fast_aload_0 that doesn't do
763 // the pair check anymore.
764 //
765 // Note: If the next bytecode is _getfield, the rewrite must be
766 // delayed, otherwise we may miss an opportunity for a pair.
767 //
768 // Also rewrite frequent pairs
769 // aload_0, aload_1
770 // aload_0, iload_1
771 // These bytecodes with a small amount of code are most profitable
772 // to rewrite
773 if (RewriteFrequentPairs) {
774 Label rewrite, done;
775 const Register bc = c_rarg3;
776 assert(rbx != bc, "register damaged");
777 // get next byte
778 __ load_unsigned_byte(rbx,
779 at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
781 // do actual aload_0
782 aload(0);
784 // if _getfield then wait with rewrite
785 __ cmpl(rbx, Bytecodes::_getfield);
786 __ jcc(Assembler::equal, done);
788 // if _igetfield then rewrite to _fast_iaccess_0
789 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
790 Bytecodes::_aload_0,
791 "fix bytecode definition");
792 __ cmpl(rbx, Bytecodes::_fast_igetfield);
793 __ movl(bc, Bytecodes::_fast_iaccess_0);
794 __ jccb(Assembler::equal, rewrite);
796 // if _agetfield then rewrite to _fast_aaccess_0
797 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
798 Bytecodes::_aload_0,
799 "fix bytecode definition");
800 __ cmpl(rbx, Bytecodes::_fast_agetfield);
801 __ movl(bc, Bytecodes::_fast_aaccess_0);
802 __ jccb(Assembler::equal, rewrite);
804 // if _fgetfield then rewrite to _fast_faccess_0
805 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
806 Bytecodes::_aload_0,
807 "fix bytecode definition");
808 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
809 __ movl(bc, Bytecodes::_fast_faccess_0);
810 __ jccb(Assembler::equal, rewrite);
812 // else rewrite to _fast_aload0
813 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
814 Bytecodes::_aload_0,
815 "fix bytecode definition");
816 __ movl(bc, Bytecodes::_fast_aload_0);
818 // rewrite
819 // bc: fast bytecode
820 __ bind(rewrite);
821 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
823 __ bind(done);
824 } else {
825 aload(0);
826 }
827 }
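// Rewriting example: for a field read such as this.f, the stream
//   aload_0 ; getfield #7
// is left alone until the getfield has been quickened; once the next
// bytecode reads _fast_igetfield, aload_0 is patched to _fast_iaccess_0,
// folding the pair into one template. (#7 is an arbitrary CP index.)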
829 void TemplateTable::istore() {
830 transition(itos, vtos);
831 locals_index(rbx);
832 __ movl(iaddress(rbx), rax);
833 }
835 void TemplateTable::lstore() {
836 transition(ltos, vtos);
837 locals_index(rbx);
838 __ movq(laddress(rbx), rax);
839 }
841 void TemplateTable::fstore() {
842 transition(ftos, vtos);
843 locals_index(rbx);
844 __ movflt(faddress(rbx), xmm0);
845 }
847 void TemplateTable::dstore() {
848 transition(dtos, vtos);
849 locals_index(rbx);
850 __ movdbl(daddress(rbx), xmm0);
851 }
853 void TemplateTable::astore() {
854 transition(vtos, vtos);
855 __ pop_ptr(rax);
856 locals_index(rbx);
857 __ movptr(aaddress(rbx), rax);
858 }
860 void TemplateTable::wide_istore() {
861 transition(vtos, vtos);
862 __ pop_i();
863 locals_index_wide(rbx);
864 __ movl(iaddress(rbx), rax);
865 }
867 void TemplateTable::wide_lstore() {
868 transition(vtos, vtos);
869 __ pop_l();
870 locals_index_wide(rbx);
871 __ movq(laddress(rbx), rax);
872 }
874 void TemplateTable::wide_fstore() {
875 transition(vtos, vtos);
876 __ pop_f();
877 locals_index_wide(rbx);
878 __ movflt(faddress(rbx), xmm0);
879 }
881 void TemplateTable::wide_dstore() {
882 transition(vtos, vtos);
883 __ pop_d();
884 locals_index_wide(rbx);
885 __ movdbl(daddress(rbx), xmm0);
886 }
888 void TemplateTable::wide_astore() {
889 transition(vtos, vtos);
890 __ pop_ptr(rax);
891 locals_index_wide(rbx);
892 __ movptr(aaddress(rbx), rax);
893 }
895 void TemplateTable::iastore() {
896 transition(itos, vtos);
897 __ pop_i(rbx);
898 __ pop_ptr(rdx);
899 // eax: value
900 // ebx: index
901 // rdx: array
902 index_check(rdx, rbx); // prefer index in ebx
903 __ movl(Address(rdx, rbx,
904 Address::times_4,
905 arrayOopDesc::base_offset_in_bytes(T_INT)),
906 rax);
907 }
909 void TemplateTable::lastore() {
910 transition(ltos, vtos);
911 __ pop_i(rbx);
912 __ pop_ptr(rdx);
913 // rax: value
914 // ebx: index
915 // rdx: array
916 index_check(rdx, rbx); // prefer index in ebx
917 __ movq(Address(rdx, rbx,
918 Address::times_8,
919 arrayOopDesc::base_offset_in_bytes(T_LONG)),
920 rax);
921 }
923 void TemplateTable::fastore() {
924 transition(ftos, vtos);
925 __ pop_i(rbx);
926 __ pop_ptr(rdx);
927 // xmm0: value
928 // ebx: index
929 // rdx: array
930 index_check(rdx, rbx); // prefer index in ebx
931 __ movflt(Address(rdx, rbx,
932 Address::times_4,
933 arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
934 xmm0);
935 }
937 void TemplateTable::dastore() {
938 transition(dtos, vtos);
939 __ pop_i(rbx);
940 __ pop_ptr(rdx);
941 // xmm0: value
942 // ebx: index
943 // rdx: array
944 index_check(rdx, rbx); // prefer index in ebx
945 __ movdbl(Address(rdx, rbx,
946 Address::times_8,
947 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
948 xmm0);
949 }
951 void TemplateTable::aastore() {
952 Label is_null, ok_is_subtype, done;
953 transition(vtos, vtos);
954 // stack: ..., array, index, value
955 __ movptr(rax, at_tos()); // value
956 __ movl(rcx, at_tos_p1()); // index
957 __ movptr(rdx, at_tos_p2()); // array
959 Address element_address(rdx, rcx,
960 UseCompressedOops? Address::times_4 : Address::times_8,
961 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
963 index_check(rdx, rcx); // kills rbx
964 // do array store check - check for NULL value first
965 __ testptr(rax, rax);
966 __ jcc(Assembler::zero, is_null);
968 // Move subklass into rbx
969 __ load_klass(rbx, rax);
970 // Move superklass into rax
971 __ load_klass(rax, rdx);
972 __ movptr(rax, Address(rax,
973 ObjArrayKlass::element_klass_offset()));
974 // Compress array + index*oopSize + 12 into a single register. Frees rcx.
975 __ lea(rdx, element_address);
977 // Generate subtype check. Blows rcx, rdi
978 // Superklass in rax. Subklass in rbx.
979 __ gen_subtype_check(rbx, ok_is_subtype);
981 // Come here on failure
982 // object is at TOS
983 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
985 // Come here on success
986 __ bind(ok_is_subtype);
988 // Get the value we will store
989 __ movptr(rax, at_tos());
990 // Now store using the appropriate barrier
991 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
992 __ jmp(done);
994 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
995 __ bind(is_null);
996 __ profile_null_seen(rbx);
998 // Store a NULL
999 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1001 // Pop stack arguments
1002 __ bind(done);
1003 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1004 }
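// Store-check example (pseudo-Java): for
//   Object[] a = new String[1];
//   a[0] = new Object();   // not a String
// the subtype check above fails and we jump to the ArrayStoreException
// entry with the offending value at TOS.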
1006 void TemplateTable::bastore() {
1007 transition(itos, vtos);
1008 __ pop_i(rbx);
1009 __ pop_ptr(rdx);
1010 // eax: value
1011 // ebx: index
1012 // rdx: array
1013 index_check(rdx, rbx); // prefer index in ebx
1014 __ movb(Address(rdx, rbx,
1015 Address::times_1,
1016 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1017 rax);
1018 }
1020 void TemplateTable::castore() {
1021 transition(itos, vtos);
1022 __ pop_i(rbx);
1023 __ pop_ptr(rdx);
1024 // eax: value
1025 // ebx: index
1026 // rdx: array
1027 index_check(rdx, rbx); // prefer index in ebx
1028 __ movw(Address(rdx, rbx,
1029 Address::times_2,
1030 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1031 rax);
1032 }
1034 void TemplateTable::sastore() {
1035 castore();
1036 }
1038 void TemplateTable::istore(int n) {
1039 transition(itos, vtos);
1040 __ movl(iaddress(n), rax);
1041 }
1043 void TemplateTable::lstore(int n) {
1044 transition(ltos, vtos);
1045 __ movq(laddress(n), rax);
1046 }
1048 void TemplateTable::fstore(int n) {
1049 transition(ftos, vtos);
1050 __ movflt(faddress(n), xmm0);
1051 }
1053 void TemplateTable::dstore(int n) {
1054 transition(dtos, vtos);
1055 __ movdbl(daddress(n), xmm0);
1056 }
1058 void TemplateTable::astore(int n) {
1059 transition(vtos, vtos);
1060 __ pop_ptr(rax);
1061 __ movptr(aaddress(n), rax);
1062 }
1064 void TemplateTable::pop() {
1065 transition(vtos, vtos);
1066 __ addptr(rsp, Interpreter::stackElementSize);
1067 }
1069 void TemplateTable::pop2() {
1070 transition(vtos, vtos);
1071 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1072 }
1074 void TemplateTable::dup() {
1075 transition(vtos, vtos);
1076 __ load_ptr(0, rax);
1077 __ push_ptr(rax);
1078 // stack: ..., a, a
1079 }
1081 void TemplateTable::dup_x1() {
1082 transition(vtos, vtos);
1083 // stack: ..., a, b
1084 __ load_ptr( 0, rax); // load b
1085 __ load_ptr( 1, rcx); // load a
1086 __ store_ptr(1, rax); // store b
1087 __ store_ptr(0, rcx); // store a
1088 __ push_ptr(rax); // push b
1089 // stack: ..., b, a, b
1090 }
1092 void TemplateTable::dup_x2() {
1093 transition(vtos, vtos);
1094 // stack: ..., a, b, c
1095 __ load_ptr( 0, rax); // load c
1096 __ load_ptr( 2, rcx); // load a
1097 __ store_ptr(2, rax); // store c in a
1098 __ push_ptr(rax); // push c
1099 // stack: ..., c, b, c, c
1100 __ load_ptr( 2, rax); // load b
1101 __ store_ptr(2, rcx); // store a in b
1102 // stack: ..., c, a, c, c
1103 __ store_ptr(1, rax); // store b in c
1104 // stack: ..., c, a, b, c
1105 }
1107 void TemplateTable::dup2() {
1108 transition(vtos, vtos);
1109 // stack: ..., a, b
1110 __ load_ptr(1, rax); // load a
1111 __ push_ptr(rax); // push a
1112 __ load_ptr(1, rax); // load b
1113 __ push_ptr(rax); // push b
1114 // stack: ..., a, b, a, b
1115 }
1117 void TemplateTable::dup2_x1() {
1118 transition(vtos, vtos);
1119 // stack: ..., a, b, c
1120 __ load_ptr( 0, rcx); // load c
1121 __ load_ptr( 1, rax); // load b
1122 __ push_ptr(rax); // push b
1123 __ push_ptr(rcx); // push c
1124 // stack: ..., a, b, c, b, c
1125 __ store_ptr(3, rcx); // store c in b
1126 // stack: ..., a, c, c, b, c
1127 __ load_ptr( 4, rcx); // load a
1128 __ store_ptr(2, rcx); // store a in 2nd c
1129 // stack: ..., a, c, a, b, c
1130 __ store_ptr(4, rax); // store b in a
1131 // stack: ..., b, c, a, b, c
1132 }
1134 void TemplateTable::dup2_x2() {
1135 transition(vtos, vtos);
1136 // stack: ..., a, b, c, d
1137 __ load_ptr( 0, rcx); // load d
1138 __ load_ptr( 1, rax); // load c
1139 __ push_ptr(rax); // push c
1140 __ push_ptr(rcx); // push d
1141 // stack: ..., a, b, c, d, c, d
1142 __ load_ptr( 4, rax); // load b
1143 __ store_ptr(2, rax); // store b in d
1144 __ store_ptr(4, rcx); // store d in b
1145 // stack: ..., a, d, c, b, c, d
1146 __ load_ptr( 5, rcx); // load a
1147 __ load_ptr( 3, rax); // load c
1148 __ store_ptr(3, rcx); // store a in c
1149 __ store_ptr(5, rax); // store c in a
1150 // stack: ..., c, d, a, b, c, d
1151 }
1153 void TemplateTable::swap() {
1154 transition(vtos, vtos);
1155 // stack: ..., a, b
1156 __ load_ptr( 1, rcx); // load a
1157 __ load_ptr( 0, rax); // load b
1158 __ store_ptr(0, rcx); // store a in b
1159 __ store_ptr(1, rax); // store b in a
1160 // stack: ..., b, a
1161 }
1163 void TemplateTable::iop2(Operation op) {
1164 transition(itos, itos);
1165 switch (op) {
1166 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1167 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1168 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1169 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1170 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1171 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1172 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
1173 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
1174 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
1175 default : ShouldNotReachHere();
1176 }
1177 }
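// The shift cases first move the count into rcx because the x86
// variable-shift instructions (shll/sarl/shrl) take their count in CL.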
1179 void TemplateTable::lop2(Operation op) {
1180 transition(ltos, ltos);
1181 switch (op) {
1182 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
1183 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
1184 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
1185 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
1186 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
1187 default : ShouldNotReachHere();
1188 }
1189 }
1191 void TemplateTable::idiv() {
1192 transition(itos, itos);
1193 __ movl(rcx, rax);
1194 __ pop_i(rax);
1195 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1196 // they are not equal, one could do a normal division (no correction
1197 // needed), which may speed up this implementation for the common case.
1198 // (see also JVM spec., p.243 & p.271)
1199 __ corrected_idivl(rcx);
1200 }
1202 void TemplateTable::irem() {
1203 transition(itos, itos);
1204 __ movl(rcx, rax);
1205 __ pop_i(rax);
1206 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1207 // they are not equal, one could do a normal division (no correction
1208 // needed), which may speed up this implementation for the common case.
1209 // (see also JVM spec., p.243 & p.271)
1210 __ corrected_idivl(rcx);
1211 __ movl(rax, rdx);
1212 }
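// The correction handles the one overflowing case: min_int / -1 would
// raise a divide error in hardware, so corrected_idivl special-cases it
// to yield min_int for idiv and 0 for irem, as the JVM spec requires.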
1214 void TemplateTable::lmul() {
1215 transition(ltos, ltos);
1216 __ pop_l(rdx);
1217 __ imulq(rax, rdx);
1218 }
1220 void TemplateTable::ldiv() {
1221 transition(ltos, ltos);
1222 __ mov(rcx, rax);
1223 __ pop_l(rax);
1224 // generate explicit div0 check
1225 __ testq(rcx, rcx);
1226 __ jump_cc(Assembler::zero,
1227 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1228 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1229 // they are not equal, one could do a normal division (no correction
1230 // needed), which may speed up this implementation for the common case.
1231 // (see also JVM spec., p.243 & p.271)
1232 __ corrected_idivq(rcx); // kills rbx
1233 }
1235 void TemplateTable::lrem() {
1236 transition(ltos, ltos);
1237 __ mov(rcx, rax);
1238 __ pop_l(rax);
1239 __ testq(rcx, rcx);
1240 __ jump_cc(Assembler::zero,
1241 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1242 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1243 // they are not equal, one could do a normal division (no correction
1244 // needed), which may speed up this implementation for the common case.
1245 // (see also JVM spec., p.243 & p.271)
1246 __ corrected_idivq(rcx); // kills rbx
1247 __ mov(rax, rdx);
1248 }
1250 void TemplateTable::lshl() {
1251 transition(itos, ltos);
1252 __ movl(rcx, rax); // get shift count
1253 __ pop_l(rax); // get shift value
1254 __ shlq(rax);
1255 }
1257 void TemplateTable::lshr() {
1258 transition(itos, ltos);
1259 __ movl(rcx, rax); // get shift count
1260 __ pop_l(rax); // get shift value
1261 __ sarq(rax);
1262 }
1264 void TemplateTable::lushr() {
1265 transition(itos, ltos);
1266 __ movl(rcx, rax); // get shift count
1267 __ pop_l(rax); // get shift value
1268 __ shrq(rax);
1269 }
1271 void TemplateTable::fop2(Operation op) {
1272 transition(ftos, ftos);
1273 switch (op) {
1274 case add:
1275 __ addss(xmm0, at_rsp());
1276 __ addptr(rsp, Interpreter::stackElementSize);
1277 break;
1278 case sub:
1279 __ movflt(xmm1, xmm0);
1280 __ pop_f(xmm0);
1281 __ subss(xmm0, xmm1);
1282 break;
1283 case mul:
1284 __ mulss(xmm0, at_rsp());
1285 __ addptr(rsp, Interpreter::stackElementSize);
1286 break;
1287 case div:
1288 __ movflt(xmm1, xmm0);
1289 __ pop_f(xmm0);
1290 __ divss(xmm0, xmm1);
1291 break;
1292 case rem:
1293 __ movflt(xmm1, xmm0);
1294 __ pop_f(xmm0);
1295 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1296 break;
1297 default:
1298 ShouldNotReachHere();
1299 break;
1300 }
1301 }
1303 void TemplateTable::dop2(Operation op) {
1304 transition(dtos, dtos);
1305 switch (op) {
1306 case add:
1307 __ addsd(xmm0, at_rsp());
1308 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1309 break;
1310 case sub:
1311 __ movdbl(xmm1, xmm0);
1312 __ pop_d(xmm0);
1313 __ subsd(xmm0, xmm1);
1314 break;
1315 case mul:
1316 __ mulsd(xmm0, at_rsp());
1317 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1318 break;
1319 case div:
1320 __ movdbl(xmm1, xmm0);
1321 __ pop_d(xmm0);
1322 __ divsd(xmm0, xmm1);
1323 break;
1324 case rem:
1325 __ movdbl(xmm1, xmm0);
1326 __ pop_d(xmm0);
1327 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1328 break;
1329 default:
1330 ShouldNotReachHere();
1331 break;
1332 }
1333 }
1335 void TemplateTable::ineg() {
1336 transition(itos, itos);
1337 __ negl(rax);
1338 }
1340 void TemplateTable::lneg() {
1341 transition(ltos, ltos);
1342 __ negq(rax);
1343 }
1345 // Note: 'double' and 'long long' have 32-bit alignment on x86.
1346 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1347 // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address
1348 // for the 128-bit operands of SSE instructions.
1349 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1350 // Store the value into a 128-bit operand.
1351 operand[0] = lo;
1352 operand[1] = hi;
1353 return operand;
1354 }
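// Example: masking with ~0xF rounds an address such as 0x1018 down to
// 0x1010; the pools below hold 2*2 jlongs (32 bytes) so the aligned
// 16-byte slot computed from &pool[1] always lies inside the pool.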
1356 // Buffer for the 128-bit masks used by SSE instructions.
1357 static jlong float_signflip_pool[2*2];
1358 static jlong double_signflip_pool[2*2];
1360 void TemplateTable::fneg() {
1361 transition(ftos, ftos);
1362 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
1363 __ xorps(xmm0, ExternalAddress((address) float_signflip));
1364 }
1366 void TemplateTable::dneg() {
1367 transition(dtos, dtos);
1368 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
1369 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1370 }
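// xorps/xorpd against these masks flips only the IEEE sign bit: for
// example fneg turns 1.0f (0x3f800000) into -1.0f (0xbf800000), and it
// behaves correctly for -0.0 and NaN, unlike a subtract from zero.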
1372 void TemplateTable::iinc() {
1373 transition(vtos, vtos);
1374 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1375 locals_index(rbx);
1376 __ addl(iaddress(rbx), rdx);
1377 }
1379 void TemplateTable::wide_iinc() {
1380 transition(vtos, vtos);
1381 __ movl(rdx, at_bcp(4)); // get constant
1382 locals_index_wide(rbx);
1383 __ bswapl(rdx); // swap bytes & sign-extend constant
1384 __ sarl(rdx, 16);
1385 __ addl(iaddress(rbx), rdx);
1386 // Note: should probably use only one movl to get both
1387 // the index and the constant -> fix this
1388 }
1390 void TemplateTable::convert() {
1391 // Checking
1392 #ifdef ASSERT
1393 {
1394 TosState tos_in = ilgl;
1395 TosState tos_out = ilgl;
1396 switch (bytecode()) {
1397 case Bytecodes::_i2l: // fall through
1398 case Bytecodes::_i2f: // fall through
1399 case Bytecodes::_i2d: // fall through
1400 case Bytecodes::_i2b: // fall through
1401 case Bytecodes::_i2c: // fall through
1402 case Bytecodes::_i2s: tos_in = itos; break;
1403 case Bytecodes::_l2i: // fall through
1404 case Bytecodes::_l2f: // fall through
1405 case Bytecodes::_l2d: tos_in = ltos; break;
1406 case Bytecodes::_f2i: // fall through
1407 case Bytecodes::_f2l: // fall through
1408 case Bytecodes::_f2d: tos_in = ftos; break;
1409 case Bytecodes::_d2i: // fall through
1410 case Bytecodes::_d2l: // fall through
1411 case Bytecodes::_d2f: tos_in = dtos; break;
1412 default : ShouldNotReachHere();
1413 }
1414 switch (bytecode()) {
1415 case Bytecodes::_l2i: // fall through
1416 case Bytecodes::_f2i: // fall through
1417 case Bytecodes::_d2i: // fall through
1418 case Bytecodes::_i2b: // fall through
1419 case Bytecodes::_i2c: // fall through
1420 case Bytecodes::_i2s: tos_out = itos; break;
1421 case Bytecodes::_i2l: // fall through
1422 case Bytecodes::_f2l: // fall through
1423 case Bytecodes::_d2l: tos_out = ltos; break;
1424 case Bytecodes::_i2f: // fall through
1425 case Bytecodes::_l2f: // fall through
1426 case Bytecodes::_d2f: tos_out = ftos; break;
1427 case Bytecodes::_i2d: // fall through
1428 case Bytecodes::_l2d: // fall through
1429 case Bytecodes::_f2d: tos_out = dtos; break;
1430 default : ShouldNotReachHere();
1431 }
1432 transition(tos_in, tos_out);
1433 }
1434 #endif // ASSERT
1436 static const int64_t is_nan = 0x8000000000000000L;
1438 // Conversion
1439 switch (bytecode()) {
1440 case Bytecodes::_i2l:
1441 __ movslq(rax, rax);
1442 break;
1443 case Bytecodes::_i2f:
1444 __ cvtsi2ssl(xmm0, rax);
1445 break;
1446 case Bytecodes::_i2d:
1447 __ cvtsi2sdl(xmm0, rax);
1448 break;
1449 case Bytecodes::_i2b:
1450 __ movsbl(rax, rax);
1451 break;
1452 case Bytecodes::_i2c:
1453 __ movzwl(rax, rax);
1454 break;
1455 case Bytecodes::_i2s:
1456 __ movswl(rax, rax);
1457 break;
1458 case Bytecodes::_l2i:
1459 __ movl(rax, rax);
1460 break;
1461 case Bytecodes::_l2f:
1462 __ cvtsi2ssq(xmm0, rax);
1463 break;
1464 case Bytecodes::_l2d:
1465 __ cvtsi2sdq(xmm0, rax);
1466 break;
1467 case Bytecodes::_f2i:
1468 {
1469 Label L;
1470 __ cvttss2sil(rax, xmm0);
1471 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1472 __ jcc(Assembler::notEqual, L);
1473 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1474 __ bind(L);
1475 }
1476 break;
1477 case Bytecodes::_f2l:
1478 {
1479 Label L;
1480 __ cvttss2siq(rax, xmm0);
1481 // NaN or overflow/underflow?
1482 __ cmp64(rax, ExternalAddress((address) &is_nan));
1483 __ jcc(Assembler::notEqual, L);
1484 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1485 __ bind(L);
1486 }
1487 break;
1488 case Bytecodes::_f2d:
1489 __ cvtss2sd(xmm0, xmm0);
1490 break;
1491 case Bytecodes::_d2i:
1492 {
1493 Label L;
1494 __ cvttsd2sil(rax, xmm0);
1495 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1496 __ jcc(Assembler::notEqual, L);
1497 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1498 __ bind(L);
1499 }
1500 break;
1501 case Bytecodes::_d2l:
1502 {
1503 Label L;
1504 __ cvttsd2siq(rax, xmm0);
1505 // NaN or overflow/underflow?
1506 __ cmp64(rax, ExternalAddress((address) &is_nan));
1507 __ jcc(Assembler::notEqual, L);
1508 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1509 __ bind(L);
1510 }
1511 break;
1512 case Bytecodes::_d2f:
1513 __ cvtsd2ss(xmm0, xmm0);
1514 break;
1515 default:
1516 ShouldNotReachHere();
1517 }
1518 }
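// Example: cvttss2sil writes the sentinel 0x80000000 for NaN and for
// out-of-range values, so a NaN input fails the compare above and is
// corrected by the SharedRuntime::f2i stub to the 0 required by the
// JVM spec (overflows are corrected to min/max int).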
1520 void TemplateTable::lcmp() {
1521 transition(ltos, itos);
1522 Label done;
1523 __ pop_l(rdx);
1524 __ cmpq(rdx, rax);
1525 __ movl(rax, -1);
1526 __ jccb(Assembler::less, done);
1527 __ setb(Assembler::notEqual, rax);
1528 __ movzbl(rax, rax);
1529 __ bind(done);
1530 }
1532 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1533 Label done;
1534 if (is_float) {
1535 // XXX get rid of pop here, use ... reg, mem32
1536 __ pop_f(xmm1);
1537 __ ucomiss(xmm1, xmm0);
1538 } else {
1539 // XXX get rid of pop here, use ... reg, mem64
1540 __ pop_d(xmm1);
1541 __ ucomisd(xmm1, xmm0);
1542 }
1543 if (unordered_result < 0) {
1544 __ movl(rax, -1);
1545 __ jccb(Assembler::parity, done);
1546 __ jccb(Assembler::below, done);
1547 __ setb(Assembler::notEqual, rdx);
1548 __ movzbl(rax, rdx);
1549 } else {
1550 __ movl(rax, 1);
1551 __ jccb(Assembler::parity, done);
1552 __ jccb(Assembler::above, done);
1553 __ movl(rax, 0);
1554 __ jccb(Assembler::equal, done);
1555 __ decrementl(rax);
1556 }
1557 __ bind(done);
1558 }
1560 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1561 __ get_method(rcx); // rcx holds method
1562 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
1563 // holds bumped taken count
1565 const ByteSize be_offset = Method::backedge_counter_offset() +
1566 InvocationCounter::counter_offset();
1567 const ByteSize inv_offset = Method::invocation_counter_offset() +
1568 InvocationCounter::counter_offset();
1569 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1571 // Load up edx with the branch displacement
1572 __ movl(rdx, at_bcp(1));
1573 __ bswapl(rdx);
1575 if (!is_wide) {
1576 __ sarl(rdx, 16);
1577 }
1578 __ movl2ptr(rdx, rdx);
1580 // Handle all the JSR stuff here, then exit.
1581 // It's much shorter and cleaner than intermingling with the non-JSR
1582 // normal-branch stuff occurring below.
1583 if (is_jsr) {
1584 // Pre-load the next target bytecode into rbx
1585 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));
1587 // compute return address as bci in rax
1588 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
1589 in_bytes(ConstMethod::codes_offset())));
1590 __ subptr(rax, Address(rcx, Method::const_offset()));
1591 // Adjust the bcp in r13 by the displacement in rdx
1592 __ addptr(r13, rdx);
1593 // jsr returns atos that is not an oop
1594 __ push_i(rax);
1595 __ dispatch_only(vtos);
1596 return;
1597 }
1599 // Normal (non-jsr) branch handling
1601 // Adjust the bcp in r13 by the displacement in rdx
1602 __ addptr(r13, rdx);
1604 assert(UseLoopCounter || !UseOnStackReplacement,
1605 "on-stack-replacement requires loop counters");
1606 Label backedge_counter_overflow;
1607 Label profile_method;
1608 Label dispatch;
1609 if (UseLoopCounter) {
1610 // increment backedge counter for backward branches
1611 // rax: MDO
1612 // ebx: MDO bumped taken-count
1613 // rcx: method
1614 // rdx: target offset
1615 // r13: target bcp
1616 // r14: locals pointer
1617 __ testl(rdx, rdx); // check if forward or backward branch
1618 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1619 if (TieredCompilation) {
1620 Label no_mdo;
1621 int increment = InvocationCounter::count_increment;
1622 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1623 if (ProfileInterpreter) {
1624 // Are we profiling?
1625 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
1626 __ testptr(rbx, rbx);
1627 __ jccb(Assembler::zero, no_mdo);
1628 // Increment the MDO backedge counter
1629 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
1630 in_bytes(InvocationCounter::counter_offset()));
1631 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1632 rax, false, Assembler::zero, &backedge_counter_overflow);
1633 __ jmp(dispatch);
1634 }
1635 __ bind(no_mdo);
1636 // Increment backedge counter in Method*
1637 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1638 rax, false, Assembler::zero, &backedge_counter_overflow);
1639 } else {
1640 // increment counter
1641 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1642 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1643 __ movl(Address(rcx, be_offset), rax); // store counter
1645 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1646 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1647 __ addl(rax, Address(rcx, be_offset)); // add both counters
1649 if (ProfileInterpreter) {
1650 // Test to see if we should create a method data oop
1651 __ cmp32(rax,
1652 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1653 __ jcc(Assembler::less, dispatch);
1655 // if no method data exists, go to profile method
1656 __ test_method_data_pointer(rax, profile_method);
1658 if (UseOnStackReplacement) {
1659 // check for overflow against ebx which is the MDO taken count
1660 __ cmp32(rbx,
1661 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1662 __ jcc(Assembler::below, dispatch);
1664 // When ProfileInterpreter is on, the backedge_count comes
1665 // from the MethodData*, whose value does not get reset on
1666 // the call to frequency_counter_overflow(). To avoid
1667 // excessive calls to the overflow routine while the method is
1668 // being compiled, add a second test to make sure the overflow
1669 // function is called only once every overflow_frequency.
1670 const int overflow_frequency = 1024;
1671 __ andl(rbx, overflow_frequency - 1);
1672 __ jcc(Assembler::zero, backedge_counter_overflow);
1674 }
1675 } else {
1676 if (UseOnStackReplacement) {
1677 // check for overflow against eax, which is the sum of the
1678 // counters
1679 __ cmp32(rax,
1680 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1681 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1683 }
1684 }
1685 }
1686 __ bind(dispatch);
1687 }
1689 // Pre-load the next target bytecode into rbx
1690 __ load_unsigned_byte(rbx, Address(r13, 0));
1692 // continue with the bytecode @ target
1693 // eax: return bci for jsr's, unused otherwise
1694 // ebx: target bytecode
1695 // r13: target bcp
1696 __ dispatch_only(vtos);
1698 if (UseLoopCounter) {
1699 if (ProfileInterpreter) {
1700 // Out-of-line code to allocate method data oop.
1701 __ bind(profile_method);
1702 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1703 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1704 __ set_method_data_pointer_for_bcp();
1705 __ jmp(dispatch);
1706 }
1708 if (UseOnStackReplacement) {
1709 // invocation counter overflow
1710 __ bind(backedge_counter_overflow);
1711 __ negptr(rdx);
1712 __ addptr(rdx, r13); // branch bcp
1713 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1714 __ call_VM(noreg,
1715 CAST_FROM_FN_PTR(address,
1716 InterpreterRuntime::frequency_counter_overflow),
1717 rdx);
1718 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1720 // rax: osr nmethod (osr ok) or NULL (osr not possible)
1721 // ebx: target bytecode
1722 // rdx: scratch
1723 // r14: locals pointer
1724 // r13: bcp
1725 __ testptr(rax, rax); // test result
1726 __ jcc(Assembler::zero, dispatch); // no osr if null
1727 // nmethod may have been invalidated (VM may block upon call_VM return)
1728 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1729 __ cmpl(rcx, InvalidOSREntryBci);
1730 __ jcc(Assembler::equal, dispatch);
1732 // We have the address of an on stack replacement routine in eax
1733 // We need to prepare to execute the OSR method. First we must
1734 // migrate the locals and monitors off of the stack.
1736 __ mov(r13, rax); // save the nmethod
1738 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1740 // eax is OSR buffer, move it to expected parameter location
1741 __ mov(j_rarg0, rax);
1743 // We use the j_rarg definitions here because parameter registers vary
1744 // across platforms and we are in the midst of a calling sequence to
1745 // the OSR nmethod, so we want no collisions. These are NOT parameters.
1747 const Register retaddr = j_rarg2;
1748 const Register sender_sp = j_rarg1;
1750 // pop the interpreter frame
1751 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1752 __ leave(); // remove frame anchor
1753 __ pop(retaddr); // get return address
1754 __ mov(rsp, sender_sp); // set sp to sender sp
1755 // Ensure compiled code always sees stack at proper alignment
1756 __ andptr(rsp, -(StackAlignmentInBytes));
1758 // Unlike 32-bit x86, we need no specialized return from compiled code
1759 // to the interpreter or the call stub.
1761 // push the return address
1762 __ push(retaddr);
1764 // and begin the OSR nmethod
1765 __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
1766 }
1767 }
1768 }
1771 void TemplateTable::if_0cmp(Condition cc) {
1772 transition(itos, vtos);
1773 // assume branch is more often taken than not (loops use backward branches)
1774 Label not_taken;
1775 __ testl(rax, rax);
1776 __ jcc(j_not(cc), not_taken);
1777 branch(false, false);
1778 __ bind(not_taken);
1779 __ profile_not_taken_branch(rax);
1780 }
1782 void TemplateTable::if_icmp(Condition cc) {
1783 transition(itos, vtos);
1784 // assume branch is more often taken than not (loops use backward branches)
1785 Label not_taken;
1786 __ pop_i(rdx);
1787 __ cmpl(rdx, rax);
1788 __ jcc(j_not(cc), not_taken);
1789 branch(false, false);
1790 __ bind(not_taken);
1791 __ profile_not_taken_branch(rax);
1792 }
1794 void TemplateTable::if_nullcmp(Condition cc) {
1795 transition(atos, vtos);
1796 // assume branch is more often taken than not (loops use backward branches)
1797 Label not_taken;
1798 __ testptr(rax, rax);
1799 __ jcc(j_not(cc), not_taken);
1800 branch(false, false);
1801 __ bind(not_taken);
1802 __ profile_not_taken_branch(rax);
1803 }
1805 void TemplateTable::if_acmp(Condition cc) {
1806 transition(atos, vtos);
1807 // assume branch is more often taken than not (loops use backward branches)
1808 Label not_taken;
1809 __ pop_ptr(rdx);
1810 __ cmpptr(rdx, rax);
1811 __ jcc(j_not(cc), not_taken);
1812 branch(false, false);
1813 __ bind(not_taken);
1814 __ profile_not_taken_branch(rax);
1815 }
1817 void TemplateTable::ret() {
1818 transition(vtos, vtos);
1819 locals_index(rbx);
1820 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
1821 __ profile_ret(rbx, rcx);
1822 __ get_method(rax);
1823 __ movptr(r13, Address(rax, Method::const_offset()));
1824 __ lea(r13, Address(r13, rbx, Address::times_1,
1825 ConstMethod::codes_offset()));
1826 __ dispatch_next(vtos);
1827 }
1829 void TemplateTable::wide_ret() {
1830 transition(vtos, vtos);
1831 locals_index_wide(rbx);
1832 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
1833 __ profile_ret(rbx, rcx);
1834 __ get_method(rax);
1835 __ movptr(r13, Address(rax, Method::const_offset()));
1836 __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
1837 __ dispatch_next(vtos);
1838 }
1840 void TemplateTable::tableswitch() {
1841 Label default_case, continue_execution;
1842 transition(itos, vtos);
1843 // align r13
1844 __ lea(rbx, at_bcp(BytesPerInt));
1845 __ andptr(rbx, -BytesPerInt);
1846 // load lo & hi
1847 __ movl(rcx, Address(rbx, BytesPerInt));
1848 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
1849 __ bswapl(rcx);
1850 __ bswapl(rdx);
1851 // check against lo & hi
1852 __ cmpl(rax, rcx);
1853 __ jcc(Assembler::less, default_case);
1854 __ cmpl(rax, rdx);
1855 __ jcc(Assembler::greater, default_case);
1856 // lookup dispatch offset
1857 __ subl(rax, rcx);
1858 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1859 __ profile_switch_case(rax, rbx, rcx);
1860 // continue execution
1861 __ bind(continue_execution);
1862 __ bswapl(rdx);
1863 __ movl2ptr(rdx, rdx);
1864 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1865 __ addptr(r13, rdx);
1866 __ dispatch_only(vtos);
1867 // handle default
1868 __ bind(default_case);
1869 __ profile_switch_default(rax);
1870 __ movl(rdx, Address(rbx, 0));
1871 __ jmp(continue_execution);
1872 }
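// Operand layout consumed above, starting at the 4-byte-aligned address
// after the opcode:
//   default_offset, lo, hi, jump_offsets[hi - lo + 1]
// All values are stored big-endian, hence the bswapl of lo, hi, and the
// fetched displacement.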
1874 void TemplateTable::lookupswitch() {
1875 transition(itos, itos);
1876 __ stop("lookupswitch bytecode should have been rewritten");
1877 }
1879 void TemplateTable::fast_linearswitch() {
1880 transition(itos, vtos);
1881 Label loop_entry, loop, found, continue_execution;
1882 // bswap rax so we can avoid bswapping the table entries
1883 __ bswapl(rax);
1884 // align r13
1885 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1886 // this instruction (change offsets
1887 // below)
1888 __ andptr(rbx, -BytesPerInt);
1889 // set counter
1890 __ movl(rcx, Address(rbx, BytesPerInt));
1891 __ bswapl(rcx);
1892 __ jmpb(loop_entry);
1893 // table search
1894 __ bind(loop);
1895 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1896 __ jcc(Assembler::equal, found);
1897 __ bind(loop_entry);
1898 __ decrementl(rcx);
1899 __ jcc(Assembler::greaterEqual, loop);
1900 // default case
1901 __ profile_switch_default(rax);
1902 __ movl(rdx, Address(rbx, 0));
1903 __ jmp(continue_execution);
1904 // entry found -> get offset
1905 __ bind(found);
1906 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1907 __ profile_switch_case(rcx, rax, rbx);
1908 // continue execution
1909 __ bind(continue_execution);
1910 __ bswapl(rdx);
1911 __ movl2ptr(rdx, rdx);
1912 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1913 __ addptr(r13, rdx);
1914 __ dispatch_only(vtos);
1915 }
1917 void TemplateTable::fast_binaryswitch() {
1918 transition(itos, vtos);
1919 // Implementation using the following core algorithm:
1920 //
1921 // int binary_search(int key, LookupswitchPair* array, int n) {
1922 // // Binary search according to "Methodik des Programmierens" by
1923 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1924 // int i = 0;
1925 // int j = n;
1926 // while (i+1 < j) {
1927 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1928 // // with Q: for all i: 0 <= i < n: key < a[i]
1929 // // where a stands for the array and assuming that the (nonexistent)
1930 // // element a[n] is infinitely large.
1931 // int h = (i + j) >> 1;
1932 // // i < h < j
1933 // if (key < array[h].fast_match()) {
1934 // j = h;
1935 // } else {
1936 // i = h;
1937 // }
1938 // }
1939 // // R: a[i] <= key < a[i+1] or Q
1940 // // (i.e., if key is within array, i is the correct index)
1941 // return i;
1942 // }
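// In the generated code below, a LookupswitchPair is the 8-byte
// (match, offset) pair in the aligned bytecode stream, so
// array[h].fast_match() corresponds to the 4-byte match word at
// Address(array, h, Address::times_8), byte-swapped before the compare.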
1944 // Register allocation
1945 const Register key = rax; // already set (tosca)
1946 const Register array = rbx;
1947 const Register i = rcx;
1948 const Register j = rdx;
1949 const Register h = rdi;
1950 const Register temp = rsi;
1952 // Find array start
1953 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1954 // get rid of this
1955 // instruction (change
1956 // offsets below)
1957 __ andptr(array, -BytesPerInt);
1959 // Initialize i & j
1960 __ xorl(i, i); // i = 0;
1961 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1963 // Convert j into native byte ordering
1964 __ bswapl(j);
1966 // And start
1967 Label entry;
1968 __ jmp(entry);
1970 // binary search loop
1971 {
1972 Label loop;
1973 __ bind(loop);
1974 // int h = (i + j) >> 1;
1975 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1976 __ sarl(h, 1); // h = (i + j) >> 1;
1977 // if (key < array[h].fast_match()) {
1978 // j = h;
1979 // } else {
1980 // i = h;
1981 // }
1982 // Convert array[h].match to native byte-ordering before compare
1983 __ movl(temp, Address(array, h, Address::times_8));
1984 __ bswapl(temp);
1985 __ cmpl(key, temp);
1986 // j = h if (key < array[h].fast_match())
1987 __ cmovl(Assembler::less, j, h);
1988 // i = h if (key >= array[h].fast_match())
1989 __ cmovl(Assembler::greaterEqual, i, h);
1990 // while (i+1 < j)
1991 __ bind(entry);
1992 __ leal(h, Address(i, 1)); // i+1
1993 __ cmpl(h, j); // i+1 < j
1994 __ jcc(Assembler::less, loop);
1995 }
1997 // end of binary search, result index is i (must check again!)
1998 Label default_case;
1999 // Convert array[i].match to native byte-ordering before compare
2000 __ movl(temp, Address(array, i, Address::times_8));
2001 __ bswapl(temp);
2002 __ cmpl(key, temp);
2003 __ jcc(Assembler::notEqual, default_case);
2005 // entry found -> j = offset
2006 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2007 __ profile_switch_case(i, key, array);
2008 __ bswapl(j);
2009 __ movl2ptr(j, j);
2010 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2011 __ addptr(r13, j);
2012 __ dispatch_only(vtos);
2014 // default case -> j = default offset
2015 __ bind(default_case);
2016 __ profile_switch_default(i);
2017 __ movl(j, Address(array, -2 * BytesPerInt));
2018 __ bswapl(j);
2019 __ movl2ptr(j, j);
2020 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2021 __ addptr(r13, j);
2022 __ dispatch_only(vtos);
2023 }
2026 void TemplateTable::_return(TosState state) {
2027 transition(state, state);
2028 assert(_desc->calls_vm(),
2029 "inconsistent calls_vm information"); // call in remove_activation
2031 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2032 assert(state == vtos, "only valid state");
2033 __ movptr(c_rarg1, aaddress(0));
2034 __ load_klass(rdi, c_rarg1);
2035 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2036 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2037 Label skip_register_finalizer;
2038 __ jcc(Assembler::zero, skip_register_finalizer);
2040 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2042 __ bind(skip_register_finalizer);
2043 }
2045 __ remove_activation(state, r13);
2046 __ jmp(r13);
2047 }
2049 // ----------------------------------------------------------------------------
2050 // Volatile variables demand their effects be made known to all CPUs
2051 // in order. Store buffers on most chips allow reads & writes to
2052 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2053 // without some kind of memory barrier (i.e., it's not sufficient that
2054 // the interpreter does not reorder volatile references; the hardware
2055 // also must not reorder them).
2056 //
2057 // According to the new Java Memory Model (JMM):
2058 // (1) All volatiles are serialized with respect to each other. Also, reads &
2059 // writes act as acquire & release, so:
2060 // (2) A read cannot let unrelated NON-volatile memory refs that
2061 // happen after the read float up to before the read. It's OK for
2062 // non-volatile memory refs that happen before the volatile read to
2063 // float down below it.
2064 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2065 // memory refs that happen BEFORE the write float down to after the
2066 // write. It's OK for non-volatile memory refs that happen after the
2067 // volatile write to float up before it.
2068 //
2069 // We only put in barriers around volatile refs (they are expensive),
2070 // not _between_ memory refs (that would require us to track the
2071 // flavor of the previous memory refs). Requirements (2) and (3)
2072 // require some barriers before volatile stores and after volatile
2073 // loads. These nearly cover requirement (1) but miss the
2074 // volatile-store-volatile-load case. This final case is placed after
2075 // volatile-stores although it could just as well go before
2076 // volatile-loads.
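// On x86's TSO memory model this boils down to (in pseudo-code):
//   volatile load:   load;                      // no fence needed on TSO
//   volatile store:  store; StoreLoad barrier;  // the expensive case
// which is why only the post-store barrier is emitted below (see
// putfield_or_static) while the load-side barriers stay commented out
// as "not needed currently".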
2077 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2078 order_constraint) {
2079 // Helper function to insert an is-volatile test and memory barrier
2080 if (os::is_MP()) { // Not needed on single CPU
2081 __ membar(order_constraint);
2082 }
2083 }
2085 void TemplateTable::resolve_cache_and_index(int byte_no,
2086 Register Rcache,
2087 Register index,
2088 size_t index_size) {
2089 const Register temp = rbx;
2090 assert_different_registers(Rcache, index, temp);
2092 Label resolved;
2093 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2094 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2095 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2096 __ jcc(Assembler::equal, resolved);
2098 // resolve first time through
2099 address entry;
2100 switch (bytecode()) {
2101 case Bytecodes::_getstatic:
2102 case Bytecodes::_putstatic:
2103 case Bytecodes::_getfield:
2104 case Bytecodes::_putfield:
2105 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2106 break;
2107 case Bytecodes::_invokevirtual:
2108 case Bytecodes::_invokespecial:
2109 case Bytecodes::_invokestatic:
2110 case Bytecodes::_invokeinterface:
2111 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2112 break;
2113 case Bytecodes::_invokehandle:
2114 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2115 break;
2116 case Bytecodes::_invokedynamic:
2117 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2118 break;
2119 default:
2120 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2121 break;
2122 }
2123 __ movl(temp, (int) bytecode());
2124 __ call_VM(noreg, entry, temp);
2126 // Update registers with resolved info
2127 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2128 __ bind(resolved);
2129 }
2131 // The cache and index registers must be set before the call
2132 void TemplateTable::load_field_cp_cache_entry(Register obj,
2133 Register cache,
2134 Register index,
2135 Register off,
2136 Register flags,
2137 bool is_static = false) {
2138 assert_different_registers(cache, index, flags, off);
2140 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2141 // Field offset
2142 __ movptr(off, Address(cache, index, Address::times_ptr,
2143 in_bytes(cp_base_offset +
2144 ConstantPoolCacheEntry::f2_offset())));
2145 // Flags
2146 __ movl(flags, Address(cache, index, Address::times_ptr,
2147 in_bytes(cp_base_offset +
2148 ConstantPoolCacheEntry::flags_offset())));
2150 // klass overwrite register
2151 if (is_static) {
2152 __ movptr(obj, Address(cache, index, Address::times_ptr,
2153 in_bytes(cp_base_offset +
2154 ConstantPoolCacheEntry::f1_offset())));
2155 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2156 __ movptr(obj, Address(obj, mirror_offset));
2157 }
2158 }
2160 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2161 Register method,
2162 Register itable_index,
2163 Register flags,
2164 bool is_invokevirtual,
2165 bool is_invokevfinal, /*unused*/
2166 bool is_invokedynamic) {
2167 // setup registers
2168 const Register cache = rcx;
2169 const Register index = rdx;
2170 assert_different_registers(method, flags);
2171 assert_different_registers(method, cache, index);
2172 assert_different_registers(itable_index, flags);
2173 assert_different_registers(itable_index, cache, index);
2174 // determine constant pool cache field offsets
2175 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2176 const int method_offset = in_bytes(
2177 ConstantPoolCache::base_offset() +
2178 ((byte_no == f2_byte)
2179 ? ConstantPoolCacheEntry::f2_offset()
2180 : ConstantPoolCacheEntry::f1_offset()));
2181 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2182 ConstantPoolCacheEntry::flags_offset());
2183 // access constant pool cache fields
2184 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2185 ConstantPoolCacheEntry::f2_offset());
2187 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2188 resolve_cache_and_index(byte_no, cache, index, index_size);
2189 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2191 if (itable_index != noreg) {
2192 // pick up itable or appendix index from f2 also:
2193 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2194 }
2195 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2196 }
2198 // Correct values of the cache and index registers are preserved.
2199 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2200 bool is_static, bool has_tos) {
2201 // do the JVMTI work here to avoid disturbing the register state below
2202 // We use c_rarg registers here because we want to use the registers used in
2203 // the call to the VM
2204 if (JvmtiExport::can_post_field_access()) {
2205 // Check to see if a field access watch has been set before we
2206 // take the time to call into the VM.
2207 Label L1;
2208 assert_different_registers(cache, index, rax);
2209 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2210 __ testl(rax, rax);
2211 __ jcc(Assembler::zero, L1);
2213 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2215 // cache entry pointer
2216 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2217 __ shll(c_rarg3, LogBytesPerWord);
2218 __ addptr(c_rarg2, c_rarg3);
2219 if (is_static) {
2220 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2221 } else {
2222 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2223 __ verify_oop(c_rarg1);
2224 }
2225 // c_rarg1: object pointer or NULL
2226 // c_rarg2: cache entry pointer
2227 // c_rarg3: jvalue object on the stack
2228 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2229 InterpreterRuntime::post_field_access),
2230 c_rarg1, c_rarg2, c_rarg3);
2231 __ get_cache_and_index_at_bcp(cache, index, 1);
2232 __ bind(L1);
2233 }
2234 }
2236 void TemplateTable::pop_and_check_object(Register r) {
2237 __ pop_ptr(r);
2238 __ null_check(r); // for field access we must check obj
2239 __ verify_oop(r);
2240 }
2242 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2243 transition(vtos, vtos);
2245 const Register cache = rcx;
2246 const Register index = rdx;
2247 const Register obj = c_rarg3;
2248 const Register off = rbx;
2249 const Register flags = rax;
2250 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2252 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2253 jvmti_post_field_access(cache, index, is_static, false);
2254 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2256 if (!is_static) {
2257 // obj is on the stack
2258 pop_and_check_object(obj);
2259 }
2261 const Address field(obj, off, Address::times_1);
2263 Label Done, notByte, notInt, notShort, notChar,
2264 notLong, notFloat, notObj, notDouble;
2266 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2267 // Make sure we don't need to mask flags after the above shift
2268 assert(btos == 0, "change code, btos != 0");
2270 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2271 __ jcc(Assembler::notZero, notByte);
2272 // btos
2273 __ load_signed_byte(rax, field);
2274 __ push(btos);
2275 // Rewrite bytecode to be faster
2276 if (!is_static) {
2277 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2278 }
2279 __ jmp(Done);
2281 __ bind(notByte);
2282 __ cmpl(flags, atos);
2283 __ jcc(Assembler::notEqual, notObj);
2284 // atos
2285 __ load_heap_oop(rax, field);
2286 __ push(atos);
2287 if (!is_static) {
2288 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2289 }
2290 __ jmp(Done);
2292 __ bind(notObj);
2293 __ cmpl(flags, itos);
2294 __ jcc(Assembler::notEqual, notInt);
2295 // itos
2296 __ movl(rax, field);
2297 __ push(itos);
2298 // Rewrite bytecode to be faster
2299 if (!is_static) {
2300 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2301 }
2302 __ jmp(Done);
2304 __ bind(notInt);
2305 __ cmpl(flags, ctos);
2306 __ jcc(Assembler::notEqual, notChar);
2307 // ctos
2308 __ load_unsigned_short(rax, field);
2309 __ push(ctos);
2310 // Rewrite bytecode to be faster
2311 if (!is_static) {
2312 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2313 }
2314 __ jmp(Done);
2316 __ bind(notChar);
2317 __ cmpl(flags, stos);
2318 __ jcc(Assembler::notEqual, notShort);
2319 // stos
2320 __ load_signed_short(rax, field);
2321 __ push(stos);
2322 // Rewrite bytecode to be faster
2323 if (!is_static) {
2324 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2325 }
2326 __ jmp(Done);
2328 __ bind(notShort);
2329 __ cmpl(flags, ltos);
2330 __ jcc(Assembler::notEqual, notLong);
2331 // ltos
2332 __ movq(rax, field);
2333 __ push(ltos);
2334 // Rewrite bytecode to be faster
2335 if (!is_static) {
2336 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2337 }
2338 __ jmp(Done);
2340 __ bind(notLong);
2341 __ cmpl(flags, ftos);
2342 __ jcc(Assembler::notEqual, notFloat);
2343 // ftos
2344 __ movflt(xmm0, field);
2345 __ push(ftos);
2346 // Rewrite bytecode to be faster
2347 if (!is_static) {
2348 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2349 }
2350 __ jmp(Done);
2352 __ bind(notFloat);
2353 #ifdef ASSERT
2354 __ cmpl(flags, dtos);
2355 __ jcc(Assembler::notEqual, notDouble);
2356 #endif
2357 // dtos
2358 __ movdbl(xmm0, field);
2359 __ push(dtos);
2360 // Rewrite bytecode to be faster
2361 if (!is_static) {
2362 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2363 }
2364 #ifdef ASSERT
2365 __ jmp(Done);
2367 __ bind(notDouble);
2368 __ stop("Bad state");
2369 #endif
2371 __ bind(Done);
2372 // [jk] not needed currently
2373 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2374 // Assembler::LoadStore));
2375 }
2378 void TemplateTable::getfield(int byte_no) {
2379 getfield_or_static(byte_no, false);
2380 }
2382 void TemplateTable::getstatic(int byte_no) {
2383 getfield_or_static(byte_no, true);
2384 }
2386 // The cache and index registers are expected to be set before the call.
2387 // The function may destroy various registers, just not the cache and index registers.
2388 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2389 transition(vtos, vtos);
2391 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2393 if (JvmtiExport::can_post_field_modification()) {
2394 // Check to see if a field modification watch has been set before
2395 // we take the time to call into the VM.
2396 Label L1;
2397 assert_different_registers(cache, index, rax);
2398 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2399 __ testl(rax, rax);
2400 __ jcc(Assembler::zero, L1);
2402 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2404 if (is_static) {
2405 // Life is simple. Null out the object pointer.
2406 __ xorl(c_rarg1, c_rarg1);
2407 } else {
2408 // Life is harder. The stack holds the value on top, followed by
2409 // the object. We don't know the size of the value, though; it
2410 // could be one or two words depending on its type. As a result,
2411 // we must find the type to determine where the object is.
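// Expression stack at this point (a sketch; tos at rsp):
//   one-word value:              two-word value (long/double):
//     [value ] <- at_tos()         [value ] <- at_tos(), at_tos_p1()
//     [object] <- at_tos_p1()      [object] <- at_tos_p2()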
2412 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2413 Address::times_8,
2414 in_bytes(cp_base_offset +
2415 ConstantPoolCacheEntry::flags_offset())));
2416 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2417 // Make sure we don't need to mask rcx after the above shift
2418 ConstantPoolCacheEntry::verify_tos_state_shift();
2419 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2420 __ cmpl(c_rarg3, ltos);
2421 __ cmovptr(Assembler::equal,
2422 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2423 __ cmpl(c_rarg3, dtos);
2424 __ cmovptr(Assembler::equal,
2425 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2426 }
2427 // cache entry pointer
2428 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2429 __ shll(rscratch1, LogBytesPerWord);
2430 __ addptr(c_rarg2, rscratch1);
2431 // object (tos)
2432 __ mov(c_rarg3, rsp);
2433 // c_rarg1: object pointer set up above (NULL if static)
2434 // c_rarg2: cache entry pointer
2435 // c_rarg3: jvalue object on the stack
2436 __ call_VM(noreg,
2437 CAST_FROM_FN_PTR(address,
2438 InterpreterRuntime::post_field_modification),
2439 c_rarg1, c_rarg2, c_rarg3);
2440 __ get_cache_and_index_at_bcp(cache, index, 1);
2441 __ bind(L1);
2442 }
2443 }
2445 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2446 transition(vtos, vtos);
2448 const Register cache = rcx;
2449 const Register index = rdx;
2450 const Register obj = rcx;
2451 const Register off = rbx;
2452 const Register flags = rax;
2453 const Register bc = c_rarg3;
2455 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2456 jvmti_post_field_mod(cache, index, is_static);
2457 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2459 // [jk] not needed currently
2460 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2461 // Assembler::StoreStore));
2463 Label notVolatile, Done;
2464 __ movl(rdx, flags);
2465 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2466 __ andl(rdx, 0x1);
2468 // field address
2469 const Address field(obj, off, Address::times_1);
2471 Label notByte, notInt, notShort, notChar,
2472 notLong, notFloat, notObj, notDouble;
2474 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2476 assert(btos == 0, "change code, btos != 0");
2477 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2478 __ jcc(Assembler::notZero, notByte);
2480 // btos
2481 {
2482 __ pop(btos);
2483 if (!is_static) pop_and_check_object(obj);
2484 __ movb(field, rax);
2485 if (!is_static) {
2486 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2487 }
2488 __ jmp(Done);
2489 }
2491 __ bind(notByte);
2492 __ cmpl(flags, atos);
2493 __ jcc(Assembler::notEqual, notObj);
2495 // atos
2496 {
2497 __ pop(atos);
2498 if (!is_static) pop_and_check_object(obj);
2499 // Store into the field
2500 do_oop_store(_masm, field, rax, _bs->kind(), false);
2501 if (!is_static) {
2502 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2503 }
2504 __ jmp(Done);
2505 }
2507 __ bind(notObj);
2508 __ cmpl(flags, itos);
2509 __ jcc(Assembler::notEqual, notInt);
2511 // itos
2512 {
2513 __ pop(itos);
2514 if (!is_static) pop_and_check_object(obj);
2515 __ movl(field, rax);
2516 if (!is_static) {
2517 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2518 }
2519 __ jmp(Done);
2520 }
2522 __ bind(notInt);
2523 __ cmpl(flags, ctos);
2524 __ jcc(Assembler::notEqual, notChar);
2526 // ctos
2527 {
2528 __ pop(ctos);
2529 if (!is_static) pop_and_check_object(obj);
2530 __ movw(field, rax);
2531 if (!is_static) {
2532 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2533 }
2534 __ jmp(Done);
2535 }
2537 __ bind(notChar);
2538 __ cmpl(flags, stos);
2539 __ jcc(Assembler::notEqual, notShort);
2541 // stos
2542 {
2543 __ pop(stos);
2544 if (!is_static) pop_and_check_object(obj);
2545 __ movw(field, rax);
2546 if (!is_static) {
2547 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2548 }
2549 __ jmp(Done);
2550 }
2552 __ bind(notShort);
2553 __ cmpl(flags, ltos);
2554 __ jcc(Assembler::notEqual, notLong);
2556 // ltos
2557 {
2558 __ pop(ltos);
2559 if (!is_static) pop_and_check_object(obj);
2560 __ movq(field, rax);
2561 if (!is_static) {
2562 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2563 }
2564 __ jmp(Done);
2565 }
2567 __ bind(notLong);
2568 __ cmpl(flags, ftos);
2569 __ jcc(Assembler::notEqual, notFloat);
2571 // ftos
2572 {
2573 __ pop(ftos);
2574 if (!is_static) pop_and_check_object(obj);
2575 __ movflt(field, xmm0);
2576 if (!is_static) {
2577 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2578 }
2579 __ jmp(Done);
2580 }
2582 __ bind(notFloat);
2583 #ifdef ASSERT
2584 __ cmpl(flags, dtos);
2585 __ jcc(Assembler::notEqual, notDouble);
2586 #endif
2588 // dtos
2589 {
2590 __ pop(dtos);
2591 if (!is_static) pop_and_check_object(obj);
2592 __ movdbl(field, xmm0);
2593 if (!is_static) {
2594 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2595 }
2596 }
2598 #ifdef ASSERT
2599 __ jmp(Done);
2601 __ bind(notDouble);
2602 __ stop("Bad state");
2603 #endif
2605 __ bind(Done);
2607 // Check for volatile store
2608 __ testl(rdx, rdx);
2609 __ jcc(Assembler::zero, notVolatile);
2610 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2611 Assembler::StoreStore));
2612 __ bind(notVolatile);
2613 }
2615 void TemplateTable::putfield(int byte_no) {
2616 putfield_or_static(byte_no, false);
2617 }
2619 void TemplateTable::putstatic(int byte_no) {
2620 putfield_or_static(byte_no, true);
2621 }
2623 void TemplateTable::jvmti_post_fast_field_mod() {
2624 if (JvmtiExport::can_post_field_modification()) {
2625 // Check to see if a field modification watch has been set before
2626 // we take the time to call into the VM.
2627 Label L2;
2628 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2629 __ testl(c_rarg3, c_rarg3);
2630 __ jcc(Assembler::zero, L2);
2631 __ pop_ptr(rbx); // copy the object pointer from tos
2632 __ verify_oop(rbx);
2633 __ push_ptr(rbx); // put the object pointer back on tos
2634 // Save tos values before call_VM() clobbers them. Since we have
2635 // to do it for every data type, we use the saved values as the
2636 // jvalue object.
2637 switch (bytecode()) { // load values into the jvalue object
2638 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2639 case Bytecodes::_fast_bputfield: // fall through
2640 case Bytecodes::_fast_sputfield: // fall through
2641 case Bytecodes::_fast_cputfield: // fall through
2642 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2643 case Bytecodes::_fast_dputfield: __ push_d(); break;
2644 case Bytecodes::_fast_fputfield: __ push_f(); break;
2645 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2647 default:
2648 ShouldNotReachHere();
2649 }
2650 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2651 // access constant pool cache entry
2652 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2653 __ verify_oop(rbx);
2654 // rbx: object pointer copied above
2655 // c_rarg2: cache entry pointer
2656 // c_rarg3: jvalue object on the stack
2657 __ call_VM(noreg,
2658 CAST_FROM_FN_PTR(address,
2659 InterpreterRuntime::post_field_modification),
2660 rbx, c_rarg2, c_rarg3);
2662 switch (bytecode()) { // restore tos values
2663 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2664 case Bytecodes::_fast_bputfield: // fall through
2665 case Bytecodes::_fast_sputfield: // fall through
2666 case Bytecodes::_fast_cputfield: // fall through
2667 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2668 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2669 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2670 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2671 }
2672 __ bind(L2);
2673 }
2674 }
2676 void TemplateTable::fast_storefield(TosState state) {
2677 transition(state, vtos);
2679 ByteSize base = ConstantPoolCache::base_offset();
2681 jvmti_post_fast_field_mod();
2683 // access constant pool cache
2684 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2686 // test for volatile with rdx
2687 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2688 in_bytes(base +
2689 ConstantPoolCacheEntry::flags_offset())));
2691 // replace index with field offset from cache entry
2692 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2693 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2695 // [jk] not needed currently
2696 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2697 // Assembler::StoreStore));
2699 Label notVolatile;
2700 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2701 __ andl(rdx, 0x1);
2703 // Get object from stack
2704 pop_and_check_object(rcx);
2706 // field address
2707 const Address field(rcx, rbx, Address::times_1);
2709 // access field
2710 switch (bytecode()) {
2711 case Bytecodes::_fast_aputfield:
2712 do_oop_store(_masm, field, rax, _bs->kind(), false);
2713 break;
2714 case Bytecodes::_fast_lputfield:
2715 __ movq(field, rax);
2716 break;
2717 case Bytecodes::_fast_iputfield:
2718 __ movl(field, rax);
2719 break;
2720 case Bytecodes::_fast_bputfield:
2721 __ movb(field, rax);
2722 break;
2723 case Bytecodes::_fast_sputfield:
2724 // fall through
2725 case Bytecodes::_fast_cputfield:
2726 __ movw(field, rax);
2727 break;
2728 case Bytecodes::_fast_fputfield:
2729 __ movflt(field, xmm0);
2730 break;
2731 case Bytecodes::_fast_dputfield:
2732 __ movdbl(field, xmm0);
2733 break;
2734 default:
2735 ShouldNotReachHere();
2736 }
2738 // Check for volatile store
2739 __ testl(rdx, rdx);
2740 __ jcc(Assembler::zero, notVolatile);
2741 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2742 Assembler::StoreStore));
2743 __ bind(notVolatile);
2744 }
2747 void TemplateTable::fast_accessfield(TosState state) {
2748 transition(atos, state);
2750 // Do the JVMTI work here to avoid disturbing the register state below
2751 if (JvmtiExport::can_post_field_access()) {
2752 // Check to see if a field access watch has been set before we
2753 // take the time to call into the VM.
2754 Label L1;
2755 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2756 __ testl(rcx, rcx);
2757 __ jcc(Assembler::zero, L1);
2758 // access constant pool cache entry
2759 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2760 __ verify_oop(rax);
2761 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2762 __ mov(c_rarg1, rax);
2763 // c_rarg1: object pointer copied above
2764 // c_rarg2: cache entry pointer
2765 __ call_VM(noreg,
2766 CAST_FROM_FN_PTR(address,
2767 InterpreterRuntime::post_field_access),
2768 c_rarg1, c_rarg2);
2769 __ pop_ptr(rax); // restore object pointer
2770 __ bind(L1);
2771 }
2773 // access constant pool cache
2774 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2775 // replace index with field offset from cache entry
2776 // [jk] not needed currently
2777 // if (os::is_MP()) {
2778 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2779 // in_bytes(ConstantPoolCache::base_offset() +
2780 // ConstantPoolCacheEntry::flags_offset())));
2781 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2782 // __ andl(rdx, 0x1);
2783 // }
2784 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2785 in_bytes(ConstantPoolCache::base_offset() +
2786 ConstantPoolCacheEntry::f2_offset())));
2788 // rax: object
2789 __ verify_oop(rax);
2790 __ null_check(rax);
2791 Address field(rax, rbx, Address::times_1);
2793 // access field
2794 switch (bytecode()) {
2795 case Bytecodes::_fast_agetfield:
2796 __ load_heap_oop(rax, field);
2797 __ verify_oop(rax);
2798 break;
2799 case Bytecodes::_fast_lgetfield:
2800 __ movq(rax, field);
2801 break;
2802 case Bytecodes::_fast_igetfield:
2803 __ movl(rax, field);
2804 break;
2805 case Bytecodes::_fast_bgetfield:
2806 __ movsbl(rax, field);
2807 break;
2808 case Bytecodes::_fast_sgetfield:
2809 __ load_signed_short(rax, field);
2810 break;
2811 case Bytecodes::_fast_cgetfield:
2812 __ load_unsigned_short(rax, field);
2813 break;
2814 case Bytecodes::_fast_fgetfield:
2815 __ movflt(xmm0, field);
2816 break;
2817 case Bytecodes::_fast_dgetfield:
2818 __ movdbl(xmm0, field);
2819 break;
2820 default:
2821 ShouldNotReachHere();
2822 }
2823 // [jk] not needed currently
2824 // if (os::is_MP()) {
2825 // Label notVolatile;
2826 // __ testl(rdx, rdx);
2827 // __ jcc(Assembler::zero, notVolatile);
2828 // __ membar(Assembler::LoadLoad);
2829 // __ bind(notVolatile);
2830 //};
2831 }
2833 void TemplateTable::fast_xaccess(TosState state) {
2834 transition(vtos, state);
2836 // get receiver
2837 __ movptr(rax, aaddress(0));
2838 // access constant pool cache
2839 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2840 __ movptr(rbx,
2841 Address(rcx, rdx, Address::times_8,
2842 in_bytes(ConstantPoolCache::base_offset() +
2843 ConstantPoolCacheEntry::f2_offset())));
2844 // make sure any exception is reported in the correct bcp range (getfield
2845 // is the next instruction)
2846 __ increment(r13);
2847 __ null_check(rax);
2848 switch (state) {
2849 case itos:
2850 __ movl(rax, Address(rax, rbx, Address::times_1));
2851 break;
2852 case atos:
2853 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2854 __ verify_oop(rax);
2855 break;
2856 case ftos:
2857 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2858 break;
2859 default:
2860 ShouldNotReachHere();
2861 }
2863 // [jk] not needed currently
2864 // if (os::is_MP()) {
2865 // Label notVolatile;
2866 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2867 // in_bytes(ConstantPoolCache::base_offset() +
2868 // ConstantPoolCacheEntry::flags_offset())));
2869 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2870 // __ testl(rdx, 0x1);
2871 // __ jcc(Assembler::zero, notVolatile);
2872 // __ membar(Assembler::LoadLoad);
2873 // __ bind(notVolatile);
2874 // }
2876 __ decrement(r13);
2877 }
2881 //-----------------------------------------------------------------------------
2882 // Calls
2884 void TemplateTable::count_calls(Register method, Register temp) {
2885 // implemented elsewhere
2886 ShouldNotReachHere();
2887 }
2889 void TemplateTable::prepare_invoke(int byte_no,
2890 Register method, // linked method (or i-klass)
2891 Register index, // itable index, MethodType, etc.
2892 Register recv, // if caller wants to see it
2893 Register flags // if caller wants to test it
2894 ) {
2895 // determine flags
2896 const Bytecodes::Code code = bytecode();
2897 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2898 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2899 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2900 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2901 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2902 const bool load_receiver = (recv != noreg);
2903 const bool save_flags = (flags != noreg);
2904 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2905 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2906 assert(flags == noreg || flags == rdx, "");
2907 assert(recv == noreg || recv == rcx, "");
2909 // setup registers & access constant pool cache
2910 if (recv == noreg) recv = rcx;
2911 if (flags == noreg) flags = rdx;
2912 assert_different_registers(method, index, recv, flags);
2914 // save 'interpreter return address'
2915 __ save_bcp();
2917 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2919 // maybe push appendix to arguments (just before return address)
2920 if (is_invokedynamic || is_invokehandle) {
2921 Label L_no_push;
2922 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2923 __ jcc(Assembler::zero, L_no_push);
2924 // Push the appendix as a trailing parameter.
2925 // This must be done before we get the receiver,
2926 // since the parameter_size includes it.
2927 __ push(rbx);
2928 __ mov(rbx, index);
2929 __ load_resolved_reference_at_index(index, rbx);
2930 __ pop(rbx);
2931 __ push(index); // push appendix (MethodType, CallSite, etc.)
2932 __ bind(L_no_push);
2933 }
2935 // load receiver if needed (after appendix is pushed so parameter size is correct)
2936 // Note: no return address pushed yet
2937 if (load_receiver) {
2938 __ movl(recv, flags);
2939 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2940 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2941 const int receiver_is_at_end = -1; // back off one slot to get receiver
2942 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2943 __ movptr(recv, recv_addr);
2944 __ verify_oop(recv);
2945 }
2947 if (save_flags) {
2948 __ movl(r13, flags);
2949 }
2951 // compute return type
2952 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2953 // Make sure we don't need to mask flags after the above shift
2954 ConstantPoolCacheEntry::verify_tos_state_shift();
2955 // load return address
2956 {
2957 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2958 (address)Interpreter::return_5_addrs_by_index_table() :
2959 (address)Interpreter::return_3_addrs_by_index_table();
2960 ExternalAddress table(table_addr);
2961 __ lea(rscratch1, table);
2962 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2963 }
2965 // push return address
2966 __ push(flags);
2968 // Restore the flags value from the constant pool cache, and restore r13
2969 // for later null checks. r13 is the bytecode pointer
2970 if (save_flags) {
2971 __ movl(flags, r13);
2972 __ restore_bcp();
2973 }
2974 }
2977 void TemplateTable::invokevirtual_helper(Register index,
2978 Register recv,
2979 Register flags) {
2980 // Uses temporary registers rax, rdx
2981 assert_different_registers(index, recv, rax, rdx);
2982 assert(index == rbx, "");
2983 assert(recv == rcx, "");
2985 // Test for an invoke of a final method
2986 Label notFinal;
2987 __ movl(rax, flags);
2988 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2989 __ jcc(Assembler::zero, notFinal);
2991 const Register method = index; // method must be rbx
2992 assert(method == rbx,
2993 "Method* must be rbx for interpreter calling convention");
2995 // do the call - the index is actually the method to call
2996 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
2998 // It's final, need a null check here!
2999 __ null_check(recv);
3001 // profile this call
3002 __ profile_final_call(rax);
3004 __ jump_from_interpreted(method, rax);
3006 __ bind(notFinal);
3008 // get receiver klass
3009 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3010 __ load_klass(rax, recv);
3012 // profile this call
3013 __ profile_virtual_call(rax, r14, rdx);
3015 // get target Method* & entry point
3016 __ lookup_virtual_method(rax, index, method);
3017 __ jump_from_interpreted(method, rdx);
3018 }
3021 void TemplateTable::invokevirtual(int byte_no) {
3022 transition(vtos, vtos);
3023 assert(byte_no == f2_byte, "use this argument");
3024 prepare_invoke(byte_no,
3025 rbx, // method or vtable index
3026 noreg, // unused itable index
3027 rcx, rdx); // recv, flags
3029 // rbx: index
3030 // rcx: receiver
3031 // rdx: flags
3033 invokevirtual_helper(rbx, rcx, rdx);
3034 }
3037 void TemplateTable::invokespecial(int byte_no) {
3038 transition(vtos, vtos);
3039 assert(byte_no == f1_byte, "use this argument");
3040 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3041 rcx); // get receiver also for null check
3042 __ verify_oop(rcx);
3043 __ null_check(rcx);
3044 // do the call
3045 __ profile_call(rax);
3046 __ jump_from_interpreted(rbx, rax);
3047 }
3050 void TemplateTable::invokestatic(int byte_no) {
3051 transition(vtos, vtos);
3052 assert(byte_no == f1_byte, "use this argument");
3053 prepare_invoke(byte_no, rbx); // get f1 Method*
3054 // do the call
3055 __ profile_call(rax);
3056 __ jump_from_interpreted(rbx, rax);
3057 }
3059 void TemplateTable::fast_invokevfinal(int byte_no) {
3060 transition(vtos, vtos);
3061 assert(byte_no == f2_byte, "use this argument");
3062 __ stop("fast_invokevfinal not used on amd64");
3063 }
3065 void TemplateTable::invokeinterface(int byte_no) {
3066 transition(vtos, vtos);
3067 assert(byte_no == f1_byte, "use this argument");
3068 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3069 rcx, rdx); // recv, flags
3071 // rax: interface klass (from f1)
3072 // rbx: itable index (from f2)
3073 // rcx: receiver
3074 // rdx: flags
3076 // Special case of invokeinterface called for virtual method of
3077 // java.lang.Object. See cpCacheOop.cpp for details.
3078 // This code isn't produced by javac, but could be produced by
3079 // another compliant Java compiler.
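// For example, given "void f(Comparable c) { c.hashCode(); }", a
// compliant compiler could emit invokeinterface for Comparable.hashCode
// even though hashCode is a virtual method inherited from Object.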
3080 Label notMethod;
3081 __ movl(r14, rdx);
3082 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3083 __ jcc(Assembler::zero, notMethod);
3085 invokevirtual_helper(rbx, rcx, rdx);
3086 __ bind(notMethod);
3088 // Get receiver klass into rdx - also a null check
3089 __ restore_locals(); // restore r14
3090 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3091 __ load_klass(rdx, rcx);
3093 // profile this call
3094 __ profile_virtual_call(rdx, r13, r14);
3096 Label no_such_interface, no_such_method;
3098 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3099 rdx, rax, rbx,
3100 // outputs: method, scan temp. reg
3101 rbx, r13,
3102 no_such_interface);
3104 // rbx: Method* to call
3105 // rcx: receiver
3106 // Check for abstract method error
3107 // Note: This should be done more efficiently via a throw_abstract_method_error
3108 // interpreter entry point and a conditional jump to it in case of a null
3109 // method.
3110 __ testptr(rbx, rbx);
3111 __ jcc(Assembler::zero, no_such_method);
3113 // do the call
3114 // rcx: receiver
3115 // rbx: Method*
3116 __ jump_from_interpreted(rbx, rdx);
3117 __ should_not_reach_here();
3119 // exception handling code follows...
3120 // note: must restore interpreter registers to canonical
3121 // state for exception handling to work correctly!
3123 __ bind(no_such_method);
3124 // throw exception
3125 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3126 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3127 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3128 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3129 // the call_VM checks for exception, so we should never return here.
3130 __ should_not_reach_here();
3132 __ bind(no_such_interface);
3133 // throw exception
3134 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3135 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3136 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3137 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3138 InterpreterRuntime::throw_IncompatibleClassChangeError));
3139 // the call_VM checks for exception, so we should never return here.
3140 __ should_not_reach_here();
3141 }
3144 void TemplateTable::invokehandle(int byte_no) {
3145 transition(vtos, vtos);
3146 assert(byte_no == f1_byte, "use this argument");
3147 const Register rbx_method = rbx; // f2
3148 const Register rax_mtype = rax; // f1
3149 const Register rcx_recv = rcx;
3150 const Register rdx_flags = rdx;
3152 if (!EnableInvokeDynamic) {
3153 // rewriter does not generate this bytecode
3154 __ should_not_reach_here();
3155 return;
3156 }
3158 prepare_invoke(byte_no,
3159 rbx_method, rax_mtype, // get f2 Method*, f1 MethodType
3160 rcx_recv);
3161 __ verify_method_ptr(rbx_method);
3162 __ verify_oop(rcx_recv);
3163 __ null_check(rcx_recv);
3165 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3167 // FIXME: profile the LambdaForm also
3168 __ profile_final_call(rax);
3170 __ jump_from_interpreted(rbx_method, rdx);
3171 }
3174 void TemplateTable::invokedynamic(int byte_no) {
3175 transition(vtos, vtos);
3176 assert(byte_no == f1_byte, "use this argument");
3178 if (!EnableInvokeDynamic) {
3179 // We should not encounter this bytecode if !EnableInvokeDynamic.
3180 // The verifier will stop it. However, if we get past the verifier,
3181 // this will stop the thread in a reasonable way, without crashing the JVM.
3182 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3183 InterpreterRuntime::throw_IncompatibleClassChangeError));
3184 // the call_VM checks for exception, so we should never return here.
3185 __ should_not_reach_here();
3186 return;
3187 }
3189 const Register rbx_method = rbx;
3190 const Register rax_callsite = rax;
3192 prepare_invoke(byte_no, rbx_method, rax_callsite);
3194 // rax: CallSite object (from cpool->resolved_references[])
3195 // rbx: MH.linkToCallSite method (from f2)
3197 // Note: rax_callsite is already pushed by prepare_invoke
3199 // %%% should make a type profile for any invokedynamic that takes a ref argument
3200 // profile this call
3201 __ profile_call(r13);
3203 __ verify_oop(rax_callsite);
3205 __ jump_from_interpreted(rbx_method, rdx);
3206 }
3209 //-----------------------------------------------------------------------------
3210 // Allocation
3212 void TemplateTable::_new() {
3213 transition(vtos, atos);
3214 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3215 Label slow_case;
3216 Label done;
3217 Label initialize_header;
3218 Label initialize_object; // including clearing the fields
3219 Label allocate_shared;
3221 __ get_cpool_and_tags(rsi, rax);
3222 // Make sure the class we're about to instantiate has been resolved.
3223 // This is done before loading InstanceKlass to be consistent with the order
3224 // in which the constant pool is updated (see ConstantPool::klass_at_put)
3225 const int tags_offset = Array<u1>::base_offset_in_bytes();
3226 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3227 JVM_CONSTANT_Class);
3228 __ jcc(Assembler::notEqual, slow_case);
3230 // get InstanceKlass
3231 __ movptr(rsi, Address(rsi, rdx,
3232 Address::times_8, sizeof(ConstantPool)));
3234 // make sure klass is initialized & doesn't have finalizer
3235 // make sure klass is fully initialized
3236 __ cmpb(Address(rsi,
3237 InstanceKlass::init_state_offset()),
3238 InstanceKlass::fully_initialized);
3239 __ jcc(Assembler::notEqual, slow_case);
3241 // get instance_size in InstanceKlass (scaled to a count of bytes)
3242 __ movl(rdx,
3243 Address(rsi,
3244 Klass::layout_helper_offset()));
3245 // test to see if it has a finalizer or is malformed in some way
3246 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3247 __ jcc(Assembler::notZero, slow_case);
3249 // Allocate the instance
3250 // 1) Try to allocate in the TLAB
3251 // 2) if fail and the object is large allocate in the shared Eden
3252 // 3) if the above fails (or is not applicable), go to a slow case
3253 // (creates a new TLAB, etc.)
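// In pseudo-code, the fast paths generated below are roughly:
//   if (UseTLAB && tlab_top + size <= tlab_end) {
//     obj = tlab_top; tlab_top += size;          // bump the TLAB
//   } else if (allow_shared_alloc) {
//     CAS-bump the shared eden top (allocate_shared below);
//   } else {
//     call InterpreterRuntime::_new;             // slow case
//   }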
3255 const bool allow_shared_alloc =
3256 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3258 if (UseTLAB) {
3259 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3260 __ lea(rbx, Address(rax, rdx, Address::times_1));
3261 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3262 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3263 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3264 if (ZeroTLAB) {
3265 // the fields have been already cleared
3266 __ jmp(initialize_header);
3267 } else {
3268 // initialize both the header and fields
3269 __ jmp(initialize_object);
3270 }
3271 }
3273 // Allocation in the shared Eden, if allowed.
3274 //
3275 // rdx: instance size in bytes
3276 if (allow_shared_alloc) {
3277 __ bind(allocate_shared);
3279 ExternalAddress top((address)Universe::heap()->top_addr());
3280 ExternalAddress end((address)Universe::heap()->end_addr());
3282 const Register RtopAddr = rscratch1;
3283 const Register RendAddr = rscratch2;
3285 __ lea(RtopAddr, top);
3286 __ lea(RendAddr, end);
3287 __ movptr(rax, Address(RtopAddr, 0));
3289 // For retries, rax gets set by cmpxchgq
3290 Label retry;
3291 __ bind(retry);
3292 __ lea(rbx, Address(rax, rdx, Address::times_1));
3293 __ cmpptr(rbx, Address(RendAddr, 0));
3294 __ jcc(Assembler::above, slow_case);
3296 // Compare rax with the current top addr; if they are still equal, store the
3297 // new top addr (in rbx) at the top addr pointer. Sets ZF if they were
3298 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3299 //
3300 // rax: object begin
3301 // rbx: object end
3302 // rdx: instance size in bytes
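// C-like sketch of the retry loop (lock cmpxchg reloads rax with the
// current top on failure, so the loop re-enters with a fresh value):
//   do {
//     new_top = obj + size;                    // rbx = rax + rdx
//     if (new_top > *end_addr) goto slow_case;
//   } while (!CAS(top_addr, /*expected*/ obj, new_top));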
3303 if (os::is_MP()) {
3304 __ lock();
3305 }
3306 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3308 // if someone beat us on the allocation, try again, otherwise continue
3309 __ jcc(Assembler::notEqual, retry);
3311 __ incr_allocated_bytes(r15_thread, rdx, 0);
3312 }
3314 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3315 // The object is initialized before the header. If the object size is
3316 // zero, go directly to the header initialization.
3317 __ bind(initialize_object);
3318 __ decrementl(rdx, sizeof(oopDesc));
3319 __ jcc(Assembler::zero, initialize_header);
3321 // Initialize object fields
3322 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3323 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
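// The loop below zeroes the instance body one 8-byte word at a time,
// from the last word down to the first; in C-like form:
//   for (i = body_words; i != 0; i--)
//     ((jlong*)obj)[header_words + i - 1] = 0;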
3324 {
3325 Label loop;
3326 __ bind(loop);
3327 __ movq(Address(rax, rdx, Address::times_8,
3328 sizeof(oopDesc) - oopSize),
3329 rcx);
3330 __ decrementl(rdx);
3331 __ jcc(Assembler::notZero, loop);
3332 }
3334 // initialize object header only.
3335 __ bind(initialize_header);
3336 if (UseBiasedLocking) {
3337 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3338 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3339 } else {
3340 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3341 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3342 }
3343 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3344 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3345 __ store_klass(rax, rsi); // store klass last
3347 {
3348 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3349 // Trigger dtrace event for fastpath
3350 __ push(atos); // save the return value
3351 __ call_VM_leaf(
3352 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3353 __ pop(atos); // restore the return value
3355 }
3356 __ jmp(done);
3357 }
3360 // slow case
3361 __ bind(slow_case);
3362 __ get_constant_pool(c_rarg1);
3363 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3364 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3365 __ verify_oop(rax);
3367 // continue
3368 __ bind(done);
3369 }
3371 void TemplateTable::newarray() {
3372 transition(itos, atos);
3373 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3374 __ movl(c_rarg2, rax);
3375 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3376 c_rarg1, c_rarg2);
3377 }
3379 void TemplateTable::anewarray() {
3380 transition(itos, atos);
3381 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3382 __ get_constant_pool(c_rarg1);
3383 __ movl(c_rarg3, rax);
3384 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3385 c_rarg1, c_rarg2, c_rarg3);
3386 }
3388 void TemplateTable::arraylength() {
3389 transition(atos, itos);
3390 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3391 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3392 }
3394 void TemplateTable::checkcast() {
3395 transition(atos, atos);
3396 Label done, is_null, ok_is_subtype, quicked, resolved;
3397 __ testptr(rax, rax); // object is in rax
3398 __ jcc(Assembler::zero, is_null);
3400 // Get cpool & tags index
3401 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3402 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3403 // See if bytecode has already been quicked
3404 __ cmpb(Address(rdx, rbx,
3405 Address::times_1,
3406 Array<u1>::base_offset_in_bytes()),
3407 JVM_CONSTANT_Class);
3408 __ jcc(Assembler::equal, quicked);
3409 __ push(atos); // save receiver for result, and for GC
3410 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3411 // vm_result_2 has metadata result
3412 __ get_vm_result_2(rax, r15_thread);
3413 __ pop_ptr(rdx); // restore receiver
3414 __ jmpb(resolved);
3416 // Get superklass in rax and subklass in rbx
3417 __ bind(quicked);
3418 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3419 __ movptr(rax, Address(rcx, rbx,
3420 Address::times_8, sizeof(ConstantPool)));
3422 __ bind(resolved);
3423 __ load_klass(rbx, rdx);
3425 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3426 // Superklass in rax. Subklass in rbx.
3427 __ gen_subtype_check(rbx, ok_is_subtype);
3429 // Come here on failure
3430 __ push_ptr(rdx);
3431 // object is at TOS
3432 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3434 // Come here on success
3435 __ bind(ok_is_subtype);
3436 __ mov(rax, rdx); // Restore object in rdx
3438 // Collect counts on whether this check-cast sees NULLs a lot or not.
3439 if (ProfileInterpreter) {
3440 __ jmp(done);
3441 __ bind(is_null);
3442 __ profile_null_seen(rcx);
3443 } else {
3444 __ bind(is_null); // same as 'done'
3445 }
3446 __ bind(done);
3447 }
3449 void TemplateTable::instanceof() {
3450 transition(atos, itos);
3451 Label done, is_null, ok_is_subtype, quicked, resolved;
3452 __ testptr(rax, rax);
3453 __ jcc(Assembler::zero, is_null);
3455 // Get cpool & tags index
3456 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3457 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3458 // See if bytecode has already been quicked
3459 __ cmpb(Address(rdx, rbx,
3460 Address::times_1,
3461 Array<u1>::base_offset_in_bytes()),
3462 JVM_CONSTANT_Class);
3463 __ jcc(Assembler::equal, quicked);
3465 __ push(atos); // save receiver for result, and for GC
3466 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3467 // vm_result_2 has metadata result
3468 __ get_vm_result_2(rax, r15_thread);
3469 __ pop_ptr(rdx); // restore receiver
3470 __ verify_oop(rdx);
3471 __ load_klass(rdx, rdx);
3472 __ jmpb(resolved);
3474 // Get superklass in rax and subklass in rdx
3475 __ bind(quicked);
3476 __ load_klass(rdx, rax);
3477 __ movptr(rax, Address(rcx, rbx,
3478 Address::times_8, sizeof(ConstantPool)));
3480 __ bind(resolved);
3482 // Generate subtype check. Blows rcx, rdi
3483 // Superklass in rax. Subklass in rdx.
3484 __ gen_subtype_check(rdx, ok_is_subtype);
3486 // Come here on failure
3487 __ xorl(rax, rax);
3488 __ jmpb(done);
3489 // Come here on success
3490 __ bind(ok_is_subtype);
3491 __ movl(rax, 1);
3493 // Collect counts on whether this test sees NULLs a lot or not.
3494 if (ProfileInterpreter) {
3495 __ jmp(done);
3496 __ bind(is_null);
3497 __ profile_null_seen(rcx);
3498 } else {
3499 __ bind(is_null); // same as 'done'
3500 }
3501 __ bind(done);
3502 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3503 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3504 }
3506 //-----------------------------------------------------------------------------
3507 // Breakpoints
3508 void TemplateTable::_breakpoint() {
3509 // Note: We get here even if we are single stepping.
3510 // jbug insists on setting breakpoints at every bytecode
3511 // even if we are in single step mode.
3513 transition(vtos, vtos);
3515 // get the unpatched byte code
3516 __ get_method(c_rarg1);
3517 __ call_VM(noreg,
3518 CAST_FROM_FN_PTR(address,
3519 InterpreterRuntime::get_original_bytecode_at),
3520 c_rarg1, r13);
3521 __ mov(rbx, rax);
3523 // post the breakpoint event
3524 __ get_method(c_rarg1);
3525 __ call_VM(noreg,
3526 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3527 c_rarg1, r13);
3529 // complete the execution of original bytecode
3530 __ dispatch_only_normal(vtos);
3531 }
3533 //-----------------------------------------------------------------------------
3534 // Exceptions
3536 void TemplateTable::athrow() {
3537 transition(atos, vtos);
3538 __ null_check(rax);
3539 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3540 }
3542 //-----------------------------------------------------------------------------
3543 // Synchronization
3544 //
3545 // Note: monitorenter & exit are symmetric routines; which is reflected
3546 // in the assembly code structure as well
3547 //
3548 // Stack layout:
3549 //
3550 // [expressions ] <--- rsp = expression stack top
3551 // ..
3552 // [expressions ]
3553 // [monitor entry] <--- monitor block top = expression stack bot
3554 // ..
3555 // [monitor entry]
3556 // [frame data ] <--- monitor block bot
3557 // ...
3558 // [saved rbp ] <--- rbp
3559 void TemplateTable::monitorenter() {
3560 transition(atos, vtos);
3562 // check for NULL object
3563 __ null_check(rax);
3565 const Address monitor_block_top(
3566 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3567 const Address monitor_block_bot(
3568 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3569 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3571 Label allocated;
3573 // initialize entry pointer
3574 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3576 // find a free slot in the monitor block (result in c_rarg1)
3577 {
3578 Label entry, loop, exit;
3579 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3580 // starting with top-most entry
3581 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3582 // of monitor block
3583 __ jmpb(entry);
3585 __ bind(loop);
3586 // check if current entry is used
3587 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3588 // if not used then remember entry in c_rarg1
3589 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3590 // check if current entry is for same object
3591 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3592 // if same object then stop searching
3593 __ jccb(Assembler::equal, exit);
3594 // otherwise advance to next entry
3595 __ addptr(c_rarg3, entry_size);
3596 __ bind(entry);
3597 // check if bottom reached
3598 __ cmpptr(c_rarg3, c_rarg2);
3599 // if not at bottom then check this entry
3600 __ jcc(Assembler::notEqual, loop);
3601 __ bind(exit);
3602 }
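// C-like sketch of the search above:
//   for (e = monitor_top; e != monitor_bot; e = next(e)) {
//     if (e->obj == NULL) free = e;   // remember a free slot
//     if (e->obj == obj)  break;      // recursive lock on the same object
//   }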
3604 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3605 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3607 // allocate one if there's no free slot
3608 {
3609 Label entry, loop;
3610 // 1. compute new pointers // rsp: old expression stack top
3611 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3612 __ subptr(rsp, entry_size); // move expression stack top
3613 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3614 __ mov(c_rarg3, rsp); // set start value for copy loop
3615 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3616 __ jmp(entry);
3617 // 2. move expression stack contents
3618 __ bind(loop);
3619 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3620 // word from old location
3621 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3622 __ addptr(c_rarg3, wordSize); // advance to next word
3623 __ bind(entry);
3624 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3625 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3626 // copy next word
3627 }
3629 // call run-time routine
3630 // c_rarg1: points to monitor entry
3631 __ bind(allocated);
3633 // Increment bcp to point to the next bytecode, so exception
3634 // handling for asynchronous exceptions works correctly.
3635 // The object has already been popped from the stack, so the
3636 // expression stack looks correct.
3637 __ increment(r13);
3639 // store object
3640 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3641 __ lock_object(c_rarg1);
3643 // check to make sure this monitor doesn't cause stack overflow after locking
3644 __ save_bcp(); // in case of exception
3645 __ generate_stack_overflow_check(0);
3647 // The bcp has already been incremented. Just need to dispatch to
3648 // next instruction.
3649 __ dispatch_next(vtos);
3650 }
3653 void TemplateTable::monitorexit() {
3654 transition(atos, vtos);
3656 // check for NULL object
3657 __ null_check(rax);
3659 const Address monitor_block_top(
3660 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3661 const Address monitor_block_bot(
3662 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3663 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3665 Label found;
3667 // find matching slot
3668 {
3669 Label entry, loop;
3670 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3671 // starting with top-most entry
3672 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3673 // of monitor block
3674 __ jmpb(entry);
3676 __ bind(loop);
3677 // check if current entry is for same object
3678 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3679 // if same object then stop searching
3680 __ jcc(Assembler::equal, found);
3681 // otherwise advance to next entry
3682 __ addptr(c_rarg1, entry_size);
3683 __ bind(entry);
3684 // check if bottom reached
3685 __ cmpptr(c_rarg1, c_rarg2);
3686 // if not at bottom then check this entry
3687 __ jcc(Assembler::notEqual, loop);
3688 }
3690 // Error handling: unlocking was not block-structured
3691 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3692 InterpreterRuntime::throw_illegal_monitor_state_exception));
3693 __ should_not_reach_here();
3695 // call run-time routine
3696 // c_rarg1: points to monitor entry
3697 __ bind(found);
3698 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3699 __ unlock_object(c_rarg1);
3700 __ pop_ptr(rax); // discard object
3701 }
3704 // Wide instructions
3705 void TemplateTable::wide() {
3706 transition(vtos, vtos);
3707 __ load_unsigned_byte(rbx, at_bcp(1));
3708 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3709 __ jmp(Address(rscratch1, rbx, Address::times_8));
3710 // Note: the r13 increment step is part of the individual wide
3711 // bytecode implementations
3712 }
3715 // Multi arrays
3716 void TemplateTable::multianewarray() {
3717 transition(vtos, atos);
3718 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3719 // the last dim is on top of the stack; we want the address of the first one:
3720 // first_addr = last_addr + (ndims - 1) * wordSize
3721 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3722 call_VM(rax,
3723 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3724 c_rarg1);
3725 __ load_unsigned_byte(rbx, at_bcp(3));
3726 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3727 }
3728 #endif // !CC_INTERP