Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from
// rsp(); it is not different for category 1 values.
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                rcx /* thread */,
                                rsi /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   rcx /* thread */,
                                   rbx /* tmp */,
                                   rsi /* tmp2 */);
        }
        __ restore_bcp();
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

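// Illustrative sketch only (not generated code): the store paths above
// follow the usual write-barrier shapes. In C-like pseudocode, with a
// hypothetical card table 'byte_map':
//
//   // G1: pre-barrier logs the old value, post-barrier logs the new card
//   pre_barrier(field);                              // SATB: enqueue *field if marking is active
//   *field = new_val;
//   if (new_val != NULL) post_barrier(field, new_val);
//
//   // Card table: store, then dirty the card covering the field (or the
//   // object/array base when 'precise' is false)
//   *field = new_val;
//   byte_map[(uintptr_t)field >> card_shift] = dirty_card;
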
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
        __ movl(bc_reg, bc);
        __ cmpl(temp_reg, (int) 0);
        __ jcc(Assembler::zero, L_patch_done);  // don't patch
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      // the pair bytecodes have already done the load.
      if (load_bc_into_bc_reg) {
        __ movl(bc_reg, bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
  __ jccb(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}

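// Illustrative sketch only: bytecode quickening overwrites the opcode byte
// in the method's bytecode stream in place. In C-like pseudocode, with
// 'bcp' standing for the bytecode pointer:
//
//   if (*bcp == Bytecodes::_breakpoint)
//     set_original_bytecode_at(method, bcp, fast_bc); // JVMTI keeps the original
//   else
//     *bcp = fast_bc;  // e.g. _aload_0 becomes _fast_aload_0
//
// so the next execution dispatches straight to the quickened template.
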
//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

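// Illustrative sketch only: sipush's two operand bytes at bcp+1..bcp+2 form
// a big-endian signed 16-bit immediate. Loading them into the low half of
// rax, byte-swapping the full 32-bit register, and arithmetic-shifting
// right by 16 is equivalent to this C:
//
//   int16_t imm = (int16_t)((bcp[1] << 8) | bcp[2]); // bcp is hypothetical here
//   int32_t tos = imm;                               // sign-extended into rax
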
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  { Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and that the
    // current and next bytecodes therefore form an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

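// Illustrative sketch only: with RewriteFrequentPairs, a stream such as
//
//   iload 4; iload 5; caload
//
// quickens in stages. Executing "iload 5" sees the following caload and
// rewrites itself to _fast_icaload; a later execution of "iload 4" sees an
// already-quickened successor and rewrites itself to plain _fast_iload, so
// the next-opcode checks disappear from the hot path.
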
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ fld_d(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

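// Illustrative sketch only: the wide form keeps a big-endian u2 local index
// at bcp+2..bcp+3. Loading 32 bits, byte-swapping, then logically shifting
// right by 16 yields the zero-extended index; negating it matches the
// downward-growing locals area addressed from rdi. In C terms:
//
//   uint16_t index = (bcp[2] << 8) | bcp[3];    // bcp is hypothetical here
//   local = *(rdi - index * stackElementSize);  // what iaddress(reg) computes
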
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ fld_d(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx,
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx, for exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

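// Illustrative sketch only: the unsigned aboveEqual branch folds both range
// tests into one compare, because a negative index reinterpreted as
// unsigned is larger than any legal array length:
//
//   if ((uint32_t)index >= (uint32_t)array->length())
//     throw_ArrayIndexOutOfBoundsException();  // aberrant index left in rbx
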
void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax,: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx,: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  // can do better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  // can do better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax,: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx,
  // rax,: index
  // can do better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ fld_d(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);
    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);
    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ fstp_d(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
  wide_istore();
}

void TemplateTable::wide_dstore() {
  wide_lstore();
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax,: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax,: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx,
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ load_klass(rbx, rax);
  // Move superklass into EAX
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset()));
  // Compress array+index*wordSize+12 into a single register. Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows ECX. Resets EDI to locals.
  // Superklass in EAX. Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL, (noreg means NULL to do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

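// Illustrative sketch only: aastore's type check reduces, in Java-level
// terms, to
//
//   if (value != null &&
//       !array.getClass().getComponentType().isInstance(value))
//     throw new ArrayStoreException();
//   array[index] = value;  // via do_oop_store, with the GC write barrier
//
// A null store skips the subtype check and takes the noreg path above.
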
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax,: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax,: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx,
  // rbx,: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ fstp_d(daddress(n));
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2*Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  :                   __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  :                   __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and :                   __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  :                   __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor :                   __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
    case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
    case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
    default   : ShouldNotReachHere();
  }
}

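// Illustrative note: the hardware's 5-bit shift-count masking matches the
// JLS rule that int shifts use only the low five bits of the count, so no
// extra masking code is needed. For example:
//
//   int x = 1 << 35;  // JLS: count is 35 & 31 == 3, so x == 8
//
// which is exactly what "shll rax" produces when cl holds 35.
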
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop float thing off
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ fadd_d (at_rsp());                break;
    case sub: __ fsubr_d(at_rsp());                break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);  // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

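// Illustrative sketch only: the wide iinc operands are laid out big-endian
// after the opcode pair, i.e.
//
//   wide(0xc4) iinc(0x84) index_hi index_lo const_hi const_lo
//
// so at_bcp(2) holds the u2 local index and at_bcp(4) the s2 increment; the
// bswap/sar pair above sign-extends the increment exactly as
// (int)(int16_t)((bcp[4] << 8) | bcp[5]) would in C.
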
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  { TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);         // add one slot for d2ieee()
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);     // truncate upper 24 bits
      __ sarl(rax, 24);     // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF); // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);     // truncate upper 16 bits
      __ sarl(rax, 16);     // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);         // reserve space for f2ieee()
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    default:
      ShouldNotReachHere();
  }
}

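// Illustrative note: the shift pairs implement the JLS narrowing casts
// without branches. For i2b, shll 24 / sarl 24 is equivalent to
//
//   int v = 0x1234;
//   int b = (byte)v;  // == 0x34; i2s and i2c follow the same pattern
//
// where the left shift discards the high bits and the arithmetic right
// shift replicates the new sign bit back down.
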
void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ fld_s(at_rsp());
  } else {
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                 // ECX holds method
  __ profile_taken_branch(rax, rbx);  // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = Method::backedge_counter_offset() + InvocationCounter::counter_offset();
  const ByteSize inv_offset = Method::invocation_counter_offset() + InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up EDX with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax,
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax,: MDO
    // rbx,: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in Method*
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against rbx, which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);
          // When ProfileInterpreter is on, the backedge_count comes from the
          // MethodData*, whose value does not get reset on the call to
          // frequency_counter_overflow(). To avoid excessive calls to the overflow
          // routine while the method is being compiled, add a second test to make
          // sure the overflow function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andptr(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);
        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against rax, which is the sum of the counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax,: return bci for jsr's, unused otherwise
  // rbx,: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax,: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx,: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax,
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      const Register thread = rcx;
      __ get_thread(thread);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax, is OSR buffer, move it to expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();       // remove frame anchor
      __ pop(rdi);      // get return address
      __ mov(rsp, rdx); // set sp to sender sp

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp,-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}

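// Illustrative sketch only: increment_mask_and_jump implements the tiered
// backedge-notification policy roughly as
//
//   counter += InvocationCounter::count_increment;
//   if ((counter & mask) == 0)         // mask preserves the low status bits
//     goto backedge_counter_overflow;  // notify the runtime once per
//                                      // 2^Tier0BackedgeNotifyFreqLog backedges
//
// so the runtime is consulted about compilation periodically rather than on
// every backward branch.
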
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, Method::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, Method::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rsi
  __ lea(rbx, at_bcp(wordSize));
  __ andptr(rbx, -wordSize);
  // load lo & hi
  __ movl(rcx, Address(rbx, 1 * wordSize));
  __ movl(rdx, Address(rbx, 2 * wordSize));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jccb(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jccb(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

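// Illustrative sketch only: after alignment rbx points at the tableswitch
// operands, which the class file stores big-endian as
//
//   s4 default_offset; s4 lo; s4 hi; s4 jump_offsets[hi - lo + 1];
//
// so a key k in [lo, hi] dispatches to bcp + jump_offsets[k - lo], and
// anything outside the range to bcp + default_offset.
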
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswapl rax, so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align rsi
  __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
  __ andptr(rbx, -wordSize);
  // set counter
  __ movl(rcx, Address(rbx, wordSize));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
  __ jccb(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmpb(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
}

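// Illustrative sketch only: the lookupswitch operands are laid out as
//
//   s4 default_offset; s4 npairs; struct { s4 match; s4 offset; } pairs[npairs];
//
// Byte-swapping the key once up front lets the linear scan above compare it
// directly against the big-endian match words, instead of bswapping every
// table entry inside the loop.
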
void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
1901 // // where a stands for the array and assuming that the (nonexistent)
1902 // // element a[n] is infinitely big.
1903 // int h = (i + j) >> 1;
1904 // // i < h < j
1905 // if (key < array[h].fast_match()) {
1906 // j = h;
1907 // } else {
1908 // i = h;
1909 // }
1910 // }
1911 // // R: a[i] <= key < a[i+1] or Q
1912 // // (i.e., if key is within array, i is the correct index)
1913 // return i;
1914 // }
1916 // register allocation
1917 const Register key = rax; // already set (tosca)
1918 const Register array = rbx;
1919 const Register i = rcx;
1920 const Register j = rdx;
1921 const Register h = rdi; // needs to be restored
1922 const Register temp = rsi;
1923 // setup array
1924 __ save_bcp();
1926 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1927 __ andptr(array, -wordSize);
1928 // initialize i & j
1929 __ xorl(i, i); // i = 0;
1930 __ movl(j, Address(array, -wordSize)); // j = length(array);
1931 // Convert j into native byte ordering
1932 __ bswapl(j);
1933 // and start
1934 Label entry;
1935 __ jmp(entry);
1937 // binary search loop
1938 { Label loop;
1939 __ bind(loop);
1940 // int h = (i + j) >> 1;
1941 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1942 __ sarl(h, 1); // h = (i + j) >> 1;
1943 // if (key < array[h].fast_match()) {
1944 // j = h;
1945 // } else {
1946 // i = h;
1947 // }
1948 // Convert array[h].match to native byte-ordering before compare
1949 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1950 __ bswapl(temp);
1951 __ cmpl(key, temp);
1952 // j = h if (key < array[h].fast_match())
1953 __ cmov32(Assembler::less , j, h);
1954 // i = h if (key >= array[h].fast_match())
1955 __ cmov32(Assembler::greaterEqual, i, h);
1956 // while (i+1 < j)
1957 __ bind(entry);
1958 __ leal(h, Address(i, 1)); // i+1
1959 __ cmpl(h, j); // i+1 < j
1960 __ jcc(Assembler::less, loop);
1961 }
1963 // end of binary search, result index is i (must check again!)
1964 Label default_case;
1965 // Convert array[i].match to native byte-ordering before compare
1966 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1967 __ bswapl(temp);
1968 __ cmpl(key, temp);
1969 __ jcc(Assembler::notEqual, default_case);
1971 // entry found -> j = offset
1972 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1973 __ profile_switch_case(i, key, array);
1974 __ bswapl(j);
1975 LP64_ONLY(__ movslq(j, j));
1976 __ restore_bcp();
1977 __ restore_locals(); // restore rdi
1978 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1980 __ addptr(rsi, j);
1981 __ dispatch_only(vtos);
1983 // default case -> j = default offset
1984 __ bind(default_case);
1985 __ profile_switch_default(i);
1986 __ movl(j, Address(array, -2*wordSize));
1987 __ bswapl(j);
1988 LP64_ONLY(__ movslq(j, j));
1989 __ restore_bcp();
1990 __ restore_locals(); // restore rdi
1991 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1992 __ addptr(rsi, j);
1993 __ dispatch_only(vtos);
1994 }
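// Note on the addressing above: 'array' points at the first
// { s4 match; s4 offset; } pair of the lookupswitch, so each pair is
// 8 bytes (hence Address::times_8), npairs sits at array - wordSize and
// the default offset at array - 2*wordSize.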
1997 void TemplateTable::_return(TosState state) {
1998 transition(state, state);
1999 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2001 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2002 assert(state == vtos, "only valid state");
2003 __ movptr(rax, aaddress(0));
2004 __ load_klass(rdi, rax);
2005 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2006 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2007 Label skip_register_finalizer;
2008 __ jcc(Assembler::zero, skip_register_finalizer);
2010 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2012 __ bind(skip_register_finalizer);
2013 }
2015 __ remove_activation(state, rsi);
2016 __ jmp(rsi);
2017 }
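// Background for the finalizer path above (a hedged sketch, not from this
// file): with RegisterFinalizersAtInit, the rewriter turns the return of
// java.lang.Object.<init> into _return_register_finalizer, so every
// constructed object passes through the code above; the
// JVM_ACC_HAS_FINALIZER test then registers only instances whose class
// actually overrides finalize(), e.g.
//
//   class WithFinalizer {
//     protected void finalize() { /* cleanup */ }   // sets HAS_FINALIZER
//   }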
2020 // ----------------------------------------------------------------------------
2021 // Volatile variables demand their effects be made known to all CPU's in
2022 // order. Store buffers on most chips allow reads & writes to reorder; the
2023 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2024 // memory barrier (i.e., it's not sufficient that the interpreter does not
2025 // reorder volatile references, the hardware also must not reorder them).
2026 //
2027 // According to the new Java Memory Model (JMM):
2028 // (1) All volatiles are serialized wrt to each other.
2029 // ALSO reads & writes act as acquire & release, so:
2030 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2031 // the read float up to before the read. It's OK for non-volatile memory refs
2032 // that happen before the volatile read to float down below it.
2033 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2034 // that happen BEFORE the write float down to after the write. It's OK for
2035 // non-volatile memory refs that happen after the volatile write to float up
2036 // before it.
2037 //
2038 // We only put in barriers around volatile refs (they are expensive), not
2039 // _between_ memory refs (that would require us to track the flavor of the
2040 // previous memory refs). Requirements (2) and (3) require some barriers
2041 // before volatile stores and after volatile loads. These nearly cover
2042 // requirement (1) but miss the volatile-store-volatile-load case. This final
2043 // case is placed after volatile-stores although it could just as well go
2044 // before volatile-loads.
2045 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2046 // Helper function to insert an is-volatile test and memory barrier
2047 if( !os::is_MP() ) return; // Not needed on single CPU
2048 __ membar(order_constraint);
2049 }
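// Illustrative barrier placement implied by the rules above (pseudocode,
// not emitted verbatim by this file):
//
//   volatile load:    ld x;
//                     volatile_barrier(LoadLoad | LoadStore);
//   volatile store:   volatile_barrier(LoadStore | StoreStore);
//                     st x;
//                     volatile_barrier(StoreLoad);
//
// On x86 only the StoreLoad ordering requires an actual instruction (a
// locked RMW or mfence); the others come for free from the hardware model,
// which is why the putfield code below emits a barrier only after
// volatile stores.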
2051 void TemplateTable::resolve_cache_and_index(int byte_no,
2052 Register Rcache,
2053 Register index,
2054 size_t index_size) {
2055 const Register temp = rbx;
2056 assert_different_registers(Rcache, index, temp);
2058 Label resolved;
2059 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2060 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2061 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2062 __ jcc(Assembler::equal, resolved);
2064 // resolve first time through
2065 address entry;
2066 switch (bytecode()) {
2067 case Bytecodes::_getstatic : // fall through
2068 case Bytecodes::_putstatic : // fall through
2069 case Bytecodes::_getfield : // fall through
2070 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2071 case Bytecodes::_invokevirtual : // fall through
2072 case Bytecodes::_invokespecial : // fall through
2073 case Bytecodes::_invokestatic : // fall through
2074 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2075 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2076 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2077 default:
2078 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2079 break;
2080 }
2081 __ movl(temp, (int)bytecode());
2082 __ call_VM(noreg, entry, temp);
2083 // Update registers with resolved info
2084 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2085 __ bind(resolved);
2086 }
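// Conceptually, the code generated above does (illustrative pseudocode):
//
//   if (cache[index].bytecode(byte_no) != bytecode()) {
//     call InterpreterRuntime::resolve_*();   // slow path, fills the entry
//     reload cache and index;                 // the call clobbered them
//   }
//   // fall through: entry is resolved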
2089 // The cache and index registers must be set before the call
2090 void TemplateTable::load_field_cp_cache_entry(Register obj,
2091 Register cache,
2092 Register index,
2093 Register off,
2094 Register flags,
2095 bool is_static = false) {
2096 assert_different_registers(cache, index, flags, off);
2098 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2099 // Field offset
2100 __ movptr(off, Address(cache, index, Address::times_ptr,
2101 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2102 // Flags
2103 __ movl(flags, Address(cache, index, Address::times_ptr,
2104 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2106 // klass overwrite register
2107 if (is_static) {
2108 __ movptr(obj, Address(cache, index, Address::times_ptr,
2109 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2110 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2111 __ movptr(obj, Address(obj, mirror_offset));
2112 }
2113 }
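// Sketch of the ConstantPoolCacheEntry fields read above (conceptual
// layout only; cpCache.hpp is authoritative):
//
//   class ConstantPoolCacheEntry {
//     intptr_t _indices;   // cp index plus resolved bytecode(s)
//     intptr_t _f1;        // e.g. Method*, or Klass* of the field holder
//     intptr_t _f2;        // e.g. field offset, vtable index, Method*
//     intptr_t _flags;     // tos state, volatile/final bits, parameter size
//   };
//
// For a static field the holder Klass* in f1 leads to the java mirror
// (the java.lang.Class instance), which is where static fields live.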
2115 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2116 Register method,
2117 Register itable_index,
2118 Register flags,
2119 bool is_invokevirtual,
2120 bool is_invokevfinal, /*unused*/
2121 bool is_invokedynamic) {
2122 // setup registers
2123 const Register cache = rcx;
2124 const Register index = rdx;
2125 assert_different_registers(method, flags);
2126 assert_different_registers(method, cache, index);
2127 assert_different_registers(itable_index, flags);
2128 assert_different_registers(itable_index, cache, index);
2129 // determine constant pool cache field offsets
2130 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2131 const int method_offset = in_bytes(
2132 ConstantPoolCache::base_offset() +
2133 ((byte_no == f2_byte)
2134 ? ConstantPoolCacheEntry::f2_offset()
2135 : ConstantPoolCacheEntry::f1_offset()));
2136 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2137 ConstantPoolCacheEntry::flags_offset());
2138 // access constant pool cache fields
2139 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2140 ConstantPoolCacheEntry::f2_offset());
2142 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2143 resolve_cache_and_index(byte_no, cache, index, index_size);
2144 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2146 if (itable_index != noreg) {
2147 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2148 }
2149 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2150 }
2153 // The cache and index registers are expected to be set before the call.
2154 // Correct values of the cache and index registers are preserved.
2155 void TemplateTable::jvmti_post_field_access(Register cache,
2156 Register index,
2157 bool is_static,
2158 bool has_tos) {
2159 if (JvmtiExport::can_post_field_access()) {
2160 // Check to see if a field access watch has been set before we take
2161 // the time to call into the VM.
2162 Label L1;
2163 assert_different_registers(cache, index, rax);
2164 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2165 __ testl(rax,rax);
2166 __ jcc(Assembler::zero, L1);
2168 // cache entry pointer
2169 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2170 __ shll(index, LogBytesPerWord);
2171 __ addptr(cache, index);
2172 if (is_static) {
2173 __ xorptr(rax, rax); // NULL object reference
2174 } else {
2175 __ pop(atos); // Get the object
2176 __ verify_oop(rax);
2177 __ push(atos); // Restore stack state
2178 }
2179 // rax,: object pointer or NULL
2180 // cache: cache entry pointer
2181 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2182 rax, cache);
2183 __ get_cache_and_index_at_bcp(cache, index, 1);
2184 __ bind(L1);
2185 }
2186 }
2188 void TemplateTable::pop_and_check_object(Register r) {
2189 __ pop_ptr(r);
2190 __ null_check(r); // for field access must check obj.
2191 __ verify_oop(r);
2192 }
2194 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2195 transition(vtos, vtos);
2197 const Register cache = rcx;
2198 const Register index = rdx;
2199 const Register obj = rcx;
2200 const Register off = rbx;
2201 const Register flags = rax;
2203 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2204 jvmti_post_field_access(cache, index, is_static, false);
2205 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2207 if (!is_static) pop_and_check_object(obj);
2209 const Address lo(obj, off, Address::times_1, 0*wordSize);
2210 const Address hi(obj, off, Address::times_1, 1*wordSize);
2212 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2214 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2215 assert(btos == 0, "change code, btos != 0");
2216 // btos
2217 __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2218 __ jcc(Assembler::notZero, notByte);
2220 __ load_signed_byte(rax, lo );
2221 __ push(btos);
2222 // Rewrite bytecode to be faster
2223 if (!is_static) {
2224 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2225 }
2226 __ jmp(Done);
2228 __ bind(notByte);
2229 // itos
2230 __ cmpl(flags, itos );
2231 __ jcc(Assembler::notEqual, notInt);
2233 __ movl(rax, lo );
2234 __ push(itos);
2235 // Rewrite bytecode to be faster
2236 if (!is_static) {
2237 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2238 }
2239 __ jmp(Done);
2241 __ bind(notInt);
2242 // atos
2243 __ cmpl(flags, atos );
2244 __ jcc(Assembler::notEqual, notObj);
2246 __ movl(rax, lo );
2247 __ push(atos);
2248 if (!is_static) {
2249 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2250 }
2251 __ jmp(Done);
2253 __ bind(notObj);
2254 // ctos
2255 __ cmpl(flags, ctos );
2256 __ jcc(Assembler::notEqual, notChar);
2258 __ load_unsigned_short(rax, lo );
2259 __ push(ctos);
2260 if (!is_static) {
2261 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2262 }
2263 __ jmp(Done);
2265 __ bind(notChar);
2266 // stos
2267 __ cmpl(flags, stos );
2268 __ jcc(Assembler::notEqual, notShort);
2270 __ load_signed_short(rax, lo );
2271 __ push(stos);
2272 if (!is_static) {
2273 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2274 }
2275 __ jmp(Done);
2277 __ bind(notShort);
2278 // ltos
2279 __ cmpl(flags, ltos );
2280 __ jcc(Assembler::notEqual, notLong);
2282 // Generate code as if volatile. There just aren't enough registers to
2283 // save that information and this code is faster than the test.
2284 __ fild_d(lo); // Must load atomically
2285 __ subptr(rsp,2*wordSize); // Make space for store
2286 __ fistp_d(Address(rsp,0));
2287 __ pop(rax);
2288 __ pop(rdx);
2290 __ push(ltos);
2291 // Don't rewrite to _fast_lgetfield for potential volatile case.
2292 __ jmp(Done);
2294 __ bind(notLong);
2295 // ftos
2296 __ cmpl(flags, ftos );
2297 __ jcc(Assembler::notEqual, notFloat);
2299 __ fld_s(lo);
2300 __ push(ftos);
2301 if (!is_static) {
2302 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2303 }
2304 __ jmp(Done);
2306 __ bind(notFloat);
2307 // dtos
2308 __ cmpl(flags, dtos );
2309 __ jcc(Assembler::notEqual, notDouble);
2311 __ fld_d(lo);
2312 __ push(dtos);
2313 if (!is_static) {
2314 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2315 }
2316 __ jmpb(Done);
2318 __ bind(notDouble);
2320 __ stop("Bad state");
2322 __ bind(Done);
2323 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2324 // volatile_barrier( );
2325 }
2328 void TemplateTable::getfield(int byte_no) {
2329 getfield_or_static(byte_no, false);
2330 }
2333 void TemplateTable::getstatic(int byte_no) {
2334 getfield_or_static(byte_no, true);
2335 }
2337 // The cache and index registers are expected to be set before the call.
2338 // The function may destroy various registers, just not the cache and index registers.
2339 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2341 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2343 if (JvmtiExport::can_post_field_modification()) {
2344 // Check to see if a field modification watch has been set before we take
2345 // the time to call into the VM.
2346 Label L1;
2347 assert_different_registers(cache, index, rax);
2348 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2349 __ testl(rax, rax);
2350 __ jcc(Assembler::zero, L1);
2352 // The cache and index registers have already been set.
2353 // This would allow us to eliminate this call, but the cache and index
2354 // registers must then be used consistently after this line.
2355 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2357 if (is_static) {
2358 // Life is simple. Null out the object pointer.
2359 __ xorptr(rbx, rbx);
2360 } else {
2361 // Life is harder. The stack holds the value on top, followed by the object.
2362 // We don't know the size of the value, though; it could be one or two words
2363 // depending on its type. As a result, we must find the type to determine where
2364 // the object is.
2365 Label two_word, valsize_known;
2366 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2367 ConstantPoolCacheEntry::flags_offset())));
2368 __ mov(rbx, rsp);
2369 __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
2370 // Make sure we don't need to mask rcx after the above shift
2371 ConstantPoolCacheEntry::verify_tos_state_shift();
2372 __ cmpl(rcx, ltos);
2373 __ jccb(Assembler::equal, two_word);
2374 __ cmpl(rcx, dtos);
2375 __ jccb(Assembler::equal, two_word);
2376 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2377 __ jmpb(valsize_known);
2379 __ bind(two_word);
2380 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2382 __ bind(valsize_known);
2383 // setup object pointer
2384 __ movptr(rbx, Address(rbx, 0));
2385 }
2386 // cache entry pointer
2387 __ addptr(rax, in_bytes(cp_base_offset));
2388 __ shll(rdx, LogBytesPerWord);
2389 __ addptr(rax, rdx);
2390 // object (tos)
2391 __ mov(rcx, rsp);
2392 // rbx,: object pointer set up above (NULL if static)
2393 // rax,: cache entry pointer
2394 // rcx: jvalue object on the stack
2395 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2396 rbx, rax, rcx);
2397 __ get_cache_and_index_at_bcp(cache, index, 1);
2398 __ bind(L1);
2399 }
2400 }
2403 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2404 transition(vtos, vtos);
2406 const Register cache = rcx;
2407 const Register index = rdx;
2408 const Register obj = rcx;
2409 const Register off = rbx;
2410 const Register flags = rax;
2412 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2413 jvmti_post_field_mod(cache, index, is_static);
2414 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2416 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2417 // volatile_barrier( );
2419 Label notVolatile, Done;
2420 __ movl(rdx, flags);
2421 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2422 __ andl(rdx, 0x1);
2424 // field addresses
2425 const Address lo(obj, off, Address::times_1, 0*wordSize);
2426 const Address hi(obj, off, Address::times_1, 1*wordSize);
2428 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2430 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2431 assert(btos == 0, "change code, btos != 0");
2432 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2433 __ jcc(Assembler::notZero, notByte);
2435 // btos
2436 {
2437 __ pop(btos);
2438 if (!is_static) pop_and_check_object(obj);
2439 __ movb(lo, rax);
2440 if (!is_static) {
2441 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2442 }
2443 __ jmp(Done);
2444 }
2446 __ bind(notByte);
2447 __ cmpl(flags, itos);
2448 __ jcc(Assembler::notEqual, notInt);
2450 // itos
2451 {
2452 __ pop(itos);
2453 if (!is_static) pop_and_check_object(obj);
2454 __ movl(lo, rax);
2455 if (!is_static) {
2456 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2457 }
2458 __ jmp(Done);
2459 }
2461 __ bind(notInt);
2462 __ cmpl(flags, atos);
2463 __ jcc(Assembler::notEqual, notObj);
2465 // atos
2466 {
2467 __ pop(atos);
2468 if (!is_static) pop_and_check_object(obj);
2469 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2470 if (!is_static) {
2471 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2472 }
2473 __ jmp(Done);
2474 }
2476 __ bind(notObj);
2477 __ cmpl(flags, ctos);
2478 __ jcc(Assembler::notEqual, notChar);
2480 // ctos
2481 {
2482 __ pop(ctos);
2483 if (!is_static) pop_and_check_object(obj);
2484 __ movw(lo, rax);
2485 if (!is_static) {
2486 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2487 }
2488 __ jmp(Done);
2489 }
2491 __ bind(notChar);
2492 __ cmpl(flags, stos);
2493 __ jcc(Assembler::notEqual, notShort);
2495 // stos
2496 {
2497 __ pop(stos);
2498 if (!is_static) pop_and_check_object(obj);
2499 __ movw(lo, rax);
2500 if (!is_static) {
2501 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2502 }
2503 __ jmp(Done);
2504 }
2506 __ bind(notShort);
2507 __ cmpl(flags, ltos);
2508 __ jcc(Assembler::notEqual, notLong);
2510 // ltos
2511 {
2512 Label notVolatileLong;
2513 __ testl(rdx, rdx);
2514 __ jcc(Assembler::zero, notVolatileLong);
2516 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2517 if (!is_static) pop_and_check_object(obj);
2519 // Replace with real volatile test
2520 __ push(rdx);
2521 __ push(rax); // Must update atomically with FIST
2522 __ fild_d(Address(rsp,0)); // So load into FPU register
2523 __ fistp_d(lo); // and put into memory atomically
2524 __ addptr(rsp, 2*wordSize);
2525 // volatile_barrier();
2526 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2527 Assembler::StoreStore));
2528 // Don't rewrite volatile version
2529 __ jmp(notVolatile);
2531 __ bind(notVolatileLong);
2533 __ pop(ltos); // overwrites rdx
2534 if (!is_static) pop_and_check_object(obj);
2535 NOT_LP64(__ movptr(hi, rdx));
2536 __ movptr(lo, rax);
2537 if (!is_static) {
2538 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2539 }
2540 __ jmp(notVolatile);
2541 }
2543 __ bind(notLong);
2544 __ cmpl(flags, ftos);
2545 __ jcc(Assembler::notEqual, notFloat);
2547 // ftos
2548 {
2549 __ pop(ftos);
2550 if (!is_static) pop_and_check_object(obj);
2551 __ fstp_s(lo);
2552 if (!is_static) {
2553 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2554 }
2555 __ jmp(Done);
2556 }
2558 __ bind(notFloat);
2559 #ifdef ASSERT
2560 __ cmpl(flags, dtos);
2561 __ jcc(Assembler::notEqual, notDouble);
2562 #endif
2564 // dtos
2565 {
2566 __ pop(dtos);
2567 if (!is_static) pop_and_check_object(obj);
2568 __ fstp_d(lo);
2569 if (!is_static) {
2570 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2571 }
2572 __ jmp(Done);
2573 }
2575 #ifdef ASSERT
2576 __ bind(notDouble);
2577 __ stop("Bad state");
2578 #endif
2580 __ bind(Done);
2582 // Check for volatile store
2583 __ testl(rdx, rdx);
2584 __ jcc(Assembler::zero, notVolatile);
2585 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2586 Assembler::StoreStore));
2587 __ bind(notVolatile);
2588 }
2591 void TemplateTable::putfield(int byte_no) {
2592 putfield_or_static(byte_no, false);
2593 }
2596 void TemplateTable::putstatic(int byte_no) {
2597 putfield_or_static(byte_no, true);
2598 }
2600 void TemplateTable::jvmti_post_fast_field_mod() {
2601 if (JvmtiExport::can_post_field_modification()) {
2602 // Check to see if a field modification watch has been set before we take
2603 // the time to call into the VM.
2604 Label L2;
2605 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2606 __ testl(rcx,rcx);
2607 __ jcc(Assembler::zero, L2);
2608 __ pop_ptr(rbx); // copy the object pointer from tos
2609 __ verify_oop(rbx);
2610 __ push_ptr(rbx); // put the object pointer back on tos
2612 // Save tos values before call_VM() clobbers them. Since we have
2613 // to do it for every data type, we use the saved values as the
2614 // jvalue object.
2615 switch (bytecode()) { // load values into the jvalue object
2616 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2617 case Bytecodes::_fast_bputfield: // fall through
2618 case Bytecodes::_fast_sputfield: // fall through
2619 case Bytecodes::_fast_cputfield: // fall through
2620 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2621 case Bytecodes::_fast_dputfield: __ push_d(); break;
2622 case Bytecodes::_fast_fputfield: __ push_f(); break;
2623 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2625 default:
2626 ShouldNotReachHere();
2627 }
2628 __ mov(rcx, rsp); // points to jvalue on the stack
2629 // access constant pool cache entry
2630 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2631 __ verify_oop(rbx);
2632 // rbx,: object pointer copied above
2633 // rax,: cache entry pointer
2634 // rcx: jvalue object on the stack
2635 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2637 switch (bytecode()) { // restore tos values
2638 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2639 case Bytecodes::_fast_bputfield: // fall through
2640 case Bytecodes::_fast_sputfield: // fall through
2641 case Bytecodes::_fast_cputfield: // fall through
2642 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2643 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2644 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2645 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2646 }
2647 __ bind(L2);
2648 }
2649 }
2651 void TemplateTable::fast_storefield(TosState state) {
2652 transition(state, vtos);
2654 ByteSize base = ConstantPoolCache::base_offset();
2656 jvmti_post_fast_field_mod();
2658 // access constant pool cache
2659 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2661 // test for volatile with rdx, but rdx is the tos register for lputfield.
2662 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2663 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2664 ConstantPoolCacheEntry::flags_offset())));
2666 // replace index with field offset from cache entry
2667 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2669 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2670 // volatile_barrier( );
2672 Label notVolatile, Done;
2673 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2674 __ andl(rdx, 0x1);
2675 // Check for volatile store
2676 __ testl(rdx, rdx);
2677 __ jcc(Assembler::zero, notVolatile);
2679 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2681 // Get object from stack
2682 pop_and_check_object(rcx);
2684 // field addresses
2685 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2686 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2688 // access field
2689 switch (bytecode()) {
2690 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2691 case Bytecodes::_fast_sputfield: // fall through
2692 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2693 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2694 case Bytecodes::_fast_lputfield:
2695 NOT_LP64(__ movptr(hi, rdx));
2696 __ movptr(lo, rax);
2697 break;
2698 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2699 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2700 case Bytecodes::_fast_aputfield: {
2701 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2702 break;
2703 }
2704 default:
2705 ShouldNotReachHere();
2706 }
2708 Label done;
2709 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2710 Assembler::StoreStore));
2711 // Barriers are so large that short branch doesn't reach!
2712 __ jmp(done);
2714 // Same code as above, but don't need rdx to test for volatile.
2715 __ bind(notVolatile);
2717 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2719 // Get object from stack
2720 pop_and_check_object(rcx);
2722 // access field
2723 switch (bytecode()) {
2724 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2725 case Bytecodes::_fast_sputfield: // fall through
2726 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2727 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2728 case Bytecodes::_fast_lputfield:
2729 NOT_LP64(__ movptr(hi, rdx));
2730 __ movptr(lo, rax);
2731 break;
2732 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2733 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2734 case Bytecodes::_fast_aputfield: {
2735 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2736 break;
2737 }
2738 default:
2739 ShouldNotReachHere();
2740 }
2741 __ bind(done);
2742 }
2745 void TemplateTable::fast_accessfield(TosState state) {
2746 transition(atos, state);
2748 // do the JVMTI work here to avoid disturbing the register state below
2749 if (JvmtiExport::can_post_field_access()) {
2750 // Check to see if a field access watch has been set before we take
2751 // the time to call into the VM.
2752 Label L1;
2753 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2754 __ testl(rcx,rcx);
2755 __ jcc(Assembler::zero, L1);
2756 // access constant pool cache entry
2757 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2758 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2759 __ verify_oop(rax);
2760 // rax,: object pointer copied above
2761 // rcx: cache entry pointer
2762 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2763 __ pop_ptr(rax); // restore object pointer
2764 __ bind(L1);
2765 }
2767 // access constant pool cache
2768 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2769 // replace index with field offset from cache entry
2770 __ movptr(rbx, Address(rcx,
2771 rbx,
2772 Address::times_ptr,
2773 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2776 // rax,: object
2777 __ verify_oop(rax);
2778 __ null_check(rax);
2779 // field addresses
2780 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2781 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2783 // access field
2784 switch (bytecode()) {
2785 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2786 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2787 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2788 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2789 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2790 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2791 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2792 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2793 default:
2794 ShouldNotReachHere();
2795 }
2797 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2798 // volatile_barrier( );
2799 }
2801 void TemplateTable::fast_xaccess(TosState state) {
2802 transition(vtos, state);
2803 // get receiver
2804 __ movptr(rax, aaddress(0));
2805 // access constant pool cache
2806 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2807 __ movptr(rbx, Address(rcx,
2808 rdx,
2809 Address::times_ptr,
2810 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2811 // make sure exception is reported in correct bcp range (getfield is next instruction)
2812 __ increment(rsi);
2813 __ null_check(rax);
2814 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2815 if (state == itos) {
2816 __ movl(rax, lo);
2817 } else if (state == atos) {
2818 __ movptr(rax, lo);
2819 __ verify_oop(rax);
2820 } else if (state == ftos) {
2821 __ fld_s(lo);
2822 } else {
2823 ShouldNotReachHere();
2824 }
2825 __ decrement(rsi);
2826 }
2830 //----------------------------------------------------------------------------------------------------
2831 // Calls
2833 void TemplateTable::count_calls(Register method, Register temp) {
2834 // implemented elsewhere
2835 ShouldNotReachHere();
2836 }
2839 void TemplateTable::prepare_invoke(int byte_no,
2840 Register method, // linked method (or i-klass)
2841 Register index, // itable index, MethodType, etc.
2842 Register recv, // if caller wants to see it
2843 Register flags // if caller wants to test it
2844 ) {
2845 // determine flags
2846 const Bytecodes::Code code = bytecode();
2847 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2848 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2849 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2850 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2851 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2852 const bool load_receiver = (recv != noreg);
2853 const bool save_flags = (flags != noreg);
2854 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2855 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2856 assert(flags == noreg || flags == rdx, "");
2857 assert(recv == noreg || recv == rcx, "");
2859 // setup registers & access constant pool cache
2860 if (recv == noreg) recv = rcx;
2861 if (flags == noreg) flags = rdx;
2862 assert_different_registers(method, index, recv, flags);
2864 // save 'interpreter return address'
2865 __ save_bcp();
2867 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2869 // maybe push appendix to arguments (just before return address)
2870 if (is_invokedynamic || is_invokehandle) {
2871 Label L_no_push;
2872 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2873 __ jccb(Assembler::zero, L_no_push);
2874 // Push the appendix as a trailing parameter.
2875 // This must be done before we get the receiver,
2876 // since the parameter_size includes it.
2877 __ push(rbx);
2878 __ mov(rbx, index);
2879 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2880 __ load_resolved_reference_at_index(index, rbx);
2881 __ pop(rbx);
2882 __ push(index); // push appendix (MethodType, CallSite, etc.)
2883 __ bind(L_no_push);
2884 }
2886 // load receiver if needed (note: no return address pushed yet)
2887 if (load_receiver) {
2888 __ movl(recv, flags);
2889 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2890 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2891 const int receiver_is_at_end = -1; // back off one slot to get receiver
2892 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2893 __ movptr(recv, recv_addr);
2894 __ verify_oop(recv);
2895 }
2897 if (save_flags) {
2898 __ mov(rsi, flags);
2899 }
2901 // compute return type
2902 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2903 // Make sure we don't need to mask flags after the above shift
2904 ConstantPoolCacheEntry::verify_tos_state_shift();
2905 // load return address
2906 {
2907 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2908 (address)Interpreter::return_5_addrs_by_index_table() :
2909 (address)Interpreter::return_3_addrs_by_index_table();
2910 ExternalAddress table(table_addr);
2911 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2912 }
2914 // push return address
2915 __ push(flags);
2917 // Restore flags value from the constant pool cache, and restore rsi
2918 // for later null checks. rsi is the bytecode pointer
2919 if (save_flags) {
2920 __ mov(flags, rsi);
2921 __ restore_bcp();
2922 }
2923 }
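// Rough shape of the cp cache flags word decoded above (tos_state in the
// high bits, parameter_size in the low bits; exact bit positions are
// defined in cpCache.hpp):
//
//   [ tos_state | option bits: is_vfinal, is_volatile, is_final,
//     has_appendix, is_forced_virtual, ... | parameter_size ]
//
// parameter_size (which includes the receiver and any pushed appendix)
// indexes backwards from rsp to locate the receiver; tos_state selects
// the return-address table entry matching the callee's result type.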
2926 void TemplateTable::invokevirtual_helper(Register index,
2927 Register recv,
2928 Register flags) {
2929 // Uses temporary registers rax, rdx
2930 assert_different_registers(index, recv, rax, rdx);
2931 assert(index == rbx, "");
2932 assert(recv == rcx, "");
2934 // Test for an invoke of a final method
2935 Label notFinal;
2936 __ movl(rax, flags);
2937 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2938 __ jcc(Assembler::zero, notFinal);
2940 const Register method = index; // method must be rbx
2941 assert(method == rbx,
2942 "Method* must be rbx for interpreter calling convention");
2944 // do the call - the index is actually the method to call
2945 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
2947 // It's final, need a null check here!
2948 __ null_check(recv);
2950 // profile this call
2951 __ profile_final_call(rax);
2953 __ jump_from_interpreted(method, rax);
2955 __ bind(notFinal);
2957 // get receiver klass
2958 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2959 __ load_klass(rax, recv);
2961 // profile this call
2962 __ profile_virtual_call(rax, rdi, rdx);
2964 // get target Method* & entry point
2965 __ lookup_virtual_method(rax, index, method);
2966 __ jump_from_interpreted(method, rdx);
2967 }
2970 void TemplateTable::invokevirtual(int byte_no) {
2971 transition(vtos, vtos);
2972 assert(byte_no == f2_byte, "use this argument");
2973 prepare_invoke(byte_no,
2974 rbx, // method or vtable index
2975 noreg, // unused itable index
2976 rcx, rdx); // recv, flags
2978 // rbx: index
2979 // rcx: receiver
2980 // rdx: flags
2982 invokevirtual_helper(rbx, rcx, rdx);
2983 }
2986 void TemplateTable::invokespecial(int byte_no) {
2987 transition(vtos, vtos);
2988 assert(byte_no == f1_byte, "use this argument");
2989 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
2990 rcx); // get receiver also for null check
2991 __ verify_oop(rcx);
2992 __ null_check(rcx);
2993 // do the call
2994 __ profile_call(rax);
2995 __ jump_from_interpreted(rbx, rax);
2996 }
2999 void TemplateTable::invokestatic(int byte_no) {
3000 transition(vtos, vtos);
3001 assert(byte_no == f1_byte, "use this argument");
3002 prepare_invoke(byte_no, rbx); // get f1 Method*
3003 // do the call
3004 __ profile_call(rax);
3005 __ jump_from_interpreted(rbx, rax);
3006 }
3009 void TemplateTable::fast_invokevfinal(int byte_no) {
3010 transition(vtos, vtos);
3011 assert(byte_no == f2_byte, "use this argument");
3012 __ stop("fast_invokevfinal not used on x86");
3013 }
3016 void TemplateTable::invokeinterface(int byte_no) {
3017 transition(vtos, vtos);
3018 assert(byte_no == f1_byte, "use this argument");
3019 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3020 rcx, rdx); // recv, flags
3022 // rax: interface klass (from f1)
3023 // rbx: itable index (from f2)
3024 // rcx: receiver
3025 // rdx: flags
3027 // Special case of invokeinterface called for virtual method of
3028 // java.lang.Object. See cpCacheOop.cpp for details.
3029 // This code isn't produced by javac, but could be produced by
3030 // another compliant Java compiler.
3031 Label notMethod;
3032 __ movl(rdi, rdx);
3033 __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3034 __ jcc(Assembler::zero, notMethod);
3036 invokevirtual_helper(rbx, rcx, rdx);
3037 __ bind(notMethod);
3039 // Get receiver klass into rdx - also a null check
3040 __ restore_locals(); // restore rdi
3041 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3042 __ load_klass(rdx, rcx);
3044 // profile this call
3045 __ profile_virtual_call(rdx, rsi, rdi);
3047 Label no_such_interface, no_such_method;
3049 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3050 rdx, rax, rbx,
3051 // outputs: method, scan temp. reg
3052 rbx, rsi,
3053 no_such_interface);
3055 // rbx: Method* to call
3056 // rcx: receiver
3057 // Check for abstract method error
3058 // Note: This should be done more efficiently via a throw_abstract_method_error
3059 // interpreter entry point and a conditional jump to it in case of a null
3060 // method.
3061 __ testptr(rbx, rbx);
3062 __ jcc(Assembler::zero, no_such_method);
3064 // do the call
3065 // rcx: receiver
3066 // rbx,: Method*
3067 __ jump_from_interpreted(rbx, rdx);
3068 __ should_not_reach_here();
3070 // exception handling code follows...
3071 // note: must restore interpreter registers to canonical
3072 // state for exception handling to work correctly!
3074 __ bind(no_such_method);
3075 // throw exception
3076 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3077 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3078 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3079 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3080 // the call_VM checks for exception, so we should never return here.
3081 __ should_not_reach_here();
3083 __ bind(no_such_interface);
3084 // throw exception
3085 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3086 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3087 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3088 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3089 InterpreterRuntime::throw_IncompatibleClassChangeError));
3090 // the call_VM checks for exception, so we should never return here.
3091 __ should_not_reach_here();
3092 }
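// Illustrative Java source for the is_forced_virtual special case above
// (not from this file): a compliant compiler may emit invokeinterface
// for a java.lang.Object method called through an interface receiver:
//
//   interface I { }
//   static int h(I x) { return x.hashCode(); }   // invokeinterface possible
//
// Such a call is really a virtual call on Object, which is why the code
// above branches to invokevirtual_helper before any itable lookup.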
3094 void TemplateTable::invokehandle(int byte_no) {
3095 transition(vtos, vtos);
3096 assert(byte_no == f1_byte, "use this argument");
3097 const Register rbx_method = rbx;
3098 const Register rax_mtype = rax;
3099 const Register rcx_recv = rcx;
3100 const Register rdx_flags = rdx;
3102 if (!EnableInvokeDynamic) {
3103 // rewriter does not generate this bytecode
3104 __ should_not_reach_here();
3105 return;
3106 }
3108 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3109 __ verify_method_ptr(rbx_method);
3110 __ verify_oop(rcx_recv);
3111 __ null_check(rcx_recv);
3113 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3114 // rbx: MH.invokeExact_MT method (from f2)
3116 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3118 // FIXME: profile the LambdaForm also
3119 __ profile_final_call(rax);
3121 __ jump_from_interpreted(rbx_method, rdx);
3122 }
3125 void TemplateTable::invokedynamic(int byte_no) {
3126 transition(vtos, vtos);
3127 assert(byte_no == f1_byte, "use this argument");
3129 if (!EnableInvokeDynamic) {
3130 // We should not encounter this bytecode if !EnableInvokeDynamic.
3131 // The verifier will stop it. However, if we get past the verifier,
3132 // this will stop the thread in a reasonable way, without crashing the JVM.
3133 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3134 InterpreterRuntime::throw_IncompatibleClassChangeError));
3135 // the call_VM checks for exception, so we should never return here.
3136 __ should_not_reach_here();
3137 return;
3138 }
3140 const Register rbx_method = rbx;
3141 const Register rax_callsite = rax;
3143 prepare_invoke(byte_no, rbx_method, rax_callsite);
3145 // rax: CallSite object (from cpool->resolved_references[f1])
3146 // rbx: MH.linkToCallSite method (from f2)
3148 // Note: rax_callsite is already pushed by prepare_invoke
3150 // %%% should make a type profile for any invokedynamic that takes a ref argument
3151 // profile this call
3152 __ profile_call(rsi);
3154 __ verify_oop(rax_callsite);
3156 __ jump_from_interpreted(rbx_method, rdx);
3157 }
3159 //----------------------------------------------------------------------------------------------------
3160 // Allocation
3162 void TemplateTable::_new() {
3163 transition(vtos, atos);
3164 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3165 Label slow_case;
3166 Label slow_case_no_pop;
3167 Label done;
3168 Label initialize_header;
3169 Label initialize_object; // including clearing the fields
3170 Label allocate_shared;
3172 __ get_cpool_and_tags(rcx, rax);
3174 // Make sure the class we're about to instantiate has been resolved.
3175 // This is done before loading InstanceKlass to be consistent with the
3176 // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3177 const int tags_offset = Array<u1>::base_offset_in_bytes();
3178 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3179 __ jcc(Assembler::notEqual, slow_case_no_pop);
3181 // get InstanceKlass
3182 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
3183 __ push(rcx); // save the klass pointer for initializing the header later
3185 // make sure klass is initialized & doesn't have finalizer
3186 // make sure klass is fully initialized
3187 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3188 __ jcc(Assembler::notEqual, slow_case);
3190 // get instance_size in InstanceKlass (scaled to a count of bytes)
3191 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3192 // test to see if it has a finalizer or is malformed in some way
3193 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3194 __ jcc(Assembler::notZero, slow_case);
3196 //
3197 // Allocate the instance
3198 // 1) Try to allocate in the TLAB
3199 // 2) if fail and the object is large allocate in the shared Eden
3200 // 3) if the above fails (or is not applicable), go to a slow case
3201 // (creates a new TLAB, etc.)
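// Schematically, both fast paths below are bump-the-pointer allocations
// (illustrative pseudocode; alignment and races are handled by the real
// code):
//
//   obj = top;
//   new_top = obj + instance_size;
//   if (new_top > end) goto next_strategy;
//   top = new_top;   // plain store for the TLAB,
//                    // lock cmpxchg + retry loop for the shared Eden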
3203 const bool allow_shared_alloc =
3204 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3206 const Register thread = rcx;
3207 if (UseTLAB || allow_shared_alloc) {
3208 __ get_thread(thread);
3209 }
3211 if (UseTLAB) {
3212 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3213 __ lea(rbx, Address(rax, rdx, Address::times_1));
3214 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3215 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3216 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3217 if (ZeroTLAB) {
3218 // the fields have been already cleared
3219 __ jmp(initialize_header);
3220 } else {
3221 // initialize both the header and fields
3222 __ jmp(initialize_object);
3223 }
3224 }
3226 // Allocation in the shared Eden, if allowed.
3227 //
3228 // rdx: instance size in bytes
3229 if (allow_shared_alloc) {
3230 __ bind(allocate_shared);
3232 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3234 Label retry;
3235 __ bind(retry);
3236 __ movptr(rax, heap_top);
3237 __ lea(rbx, Address(rax, rdx, Address::times_1));
3238 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3239 __ jcc(Assembler::above, slow_case);
3241 // Compare rax with the top addr; if they are still equal, store the new
3242 // top addr (held in rbx) at the address of the top addr pointer. Sets ZF
3243 // if they were equal, clears it otherwise. Uses the lock prefix for atomicity on MPs.
3244 //
3245 // rax,: object begin
3246 // rbx,: object end
3247 // rdx: instance size in bytes
3248 __ locked_cmpxchgptr(rbx, heap_top);
3250 // if someone beat us on the allocation, try again, otherwise continue
3251 __ jcc(Assembler::notEqual, retry);
3253 __ incr_allocated_bytes(thread, rdx, 0);
3254 }
3256 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3257 // The object is initialized before the header. If the object size is
3258 // zero, go directly to the header initialization.
3259 __ bind(initialize_object);
3260 __ decrement(rdx, sizeof(oopDesc));
3261 __ jcc(Assembler::zero, initialize_header);
3263 // Initialize topmost object field, divide rdx by 8, check if odd and
3264 // test if zero.
3265 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3266 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3268 // rdx must have been multiple of 8
3269 #ifdef ASSERT
3270 // make sure rdx was multiple of 8
3271 Label L;
3272 // Ignore partial flag stall after shrl() since it is debug VM
3273 __ jccb(Assembler::carryClear, L);
3274 __ stop("object size is not multiple of 2 - adjust this code");
3275 __ bind(L);
3276 // rdx must be > 0, no extra check needed here
3277 #endif
3279 // initialize remaining object fields: rdx was a multiple of 8
3280 { Label loop;
3281 __ bind(loop);
3282 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3283 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3284 __ decrement(rdx);
3285 __ jcc(Assembler::notZero, loop);
3286 }
3288 // initialize object header only.
3289 __ bind(initialize_header);
3290 if (UseBiasedLocking) {
3291 __ pop(rcx); // get saved klass back in the register.
3292 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
3293 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3294 } else {
3295 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3296 (int32_t)markOopDesc::prototype()); // header
3297 __ pop(rcx); // get saved klass back in the register.
3298 }
3299 __ store_klass(rax, rcx); // klass
3301 {
3302 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3303 // Trigger dtrace event for fastpath
3304 __ push(atos);
3305 __ call_VM_leaf(
3306 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3307 __ pop(atos);
3308 }
3310 __ jmp(done);
3311 }
3313 // slow case
3314 __ bind(slow_case);
3315 __ pop(rcx); // restore stack pointer to what it was when we came in.
3316 __ bind(slow_case_no_pop);
3317 __ get_constant_pool(rax);
3318 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3319 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3321 // continue
3322 __ bind(done);
3323 }
3326 void TemplateTable::newarray() {
3327 transition(itos, atos);
3328 __ push_i(rax); // make sure everything is on the stack
3329 __ load_unsigned_byte(rdx, at_bcp(1));
3330 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3331 __ pop_i(rdx); // discard size
3332 }
3335 void TemplateTable::anewarray() {
3336 transition(itos, atos);
3337 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3338 __ get_constant_pool(rcx);
3339 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3340 }
3343 void TemplateTable::arraylength() {
3344 transition(atos, itos);
3345 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3346 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3347 }
3350 void TemplateTable::checkcast() {
3351 transition(atos, atos);
3352 Label done, is_null, ok_is_subtype, quicked, resolved;
3353 __ testptr(rax, rax); // Object is in EAX
3354 __ jcc(Assembler::zero, is_null);
3356 // Get cpool & tags index
3357 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3358 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3359 // See if bytecode has already been quicked
3360 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3361 __ jcc(Assembler::equal, quicked);
3363 __ push(atos);
3364 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3365 // vm_result_2 has metadata result
3366 // borrow rdi from locals
3367 __ get_thread(rdi);
3368 __ get_vm_result_2(rax, rdi);
3369 __ restore_locals();
3370 __ pop_ptr(rdx);
3371 __ jmpb(resolved);
3373 // Get superklass in EAX and subklass in EBX
3374 __ bind(quicked);
3375 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3376 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3378 __ bind(resolved);
3379 __ load_klass(rbx, rdx);
3381 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3382 // Superklass in EAX. Subklass in EBX.
3383 __ gen_subtype_check( rbx, ok_is_subtype );
3385 // Come here on failure
3386 __ push(rdx);
3387 // object is at TOS
3388 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3390 // Come here on success
3391 __ bind(ok_is_subtype);
3392 __ mov(rax,rdx); // Restore object in EDX
3394 // Collect counts on whether this check-cast sees NULLs a lot or not.
3395 if (ProfileInterpreter) {
3396 __ jmp(done);
3397 __ bind(is_null);
3398 __ profile_null_seen(rcx);
3399 } else {
3400 __ bind(is_null); // same as 'done'
3401 }
3402 __ bind(done);
3403 }
3406 void TemplateTable::instanceof() {
3407 transition(atos, itos);
3408 Label done, is_null, ok_is_subtype, quicked, resolved;
3409 __ testptr(rax, rax);
3410 __ jcc(Assembler::zero, is_null);
3412 // Get cpool & tags index
3413 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3414 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3415 // See if bytecode has already been quicked
3416 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3417 __ jcc(Assembler::equal, quicked);
3419 __ push(atos);
3420 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3421 // vm_result_2 has metadata result
3422 // borrow rdi from locals
3423 __ get_thread(rdi);
3424 __ get_vm_result_2(rax, rdi);
3425 __ restore_locals();
3426 __ pop_ptr(rdx);
3427 __ load_klass(rdx, rdx);
3428 __ jmp(resolved);
3430 // Get superklass in EAX and subklass in EDX
3431 __ bind(quicked);
3432 __ load_klass(rdx, rax);
3433 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3435 __ bind(resolved);
3437 // Generate subtype check. Blows ECX. Resets EDI.
3438 // Superklass in EAX. Subklass in EDX.
3439 __ gen_subtype_check( rdx, ok_is_subtype );
3441 // Come here on failure
3442 __ xorl(rax,rax);
3443 __ jmpb(done);
3444 // Come here on success
3445 __ bind(ok_is_subtype);
3446 __ movl(rax, 1);
3448 // Collect counts on whether this test sees NULLs a lot or not.
3449 if (ProfileInterpreter) {
3450 __ jmp(done);
3451 __ bind(is_null);
3452 __ profile_null_seen(rcx);
3453 } else {
3454 __ bind(is_null); // same as 'done'
3455 }
3456 __ bind(done);
3457 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3458 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3459 }
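// Java-level semantics implemented by the null handling above:
//
//   null instanceof T    // 0 (false), never throws
//   (T) null             // null, never throws -- see checkcast above
//
// hence both templates branch straight to 'done' on a null receiver and
// only profile the null when ProfileInterpreter is enabled.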
3462 //----------------------------------------------------------------------------------------------------
3463 // Breakpoints
3464 void TemplateTable::_breakpoint() {
3466 // Note: We get here even if we are single stepping.
3467 // jbug insists on setting breakpoints at every bytecode
3468 // even if we are in single step mode.
3470 transition(vtos, vtos);
3472 // get the unpatched byte code
3473 __ get_method(rcx);
3474 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3475 __ mov(rbx, rax);
3477 // post the breakpoint event
3478 __ get_method(rcx);
3479 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3481 // complete the execution of original bytecode
3482 __ dispatch_only_normal(vtos);
3483 }
3486 //----------------------------------------------------------------------------------------------------
3487 // Exceptions
3489 void TemplateTable::athrow() {
3490 transition(atos, vtos);
3491 __ null_check(rax);
3492 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3493 }
3496 //----------------------------------------------------------------------------------------------------
3497 // Synchronization
3498 //
3499 // Note: monitorenter & exit are symmetric routines; which is reflected
3500 // in the assembly code structure as well
3501 //
3502 // Stack layout:
3503 //
3504 // [expressions ] <--- rsp = expression stack top
3505 // ..
3506 // [expressions ]
3507 // [monitor entry] <--- monitor block top = expression stack bot
3508 // ..
3509 // [monitor entry]
3510 // [frame data ] <--- monitor block bot
3511 // ...
3512 // [saved rbp, ] <--- rbp,
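// Each monitor entry above is a BasicObjectLock; conceptually (see
// basicLock.hpp for the authoritative definition):
//
//   class BasicObjectLock {
//     BasicLock _lock;   // holds the displaced mark word
//     oop       _obj;    // locked object; NULL marks a free slot
//   };
//
// monitorenter scans the block for a free slot or an existing entry for
// the same object, and grows the block by sliding the expression stack
// down one entry_size when neither is found.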
3515 void TemplateTable::monitorenter() {
3516 transition(atos, vtos);
3518 // check for NULL object
3519 __ null_check(rax);
3521 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3522 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3523 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3524 Label allocated;
3526 // initialize entry pointer
3527 __ xorl(rdx, rdx); // points to free slot or NULL
3529 // find a free slot in the monitor block (result in rdx)
3530 { Label entry, loop, exit;
3531 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3533 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3534 __ jmpb(entry);
3536 __ bind(loop);
3537 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3538 __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3539 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3540 __ jccb(Assembler::equal, exit); // if same object then stop searching
3541 __ addptr(rcx, entry_size); // otherwise advance to next entry
3542 __ bind(entry);
3543 __ cmpptr(rcx, rbx); // check if bottom reached
3544 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3545 __ bind(exit);
3546 }
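// (Sketch of the search above in C-like pseudocode -- illustrative, not the
//  generated code; rcx plays `cur`, rdx plays `free`, rax holds the lockee:
//    free = NULL;
//    for (cur = *monitor_block_top; cur != block_bot; cur += entry_size) {
//      if (cur->obj == NULL)   free = cur;   // remember last free slot seen
//      if (cur->obj == lockee) break;        // stop at an entry for the same object
//    }
//  )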
3548 __ testptr(rdx, rdx); // check if a slot has been found
3549 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3551 // allocate one if there's no free slot
3552 { Label entry, loop;
3553 // 1. compute new pointers // rsp: old expression stack top
3554 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3555 __ subptr(rsp, entry_size); // move expression stack top
3556 __ subptr(rdx, entry_size); // move expression stack bottom
3557 __ mov(rcx, rsp); // set start value for copy loop
3558 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3559 __ jmp(entry);
3560 // 2. move expression stack contents
3561 __ bind(loop);
3562 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3563 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3564 __ addptr(rcx, wordSize); // advance to next word
3565 __ bind(entry);
3566 __ cmpptr(rcx, rdx); // check if bottom reached
3567 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3568 }
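// (Equivalent effect, sketched: the expression stack contents are shifted down
//  by entry_size bytes, roughly
//    memmove(old_rsp - entry_size, old_rsp, old_block_bot - old_rsp);
//  which vacates a fresh entry at the new top of the monitor block; rdx is
//  left pointing at it.)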
3570 // call run-time routine
3571 // rdx: points to monitor entry
3572 __ bind(allocated);
3574 // Increment bcp to point to the next bytecode, so exception handling for asynchronous exceptions works correctly.
3575 // The object has already been popped from the stack, so the expression stack looks correct.
3576 __ increment(rsi);
3578 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3579 __ lock_object(rdx);
3581 // check to make sure this monitor doesn't cause stack overflow after locking
3582 __ save_bcp(); // in case of exception
3583 __ generate_stack_overflow_check(0);
3585 // The bcp has already been incremented. Just need to dispatch to next instruction.
3586 __ dispatch_next(vtos);
3587 }
3590 void TemplateTable::monitorexit() {
3591 transition(atos, vtos);
3593 // check for NULL object
3594 __ null_check(rax);
3596 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3597 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3598 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3599 Label found;
3601 // find matching slot
3602 { Label entry, loop;
3603 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3604 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3605 __ jmpb(entry);
3607 __ bind(loop);
3608 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3609 __ jcc(Assembler::equal, found); // if same object then stop searching
3610 __ addptr(rdx, entry_size); // otherwise advance to next entry
3611 __ bind(entry);
3612 __ cmpptr(rdx, rbx); // check if bottom reached
3613 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3614 }
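// (Same traversal as the monitorenter search above, but only looking for the
//  entry whose obj field matches rax; falling through means no entry was found.)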
3616 // error handling: falling through the search loop means the object is not on the monitor stack, i.e. unlocking was not block-structured
3617 Label end;
3618 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3619 __ should_not_reach_here();
3621 // call run-time routine
3622 // rdx: points to monitor entry
3623 __ bind(found);
3624 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3625 __ unlock_object(rdx);
3626 __ pop_ptr(rax); // discard object
3627 __ bind(end);
3628 }
3631 //----------------------------------------------------------------------------------------------------
3632 // Wide instructions
3634 void TemplateTable::wide() {
3635 transition(vtos, vtos);
3636 __ load_unsigned_byte(rbx, at_bcp(1));
3637 ExternalAddress wtable((address)Interpreter::_wentry_point);
3638 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3639 // Note: the rsi increment step is part of the individual wide bytecode implementations
3640 }
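// (Illustrative note: the wide prefix widens the operand of the bytecode that
//  follows it, e.g. `wide iload <index16>` uses a 16-bit local index; the
//  table jump above enters the wide variant, such as TemplateTable::wide_iload.)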
3643 //----------------------------------------------------------------------------------------------------
3644 // Multi arrays
3646 void TemplateTable::multianewarray() {
3647 transition(vtos, atos);
3648 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3649 // last dim is on top of stack; we want address of first one:
3650 // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
3651 // (the trailing wordSize subtraction points at the beginning of the first dimension's slot).
3652 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3653 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax: address of first dimension
3654 __ load_unsigned_byte(rbx, at_bcp(3));
3655 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3656 }
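// (Illustrative note: for `new int[a][b][c]` javac emits multianewarray with a
//  dimensions operand of 3 after pushing a, b and c; that operand is the byte at
//  bcp + 3, following the 2-byte constant pool index.)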
3658 #endif /* !CC_INTERP */