Tue, 02 Aug 2011 18:36:40 +0200
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
Summary: replace MemBarAcquire/MemBarRelease nodes on the monitor enter/exit code paths with new MemBarAcquireLock/MemBarReleaseLock nodes
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// Address of the top of the Java expression stack, which may be different
// from rsp(). They are the same for category 1 (single-slot) values.
static inline Address at_tos () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
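
// For illustration: the branch templates below test the negated condition and
// jump around the taken path, e.g. if_icmp(less) emits
//   __ jcc(j_not(less), not_taken);  // i.e., jcc(greaterEqual, not_taken)
// so execution falls through into branch() only when the comparison holds.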

// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default      :
      ShouldNotReachHere();

  }
}
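
// Usage sketch (see aastore below): the value to store must already be in
// rax, and the barrier kind comes from the active BarrierSet, e.g.
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true /* precise */);
// With precise == true the card for the exact element address is dirtied;
// otherwise dirtying the card of the object header (obj.base()) suffices.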

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {
  if (!RewriteBytecodes) {
    return;
  }
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
  __ jcc(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
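
// Rewrite sketch: a slow template (e.g. iload below) picks a faster
// replacement, loads its code into a register, and calls
//   patch_bytecode(Bytecodes::_iload, bc, rbx, false);
// which stores bc over the byte at bcp, so subsequent executions dispatch
// directly to the _fast_* template (unless a JVMTI breakpoint intervenes).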


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_Object);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it. However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }

  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;  // same as cache
  const Register array_klass_temp = rdx;  // same as index
  __ load_klass(con_klass_temp, rax);
  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
  __ jcc(Assembler::notEqual, L_done);
  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
  __ jcc(Assembler::notEqual, L_throw_exception);
  __ xorptr(rax, rax);
  __ jmp(L_done);

  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
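
// The index is negated because local slots live at decreasing addresses below
// the locals pointer in r14: with a negated index, iaddress(rbx) ==
// Address(r14, rbx, Address::times_8) resolves to r14 - n*8 for local #n.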

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // If the next bytecode is _iload, wait with the rewrite: we only want to
    // rewrite the last two iloads in a run. If the next bytecode is
    // _fast_iload (i.e., an iload that has already been rewritten), the
    // current and next bytecodes form an iload pair, so rewrite the current
    // one to _fast_iload2.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
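
// Note that index_check() also leaves a copy of the index in rbx whenever
// index != rbx; laload below relies on this and addresses the array element
// with rbx rather than rax.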

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are the most profitable to rewrite, since they
  // require only a small amount of code.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with the rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         sizeof(oopDesc) +
                         objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array + index*oopSize + 12 into a single register. Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}
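
// The three long-shift templates above put the shift count in rcx (cl); the
// x86-64 64-bit shift instructions use only the low 6 bits of cl, which
// matches the Java semantics of taking the shift distance modulo 64.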

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
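
// Worked example of the masking arithmetic: each pool below holds 4 jlongs
// (32 bytes), so if, say, &pool[1] == 0x1038, then 0x1038 & ~0xF == 0x1030 ==
// &pool[0], a 16-byte-aligned slot whose two jlongs still lie inside the pool.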

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip  = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}
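
// On NaN or out-of-range input, cvttss2si/cvttsd2si produce the "integer
// indefinite" value (0x80000000 for 32-bit results, 0x8000000000000000 for
// 64-bit results); the templates above compare against that sentinel and only
// then call the SharedRuntime stub to compute the Java-specified result.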

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
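
// Sketch of the idiom above: rax starts at -1; if rdx < rax we are done,
// otherwise setb(notEqual) leaves 1 in al when the operands differ (i.e.,
// rdx > rax, since "less" was already excluded) and 0 when they are equal,
// and movzbl clears the stale upper bits, yielding the Java -1/0/1 result.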

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in methodOop
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);        // store counter

      __ movl(rax, Address(rcx, inv_offset));       // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
      __ addl(rax, Address(rcx, be_offset));        // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);             // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off the stack.

      __ mov(r13, rax); // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();             // remove frame anchor
      __ pop(retaddr);        // get return address
      __ mov(rsp, sender_sp); // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}


void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}
1924 void TemplateTable::fast_binaryswitch() {
1925 transition(itos, vtos);
1926 // Implementation using the following core algorithm:
1927 //
1928 // int binary_search(int key, LookupswitchPair* array, int n) {
1929 // // Binary search according to "Methodik des Programmierens" by
1930 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1931 // int i = 0;
1932 // int j = n;
1933 // while (i+1 < j) {
1934 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1935 // // with Q: for all i: 0 <= i < n: key < a[i]
1936 // // where a stands for the array and assuming that the (nonexistent)
1937 // // element a[n] is infinitely large.
1938 // int h = (i + j) >> 1;
1939 // // i < h < j
1940 // if (key < array[h].fast_match()) {
1941 // j = h;
1942 // } else {
1943 // i = h;
1944 // }
1945 // }
1946 // // R: a[i] <= key < a[i+1] or Q
1947 // // (i.e., if key is within array, i is the correct index)
1948 // return i;
1949 // }
1951 // Register allocation
1952 const Register key = rax; // already set (tosca)
1953 const Register array = rbx;
1954 const Register i = rcx;
1955 const Register j = rdx;
1956 const Register h = rdi;
1957 const Register temp = rsi;
1959 // Find array start
1960 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1961 // get rid of this
1962 // instruction (change
1963 // offsets below)
1964 __ andptr(array, -BytesPerInt);
1966 // Initialize i & j
1967 __ xorl(i, i); // i = 0;
1968 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1970 // Convert j into native byte ordering
1971 __ bswapl(j);
1973 // And start
1974 Label entry;
1975 __ jmp(entry);
1977 // binary search loop
1978 {
1979 Label loop;
1980 __ bind(loop);
1981 // int h = (i + j) >> 1;
1982 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1983 __ sarl(h, 1); // h = (i + j) >> 1;
1984 // if (key < array[h].fast_match()) {
1985 // j = h;
1986 // } else {
1987 // i = h;
1988 // }
1989 // Convert array[h].match to native byte ordering before the compare
1990 __ movl(temp, Address(array, h, Address::times_8));
1991 __ bswapl(temp);
1992 __ cmpl(key, temp);
1993 // j = h if (key < array[h].fast_match())
1994 __ cmovl(Assembler::less, j, h);
1995 // i = h if (key >= array[h].fast_match())
1996 __ cmovl(Assembler::greaterEqual, i, h);
1997 // while (i+1 < j)
1998 __ bind(entry);
1999 __ leal(h, Address(i, 1)); // i+1
2000 __ cmpl(h, j); // i+1 < j
2001 __ jcc(Assembler::less, loop);
2002 }
2004 // end of binary search, result index is i (must check again!)
2005 Label default_case;
2006 // Convert array[i].match to native byte ordering before the compare
2007 __ movl(temp, Address(array, i, Address::times_8));
2008 __ bswapl(temp);
2009 __ cmpl(key, temp);
2010 __ jcc(Assembler::notEqual, default_case);
2012 // entry found -> j = offset
2013 __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2014 __ profile_switch_case(i, key, array);
2015 __ bswapl(j);
2016 __ movl2ptr(j, j);
2017 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2018 __ addptr(r13, j);
2019 __ dispatch_only(vtos);
2021 // default case -> j = default offset
2022 __ bind(default_case);
2023 __ profile_switch_default(i);
2024 __ movl(j, Address(array, -2 * BytesPerInt));
2025 __ bswapl(j);
2026 __ movl2ptr(j, j);
2027 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2028 __ addptr(r13, j);
2029 __ dispatch_only(vtos);
2030 }
2033 void TemplateTable::_return(TosState state) {
2034 transition(state, state);
2035 assert(_desc->calls_vm(),
2036 "inconsistent calls_vm information"); // call in remove_activation
2038 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2039 assert(state == vtos, "only valid state");
2040 __ movptr(c_rarg1, aaddress(0));
2041 __ load_klass(rdi, c_rarg1);
2042 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2043 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2044 Label skip_register_finalizer;
2045 __ jcc(Assembler::zero, skip_register_finalizer);
2047 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2049 __ bind(skip_register_finalizer);
2050 }
2052 __ remove_activation(state, r13);
2053 __ jmp(r13);
2054 }
2056 // ----------------------------------------------------------------------------
2057 // Volatile variables demand their effects be made known to all CPUs
2058 // in order. Store buffers on most chips allow reads & writes to
2059 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2060 // without some kind of memory barrier (i.e., it's not sufficient that
2061 // the interpreter does not reorder volatile references, the hardware
2062 // also must not reorder them).
2063 //
2064 // According to the new Java Memory Model (JMM):
2065 // (1) All volatiles are serialized with respect to each other. Also,
2066 // reads & writes act as acquire & release, so:
2067 // (2) A read cannot let unrelated NON-volatile memory refs that
2068 // happen after the read float up to before the read. It's OK for
2069 // non-volatile memory refs that happen before the volatile read to
2070 // float down below it.
2071 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2072 // memory refs that happen BEFORE the write float down to after the
2073 // write. It's OK for non-volatile memory refs that happen after the
2074 // volatile write to float up before it.
2075 //
2076 // We only put in barriers around volatile refs (they are expensive),
2077 // not _between_ memory refs (that would require us to track the
2078 // flavor of the previous memory refs). Requirements (2) and (3)
2079 // require some barriers before volatile stores and after volatile
2080 // loads. These nearly cover requirement (1) but miss the
2081 // volatile-store-volatile-load case. This final case is placed after
2082 // volatile-stores although it could just as well go before
2083 // volatile-loads.
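// As an illustrative (non-normative) sketch, the only barrier this file
// actually emits for a volatile field v on x86 is after the store:
//
//   putfield v  ->  store; volatile_barrier(StoreLoad | StoreStore)
//   getfield v  ->  load   (load-side barriers are unnecessary on x86
//                           and are left commented out below)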
2084 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2085 order_constraint) {
2086 // Helper function to insert a memory barrier (the is-volatile test is done by callers)
2087 if (os::is_MP()) { // Not needed on single CPU
2088 __ membar(order_constraint);
2089 }
2090 }
2092 void TemplateTable::resolve_cache_and_index(int byte_no,
2093 Register result,
2094 Register Rcache,
2095 Register index,
2096 size_t index_size) {
2097 const Register temp = rbx;
2098 assert_different_registers(result, Rcache, index, temp);
2100 Label resolved;
2101 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2102 if (byte_no == f1_oop) {
2103 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2104 // This kind of CP cache entry does not need to match the flags byte, because
2105 // there is a 1-1 relation between bytecode type and CP entry type.
2106 assert(result != noreg, ""); // else do cmpptr(Address(...), (int32_t) NULL_WORD)
2107 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2108 __ testptr(result, result);
2109 __ jcc(Assembler::notEqual, resolved);
2110 } else {
2111 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2112 assert(result == noreg, ""); // else change code for setting result
2113 const int shift_count = (1 + byte_no) * BitsPerByte;
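// A hedged sketch of the _indices packing assumed by this shift: bits
// 0..15 hold the constant pool index, byte 2 the bytecode for f1_byte,
// byte 3 the bytecode for f2_byte; shifting right by (1 + byte_no) * 8
// and masking with 0xFF isolates the bytecode recorded for this slot.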
2114 __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2115 __ shrl(temp, shift_count);
2116 // have we resolved this bytecode?
2117 __ andl(temp, 0xFF);
2118 __ cmpl(temp, (int) bytecode());
2119 __ jcc(Assembler::equal, resolved);
2120 }
2122 // resolve first time through
2123 address entry;
2124 switch (bytecode()) {
2125 case Bytecodes::_getstatic:
2126 case Bytecodes::_putstatic:
2127 case Bytecodes::_getfield:
2128 case Bytecodes::_putfield:
2129 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2130 break;
2131 case Bytecodes::_invokevirtual:
2132 case Bytecodes::_invokespecial:
2133 case Bytecodes::_invokestatic:
2134 case Bytecodes::_invokeinterface:
2135 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2136 break;
2137 case Bytecodes::_invokedynamic:
2138 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2139 break;
2140 case Bytecodes::_fast_aldc: // fall through
2143 case Bytecodes::_fast_aldc_w:
2144 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
2145 break;
2146 default:
2147 ShouldNotReachHere();
2148 break;
2149 }
2150 __ movl(temp, (int) bytecode());
2151 __ call_VM(noreg, entry, temp);
2153 // Update registers with resolved info
2154 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2155 if (result != noreg)
2156 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2157 __ bind(resolved);
2158 }
2160 // The Rcache and index registers must be set before the call
2161 void TemplateTable::load_field_cp_cache_entry(Register obj,
2162 Register cache,
2163 Register index,
2164 Register off,
2165 Register flags,
2166 bool is_static = false) {
2167 assert_different_registers(cache, index, flags, off);
2169 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2170 // Field offset
2171 __ movptr(off, Address(cache, index, Address::times_8,
2172 in_bytes(cp_base_offset +
2173 ConstantPoolCacheEntry::f2_offset())));
2174 // Flags
2175 __ movl(flags, Address(cache, index, Address::times_8,
2176 in_bytes(cp_base_offset +
2177 ConstantPoolCacheEntry::flags_offset())));
2179 // klass overwrite register: for static fields, load the klass (f1) into obj
2180 if (is_static) {
2181 __ movptr(obj, Address(cache, index, Address::times_8,
2182 in_bytes(cp_base_offset +
2183 ConstantPoolCacheEntry::f1_offset())));
2184 }
2185 }
2187 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2188 Register method,
2189 Register itable_index,
2190 Register flags,
2191 bool is_invokevirtual,
2192 bool is_invokevfinal, /*unused*/
2193 bool is_invokedynamic) {
2194 // setup registers
2195 const Register cache = rcx;
2196 const Register index = rdx;
2197 assert_different_registers(method, flags);
2198 assert_different_registers(method, cache, index);
2199 assert_different_registers(itable_index, flags);
2200 assert_different_registers(itable_index, cache, index);
2201 // determine constant pool cache field offsets
2202 const int method_offset = in_bytes(
2203 constantPoolCacheOopDesc::base_offset() +
2204 (is_invokevirtual
2205 ? ConstantPoolCacheEntry::f2_offset()
2206 : ConstantPoolCacheEntry::f1_offset()));
2207 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2208 ConstantPoolCacheEntry::flags_offset());
2209 // access constant pool cache fields
2210 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2211 ConstantPoolCacheEntry::f2_offset());
2213 if (byte_no == f1_oop) {
2214 // Resolved f1_oop goes directly into 'method' register.
2215 assert(is_invokedynamic, "");
2216 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2217 } else {
2218 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2219 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2220 }
2221 if (itable_index != noreg) {
2222 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2223 }
2224 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2225 }
2228 // The cache and index registers are expected to be set before the call.
2229 // Correct values of the cache and index registers are preserved.
2230 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2231 bool is_static, bool has_tos) {
2232 // do the JVMTI work here to avoid disturbing the register state below
2233 // We use c_rarg registers here because they are the registers used in
2234 // the call to the VM
2235 if (JvmtiExport::can_post_field_access()) {
2236 // Check to see if a field access watch has been set before we
2237 // take the time to call into the VM.
2238 Label L1;
2239 assert_different_registers(cache, index, rax);
2240 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2241 __ testl(rax, rax);
2242 __ jcc(Assembler::zero, L1);
2244 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2246 // cache entry pointer
2247 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
2248 __ shll(c_rarg3, LogBytesPerWord);
2249 __ addptr(c_rarg2, c_rarg3);
2250 if (is_static) {
2251 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2252 } else {
2253 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2254 __ verify_oop(c_rarg1);
2255 }
2256 // c_rarg1: object pointer or NULL
2257 // c_rarg2: cache entry pointer
2258 // c_rarg3: jvalue object on the stack
2259 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2260 InterpreterRuntime::post_field_access),
2261 c_rarg1, c_rarg2, c_rarg3);
2262 __ get_cache_and_index_at_bcp(cache, index, 1);
2263 __ bind(L1);
2264 }
2265 }
2267 void TemplateTable::pop_and_check_object(Register r) {
2268 __ pop_ptr(r);
2269 __ null_check(r); // field access requires a non-NULL object
2270 __ verify_oop(r);
2271 }
2273 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2274 transition(vtos, vtos);
2276 const Register cache = rcx;
2277 const Register index = rdx;
2278 const Register obj = c_rarg3;
2279 const Register off = rbx;
2280 const Register flags = rax;
2281 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2283 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2284 jvmti_post_field_access(cache, index, is_static, false);
2285 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2287 if (!is_static) {
2288 // obj is on the stack
2289 pop_and_check_object(obj);
2290 }
2292 const Address field(obj, off, Address::times_1);
2294 Label Done, notByte, notInt, notShort, notChar,
2295 notLong, notFloat, notObj, notDouble;
2297 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2298 assert(btos == 0, "change code, btos != 0");
2300 __ andl(flags, 0x0F);
2301 __ jcc(Assembler::notZero, notByte);
2302 // btos
2303 __ load_signed_byte(rax, field);
2304 __ push(btos);
2305 // Rewrite bytecode to be faster
2306 if (!is_static) {
2307 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2308 }
2309 __ jmp(Done);
2311 __ bind(notByte);
2312 __ cmpl(flags, atos);
2313 __ jcc(Assembler::notEqual, notObj);
2314 // atos
2315 __ load_heap_oop(rax, field);
2316 __ push(atos);
2317 if (!is_static) {
2318 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2319 }
2320 __ jmp(Done);
2322 __ bind(notObj);
2323 __ cmpl(flags, itos);
2324 __ jcc(Assembler::notEqual, notInt);
2325 // itos
2326 __ movl(rax, field);
2327 __ push(itos);
2328 // Rewrite bytecode to be faster
2329 if (!is_static) {
2330 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2331 }
2332 __ jmp(Done);
2334 __ bind(notInt);
2335 __ cmpl(flags, ctos);
2336 __ jcc(Assembler::notEqual, notChar);
2337 // ctos
2338 __ load_unsigned_short(rax, field);
2339 __ push(ctos);
2340 // Rewrite bytecode to be faster
2341 if (!is_static) {
2342 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2343 }
2344 __ jmp(Done);
2346 __ bind(notChar);
2347 __ cmpl(flags, stos);
2348 __ jcc(Assembler::notEqual, notShort);
2349 // stos
2350 __ load_signed_short(rax, field);
2351 __ push(stos);
2352 // Rewrite bytecode to be faster
2353 if (!is_static) {
2354 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2355 }
2356 __ jmp(Done);
2358 __ bind(notShort);
2359 __ cmpl(flags, ltos);
2360 __ jcc(Assembler::notEqual, notLong);
2361 // ltos
2362 __ movq(rax, field);
2363 __ push(ltos);
2364 // Rewrite bytecode to be faster
2365 if (!is_static) {
2366 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2367 }
2368 __ jmp(Done);
2370 __ bind(notLong);
2371 __ cmpl(flags, ftos);
2372 __ jcc(Assembler::notEqual, notFloat);
2373 // ftos
2374 __ movflt(xmm0, field);
2375 __ push(ftos);
2376 // Rewrite bytecode to be faster
2377 if (!is_static) {
2378 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2379 }
2380 __ jmp(Done);
2382 __ bind(notFloat);
2383 #ifdef ASSERT
2384 __ cmpl(flags, dtos);
2385 __ jcc(Assembler::notEqual, notDouble);
2386 #endif
2387 // dtos
2388 __ movdbl(xmm0, field);
2389 __ push(dtos);
2390 // Rewrite bytecode to be faster
2391 if (!is_static) {
2392 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2393 }
2394 #ifdef ASSERT
2395 __ jmp(Done);
2397 __ bind(notDouble);
2398 __ stop("Bad state");
2399 #endif
2401 __ bind(Done);
2402 // [jk] not needed currently
2403 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2404 // Assembler::LoadStore));
2405 }
2408 void TemplateTable::getfield(int byte_no) {
2409 getfield_or_static(byte_no, false);
2410 }
2412 void TemplateTable::getstatic(int byte_no) {
2413 getfield_or_static(byte_no, true);
2414 }
2416 // The cache and index registers are expected to be set before the call.
2417 // The function may destroy various registers, but not the cache and index registers.
2418 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2419 transition(vtos, vtos);
2421 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2423 if (JvmtiExport::can_post_field_modification()) {
2424 // Check to see if a field modification watch has been set before
2425 // we take the time to call into the VM.
2426 Label L1;
2427 assert_different_registers(cache, index, rax);
2428 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2429 __ testl(rax, rax);
2430 __ jcc(Assembler::zero, L1);
2432 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2434 if (is_static) {
2435 // Life is simple. Null out the object pointer.
2436 __ xorl(c_rarg1, c_rarg1);
2437 } else {
2438 // Life is harder. The stack holds the value on top, followed by
2439 // the object. We don't know the size of the value, though; it
2440 // could be one or two words depending on its type. As a result,
2441 // we must find the type to determine where the object is.
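// As a sketch (one machine word per expression stack slot on amd64):
//
//   one-word value (e.g. itos):   ..., obj, value          <-- tos
//   two-word value (ltos/dtos):   ..., obj, value, value   <-- tos
//
// so the object sits at tos+1 for one-word values, tos+2 for two-word.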
2442 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2443 Address::times_8,
2444 in_bytes(cp_base_offset +
2445 ConstantPoolCacheEntry::flags_offset())));
2446 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
2447 // Make sure we don't need to mask rcx for tosBits after the
2448 // above shift
2449 ConstantPoolCacheEntry::verify_tosBits();
2450 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2451 __ cmpl(c_rarg3, ltos);
2452 __ cmovptr(Assembler::equal,
2453 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2454 __ cmpl(c_rarg3, dtos);
2455 __ cmovptr(Assembler::equal,
2456 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2457 }
2458 // cache entry pointer
2459 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2460 __ shll(rscratch1, LogBytesPerWord);
2461 __ addptr(c_rarg2, rscratch1);
2462 // object (tos)
2463 __ mov(c_rarg3, rsp);
2464 // c_rarg1: object pointer set up above (NULL if static)
2465 // c_rarg2: cache entry pointer
2466 // c_rarg3: jvalue object on the stack
2467 __ call_VM(noreg,
2468 CAST_FROM_FN_PTR(address,
2469 InterpreterRuntime::post_field_modification),
2470 c_rarg1, c_rarg2, c_rarg3);
2471 __ get_cache_and_index_at_bcp(cache, index, 1);
2472 __ bind(L1);
2473 }
2474 }
2476 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2477 transition(vtos, vtos);
2479 const Register cache = rcx;
2480 const Register index = rdx;
2481 const Register obj = rcx;
2482 const Register off = rbx;
2483 const Register flags = rax;
2484 const Register bc = c_rarg3;
2486 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2487 jvmti_post_field_mod(cache, index, is_static);
2488 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2490 // [jk] not needed currently
2491 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2492 // Assembler::StoreStore));
2494 Label notVolatile, Done;
2495 __ movl(rdx, flags);
2496 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2497 __ andl(rdx, 0x1);
2499 // field address
2500 const Address field(obj, off, Address::times_1);
2502 Label notByte, notInt, notShort, notChar,
2503 notLong, notFloat, notObj, notDouble;
2505 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2507 assert(btos == 0, "change code, btos != 0");
2508 __ andl(flags, 0x0f);
2509 __ jcc(Assembler::notZero, notByte);
2510 // btos
2511 __ pop(btos);
2512 if (!is_static) pop_and_check_object(obj);
2513 __ movb(field, rax);
2514 if (!is_static) {
2515 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
2516 }
2517 __ jmp(Done);
2519 __ bind(notByte);
2520 __ cmpl(flags, atos);
2521 __ jcc(Assembler::notEqual, notObj);
2522 // atos
2523 __ pop(atos);
2524 if (!is_static) pop_and_check_object(obj);
2526 // Store into the field
2527 do_oop_store(_masm, field, rax, _bs->kind(), false);
2529 if (!is_static) {
2530 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
2531 }
2532 __ jmp(Done);
2534 __ bind(notObj);
2535 __ cmpl(flags, itos);
2536 __ jcc(Assembler::notEqual, notInt);
2537 // itos
2538 __ pop(itos);
2539 if (!is_static) pop_and_check_object(obj);
2540 __ movl(field, rax);
2541 if (!is_static) {
2542 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
2543 }
2544 __ jmp(Done);
2546 __ bind(notInt);
2547 __ cmpl(flags, ctos);
2548 __ jcc(Assembler::notEqual, notChar);
2549 // ctos
2550 __ pop(ctos);
2551 if (!is_static) pop_and_check_object(obj);
2552 __ movw(field, rax);
2553 if (!is_static) {
2554 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
2555 }
2556 __ jmp(Done);
2558 __ bind(notChar);
2559 __ cmpl(flags, stos);
2560 __ jcc(Assembler::notEqual, notShort);
2561 // stos
2562 __ pop(stos);
2563 if (!is_static) pop_and_check_object(obj);
2564 __ movw(field, rax);
2565 if (!is_static) {
2566 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
2567 }
2568 __ jmp(Done);
2570 __ bind(notShort);
2571 __ cmpl(flags, ltos);
2572 __ jcc(Assembler::notEqual, notLong);
2573 // ltos
2574 __ pop(ltos);
2575 if (!is_static) pop_and_check_object(obj);
2576 __ movq(field, rax);
2577 if (!is_static) {
2578 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
2579 }
2580 __ jmp(Done);
2582 __ bind(notLong);
2583 __ cmpl(flags, ftos);
2584 __ jcc(Assembler::notEqual, notFloat);
2585 // ftos
2586 __ pop(ftos);
2587 if (!is_static) pop_and_check_object(obj);
2588 __ movflt(field, xmm0);
2589 if (!is_static) {
2590 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
2591 }
2592 __ jmp(Done);
2594 __ bind(notFloat);
2595 #ifdef ASSERT
2596 __ cmpl(flags, dtos);
2597 __ jcc(Assembler::notEqual, notDouble);
2598 #endif
2599 // dtos
2600 __ pop(dtos);
2601 if (!is_static) pop_and_check_object(obj);
2602 __ movdbl(field, xmm0);
2603 if (!is_static) {
2604 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
2605 }
2607 #ifdef ASSERT
2608 __ jmp(Done);
2610 __ bind(notDouble);
2611 __ stop("Bad state");
2612 #endif
2614 __ bind(Done);
2615 // Check for volatile store
2616 __ testl(rdx, rdx);
2617 __ jcc(Assembler::zero, notVolatile);
2618 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2619 Assembler::StoreStore));
2621 __ bind(notVolatile);
2622 }
2624 void TemplateTable::putfield(int byte_no) {
2625 putfield_or_static(byte_no, false);
2626 }
2628 void TemplateTable::putstatic(int byte_no) {
2629 putfield_or_static(byte_no, true);
2630 }
2632 void TemplateTable::jvmti_post_fast_field_mod() {
2633 if (JvmtiExport::can_post_field_modification()) {
2634 // Check to see if a field modification watch has been set before
2635 // we take the time to call into the VM.
2636 Label L2;
2637 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2638 __ testl(c_rarg3, c_rarg3);
2639 __ jcc(Assembler::zero, L2);
2640 __ pop_ptr(rbx); // copy the object pointer from tos
2641 __ verify_oop(rbx);
2642 __ push_ptr(rbx); // put the object pointer back on tos
2643 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2644 __ mov(c_rarg3, rsp);
2645 const Address field(c_rarg3, 0);
2647 switch (bytecode()) { // load values into the jvalue object
2648 case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
2649 case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
2650 case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
2651 case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
2652 case Bytecodes::_fast_sputfield: // fall through
2653 case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
2654 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
2655 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
2656 default:
2657 ShouldNotReachHere();
2658 }
2660 // Save rax because call_VM() will clobber it, then use it for
2661 // JVMTI purposes
2662 __ push(rax);
2663 // access constant pool cache entry
2664 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2665 __ verify_oop(rbx);
2666 // rbx: object pointer copied above
2667 // c_rarg2: cache entry pointer
2668 // c_rarg3: jvalue object on the stack
2669 __ call_VM(noreg,
2670 CAST_FROM_FN_PTR(address,
2671 InterpreterRuntime::post_field_modification),
2672 rbx, c_rarg2, c_rarg3);
2673 __ pop(rax); // restore rax, saved above
2674 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2675 __ bind(L2);
2676 }
2677 }
2679 void TemplateTable::fast_storefield(TosState state) {
2680 transition(state, vtos);
2682 ByteSize base = constantPoolCacheOopDesc::base_offset();
2684 jvmti_post_fast_field_mod();
2686 // access constant pool cache
2687 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2689 // test for volatile with rdx
2690 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2691 in_bytes(base +
2692 ConstantPoolCacheEntry::flags_offset())));
2694 // replace index with field offset from cache entry
2695 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2696 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2698 // [jk] not needed currently
2699 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2700 // Assembler::StoreStore));
2702 Label notVolatile;
2703 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2704 __ andl(rdx, 0x1);
2706 // Get object from stack
2707 pop_and_check_object(rcx);
2709 // field address
2710 const Address field(rcx, rbx, Address::times_1);
2712 // access field
2713 switch (bytecode()) {
2714 case Bytecodes::_fast_aputfield:
2715 do_oop_store(_masm, field, rax, _bs->kind(), false);
2716 break;
2717 case Bytecodes::_fast_lputfield:
2718 __ movq(field, rax);
2719 break;
2720 case Bytecodes::_fast_iputfield:
2721 __ movl(field, rax);
2722 break;
2723 case Bytecodes::_fast_bputfield:
2724 __ movb(field, rax);
2725 break;
2726 case Bytecodes::_fast_sputfield:
2727 // fall through
2728 case Bytecodes::_fast_cputfield:
2729 __ movw(field, rax);
2730 break;
2731 case Bytecodes::_fast_fputfield:
2732 __ movflt(field, xmm0);
2733 break;
2734 case Bytecodes::_fast_dputfield:
2735 __ movdbl(field, xmm0);
2736 break;
2737 default:
2738 ShouldNotReachHere();
2739 }
2741 // Check for volatile store
2742 __ testl(rdx, rdx);
2743 __ jcc(Assembler::zero, notVolatile);
2744 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2745 Assembler::StoreStore));
2746 __ bind(notVolatile);
2747 }
2750 void TemplateTable::fast_accessfield(TosState state) {
2751 transition(atos, state);
2753 // Do the JVMTI work here to avoid disturbing the register state below
2754 if (JvmtiExport::can_post_field_access()) {
2755 // Check to see if a field access watch has been set before we
2756 // take the time to call into the VM.
2757 Label L1;
2758 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2759 __ testl(rcx, rcx);
2760 __ jcc(Assembler::zero, L1);
2761 // access constant pool cache entry
2762 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2763 __ verify_oop(rax);
2764 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2765 __ mov(c_rarg1, rax);
2766 // c_rarg1: object pointer copied above
2767 // c_rarg2: cache entry pointer
2768 __ call_VM(noreg,
2769 CAST_FROM_FN_PTR(address,
2770 InterpreterRuntime::post_field_access),
2771 c_rarg1, c_rarg2);
2772 __ pop_ptr(rax); // restore object pointer
2773 __ bind(L1);
2774 }
2776 // access constant pool cache
2777 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2778 // replace index with field offset from cache entry
2779 // [jk] not needed currently
2780 // if (os::is_MP()) {
2781 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2782 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2783 // ConstantPoolCacheEntry::flags_offset())));
2784 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2785 // __ andl(rdx, 0x1);
2786 // }
2787 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2788 in_bytes(constantPoolCacheOopDesc::base_offset() +
2789 ConstantPoolCacheEntry::f2_offset())));
2791 // rax: object
2792 __ verify_oop(rax);
2793 __ null_check(rax);
2794 Address field(rax, rbx, Address::times_1);
2796 // access field
2797 switch (bytecode()) {
2798 case Bytecodes::_fast_agetfield:
2799 __ load_heap_oop(rax, field);
2800 __ verify_oop(rax);
2801 break;
2802 case Bytecodes::_fast_lgetfield:
2803 __ movq(rax, field);
2804 break;
2805 case Bytecodes::_fast_igetfield:
2806 __ movl(rax, field);
2807 break;
2808 case Bytecodes::_fast_bgetfield:
2809 __ movsbl(rax, field);
2810 break;
2811 case Bytecodes::_fast_sgetfield:
2812 __ load_signed_short(rax, field);
2813 break;
2814 case Bytecodes::_fast_cgetfield:
2815 __ load_unsigned_short(rax, field);
2816 break;
2817 case Bytecodes::_fast_fgetfield:
2818 __ movflt(xmm0, field);
2819 break;
2820 case Bytecodes::_fast_dgetfield:
2821 __ movdbl(xmm0, field);
2822 break;
2823 default:
2824 ShouldNotReachHere();
2825 }
2826 // [jk] not needed currently
2827 // if (os::is_MP()) {
2828 // Label notVolatile;
2829 // __ testl(rdx, rdx);
2830 // __ jcc(Assembler::zero, notVolatile);
2831 // __ membar(Assembler::LoadLoad);
2832 // __ bind(notVolatile);
2833 //};
2834 }
2836 void TemplateTable::fast_xaccess(TosState state) {
2837 transition(vtos, state);
2839 // get receiver
2840 __ movptr(rax, aaddress(0));
2841 // access constant pool cache
2842 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2843 __ movptr(rbx,
2844 Address(rcx, rdx, Address::times_8,
2845 in_bytes(constantPoolCacheOopDesc::base_offset() +
2846 ConstantPoolCacheEntry::f2_offset())));
2847 // make sure exception is reported in correct bcp range (getfield is
2848 // next instruction)
2849 __ increment(r13);
2850 __ null_check(rax);
2851 switch (state) {
2852 case itos:
2853 __ movl(rax, Address(rax, rbx, Address::times_1));
2854 break;
2855 case atos:
2856 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2857 __ verify_oop(rax);
2858 break;
2859 case ftos:
2860 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2861 break;
2862 default:
2863 ShouldNotReachHere();
2864 }
2866 // [jk] not needed currently
2867 // if (os::is_MP()) {
2868 // Label notVolatile;
2869 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2870 // in_bytes(constantPoolCacheOopDesc::base_offset() +
2871 // ConstantPoolCacheEntry::flags_offset())));
2872 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2873 // __ testl(rdx, 0x1);
2874 // __ jcc(Assembler::zero, notVolatile);
2875 // __ membar(Assembler::LoadLoad);
2876 // __ bind(notVolatile);
2877 // }
2879 __ decrement(r13);
2880 }
2884 //-----------------------------------------------------------------------------
2885 // Calls
2887 void TemplateTable::count_calls(Register method, Register temp) {
2888 // implemented elsewhere
2889 ShouldNotReachHere();
2890 }
2892 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2893 // determine flags
2894 Bytecodes::Code code = bytecode();
2895 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2896 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2897 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2898 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2899 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2900 const bool receiver_null_check = is_invokespecial;
2901 const bool save_flags = is_invokeinterface || is_invokevirtual;
2902 // setup registers & access constant pool cache
2903 const Register recv = rcx;
2904 const Register flags = rdx;
2905 assert_different_registers(method, index, recv, flags);
2907 // save 'interpreter return address'
2908 __ save_bcp();
2910 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
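// A hedged sketch of the flags layout relied on here: the low byte holds
// the parameter size n, so the receiver is the deepest argument, i.e.
//
//   recv_addr = rsp + n*8 - 8   ( = rsp + (n-1)*8 )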
2912 // load receiver if needed (note: no return address pushed yet)
2913 if (load_receiver) {
2914 assert(!is_invokedynamic, "");
2915 __ movl(recv, flags);
2916 __ andl(recv, 0xFF);
2917 Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
2918 __ movptr(recv, recv_addr);
2919 __ verify_oop(recv);
2920 }
2922 // do null check if needed
2923 if (receiver_null_check) {
2924 __ null_check(recv);
2925 }
2927 if (save_flags) {
2928 __ movl(r13, flags);
2929 }
2931 // compute return type
2932 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2933 // Make sure we don't need to mask flags for tosBits after the above shift
2934 ConstantPoolCacheEntry::verify_tosBits();
2935 // load return address
2936 {
2937 address table_addr;
2938 if (is_invokeinterface || is_invokedynamic)
2939 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2940 else
2941 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2942 ExternalAddress table(table_addr);
2943 __ lea(rscratch1, table);
2944 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
2945 }
2947 // push return address
2948 __ push(flags);
2950 // Restore the flags value (saved in r13 above), and restore r13 as
2951 // the bytecode pointer for later use
2952 if (save_flags) {
2953 __ movl(flags, r13);
2954 __ restore_bcp();
2955 }
2956 }
2959 void TemplateTable::invokevirtual_helper(Register index,
2960 Register recv,
2961 Register flags) {
2962 // Uses temporary registers rax, rdx
2963 assert_different_registers(index, recv, rax, rdx);
2965 // Test for an invoke of a final method
2966 Label notFinal;
2967 __ movl(rax, flags);
2968 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2969 __ jcc(Assembler::zero, notFinal);
2971 const Register method = index; // method must be rbx
2972 assert(method == rbx,
2973 "methodOop must be rbx for interpreter calling convention");
2975 // do the call - the index is actually the method to call
2976 __ verify_oop(method);
2978 // It's final, need a null check here!
2979 __ null_check(recv);
2981 // profile this call
2982 __ profile_final_call(rax);
2984 __ jump_from_interpreted(method, rax);
2986 __ bind(notFinal);
2988 // get receiver klass
2989 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2990 __ load_klass(rax, recv);
2992 __ verify_oop(rax);
2994 // profile this call
2995 __ profile_virtual_call(rax, r14, rdx);
2997 // get target methodOop & entry point
2998 const int base = instanceKlass::vtable_start_offset() * wordSize;
2999 assert(vtableEntry::size() * wordSize == 8,
3000 "adjust the scaling in the code below");
3001 __ movptr(method, Address(rax, index,
3002 Address::times_8,
3003 base + vtableEntry::method_offset_in_bytes()));
3004 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
3005 __ jump_from_interpreted(method, rdx);
3006 }
3009 void TemplateTable::invokevirtual(int byte_no) {
3010 transition(vtos, vtos);
3011 assert(byte_no == f2_byte, "use this argument");
3012 prepare_invoke(rbx, noreg, byte_no);
3014 // rbx: index
3015 // rcx: receiver
3016 // rdx: flags
3018 invokevirtual_helper(rbx, rcx, rdx);
3019 }
3022 void TemplateTable::invokespecial(int byte_no) {
3023 transition(vtos, vtos);
3024 assert(byte_no == f1_byte, "use this argument");
3025 prepare_invoke(rbx, noreg, byte_no);
3026 // do the call
3027 __ verify_oop(rbx);
3028 __ profile_call(rax);
3029 __ jump_from_interpreted(rbx, rax);
3030 }
3033 void TemplateTable::invokestatic(int byte_no) {
3034 transition(vtos, vtos);
3035 assert(byte_no == f1_byte, "use this argument");
3036 prepare_invoke(rbx, noreg, byte_no);
3037 // do the call
3038 __ verify_oop(rbx);
3039 __ profile_call(rax);
3040 __ jump_from_interpreted(rbx, rax);
3041 }
3043 void TemplateTable::fast_invokevfinal(int byte_no) {
3044 transition(vtos, vtos);
3045 assert(byte_no == f2_byte, "use this argument");
3046 __ stop("fast_invokevfinal not used on amd64");
3047 }
3049 void TemplateTable::invokeinterface(int byte_no) {
3050 transition(vtos, vtos);
3051 assert(byte_no == f1_byte, "use this argument");
3052 prepare_invoke(rax, rbx, byte_no);
3054 // rax: Interface
3055 // rbx: index
3056 // rcx: receiver
3057 // rdx: flags
3059 // Special case of invokeinterface called for virtual method of
3060 // java.lang.Object. See cpCacheOop.cpp for details.
3061 // This code isn't produced by javac, but could be produced by
3062 // another compliant Java compiler.
3063 Label notMethod;
3064 __ movl(r14, rdx);
3065 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
3066 __ jcc(Assembler::zero, notMethod);
3068 invokevirtual_helper(rbx, rcx, rdx);
3069 __ bind(notMethod);
3071 // Get receiver klass into rdx - also a null check
3072 __ restore_locals(); // restore r14
3073 __ load_klass(rdx, rcx);
3074 __ verify_oop(rdx);
3076 // profile this call
3077 __ profile_virtual_call(rdx, r13, r14);
3079 Label no_such_interface, no_such_method;
3081 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3082 rdx, rax, rbx,
3083 // outputs: method, scan temp. reg
3084 rbx, r13,
3085 no_such_interface);
3087 // rbx: methodOop to call
3088 // rcx: receiver
3089 // Check for abstract method error
3090 // Note: This should be done more efficiently via a throw_abstract_method_error
3091 // interpreter entry point and a conditional jump to it in case of a null
3092 // method.
3093 __ testptr(rbx, rbx);
3094 __ jcc(Assembler::zero, no_such_method);
3096 // do the call
3097 // rcx: receiver
3098 // rbx: methodOop
3099 __ jump_from_interpreted(rbx, rdx);
3100 __ should_not_reach_here();
3102 // exception handling code follows...
3103 // note: must restore interpreter registers to canonical
3104 // state for exception handling to work correctly!
3106 __ bind(no_such_method);
3107 // throw exception
3108 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3109 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3110 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3111 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3112 // the call_VM checks for exception, so we should never return here.
3113 __ should_not_reach_here();
3115 __ bind(no_such_interface);
3116 // throw exception
3117 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3118 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3119 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3120 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3121 InterpreterRuntime::throw_IncompatibleClassChangeError));
3122 // the call_VM checks for exception, so we should never return here.
3123 __ should_not_reach_here();
3124 return;
3125 }
3127 void TemplateTable::invokedynamic(int byte_no) {
3128 transition(vtos, vtos);
3129 assert(byte_no == f1_oop, "use this argument");
3131 if (!EnableInvokeDynamic) {
3132 // We should not encounter this bytecode if !EnableInvokeDynamic.
3133 // The verifier will stop it. However, if we get past the verifier,
3134 // this will stop the thread in a reasonable way, without crashing the JVM.
3135 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3136 InterpreterRuntime::throw_IncompatibleClassChangeError));
3137 // the call_VM checks for exception, so we should never return here.
3138 __ should_not_reach_here();
3139 return;
3140 }
3142 prepare_invoke(rax, rbx, byte_no);
3144 // rax: CallSite object (f1)
3145 // rbx: unused (f2)
3146 // rcx: receiver address
3147 // rdx: flags (unused)
3149 Register rax_callsite = rax;
3150 Register rcx_method_handle = rcx;
3152 // %%% should make a type profile for any invokedynamic that takes a ref argument
3153 // profile this call
3154 __ profile_call(r13);
3156 __ verify_oop(rax_callsite);
3157 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
3158 __ null_check(rcx_method_handle);
3159 __ verify_oop(rcx_method_handle);
3160 __ prepare_to_jump_from_interpreted();
3161 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3162 }
3165 //-----------------------------------------------------------------------------
3166 // Allocation
3168 void TemplateTable::_new() {
3169 transition(vtos, atos);
3170 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3171 Label slow_case;
3172 Label done;
3173 Label initialize_header;
3174 Label initialize_object; // including clearing the fields
3175 Label allocate_shared;
3177 __ get_cpool_and_tags(rsi, rax);
3178 // Make sure the class we're about to instantiate has been resolved.
3179 // This is done before loading the instanceKlass to be consistent with the
3180 // order in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3181 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3182 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3183 JVM_CONSTANT_Class);
3184 __ jcc(Assembler::notEqual, slow_case);
3186 // get instanceKlass
3187 __ movptr(rsi, Address(rsi, rdx,
3188 Address::times_8, sizeof(constantPoolOopDesc)));
3190 // make sure klass is initialized & doesn't have finalizer
3191 // make sure klass is fully initialized
3192 __ cmpl(Address(rsi,
3193 instanceKlass::init_state_offset_in_bytes() +
3194 sizeof(oopDesc)),
3195 instanceKlass::fully_initialized);
3196 __ jcc(Assembler::notEqual, slow_case);
3198 // get instance_size in instanceKlass (scaled to a count of bytes)
3199 __ movl(rdx,
3200 Address(rsi,
3201 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3202 // test to see if it has a finalizer or is malformed in some way
3203 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3204 __ jcc(Assembler::notZero, slow_case);
3206 // Allocate the instance
3207 // 1) Try to allocate in the TLAB
3208 // 2) if that fails, allocate directly in the shared Eden (if allowed)
3209 // 3) if the above fails (or is not applicable), go to a slow case
3210 // (creates a new TLAB, etc.)
3212 const bool allow_shared_alloc =
3213 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3215 if (UseTLAB) {
3216 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3217 __ lea(rbx, Address(rax, rdx, Address::times_1));
3218 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3219 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3220 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3221 if (ZeroTLAB) {
3222 // the fields have been already cleared
3223 __ jmp(initialize_header);
3224 } else {
3225 // initialize both the header and fields
3226 __ jmp(initialize_object);
3227 }
3228 }
3230 // Allocation in the shared Eden, if allowed.
3231 //
3232 // rdx: instance size in bytes
3233 if (allow_shared_alloc) {
3234 __ bind(allocate_shared);
3236 ExternalAddress top((address)Universe::heap()->top_addr());
3237 ExternalAddress end((address)Universe::heap()->end_addr());
3239 const Register RtopAddr = rscratch1;
3240 const Register RendAddr = rscratch2;
3242 __ lea(RtopAddr, top);
3243 __ lea(RendAddr, end);
3244 __ movptr(rax, Address(RtopAddr, 0));
3246 // For retries rax gets set by cmpxchgq
3247 Label retry;
3248 __ bind(retry);
3249 __ lea(rbx, Address(rax, rdx, Address::times_1));
3250 __ cmpptr(rbx, Address(RendAddr, 0));
3251 __ jcc(Assembler::above, slow_case);
3253 // Compare rax with the current top addr and, if they are still equal,
3254 // store the new top addr (rbx) at the top addr pointer. Sets ZF if they
3255 // were equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3256 //
3257 // rax: object begin
3258 // rbx: object end
3259 // rdx: instance size in bytes
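// In C-like pseudocode, a sketch of the retry loop (not the literal
// instruction sequence):
//
//   do {
//     obj     = *top;             // rax (refreshed by cmpxchg on failure)
//     new_top = obj + size;       // rbx = rax + rdx
//     if (new_top > *end) goto slow_case;
//   } while (!CAS(top, obj, new_top));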
3260 if (os::is_MP()) {
3261 __ lock();
3262 }
3263 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3265 // if someone beat us on the allocation, try again, otherwise continue
3266 __ jcc(Assembler::notEqual, retry);
3268 __ incr_allocated_bytes(r15_thread, rdx, 0);
3269 }
3271 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3272 // The object is initialized before the header. If the object size is
3273 // zero, go directly to the header initialization.
3274 __ bind(initialize_object);
3275 __ decrementl(rdx, sizeof(oopDesc));
3276 __ jcc(Assembler::zero, initialize_header);
3278 // Initialize object fields
3279 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3280 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3281 {
3282 Label loop;
3283 __ bind(loop);
3284 __ movq(Address(rax, rdx, Address::times_8,
3285 sizeof(oopDesc) - oopSize),
3286 rcx);
3287 __ decrementl(rdx);
3288 __ jcc(Assembler::notZero, loop);
3289 }
3291 // initialize object header only.
3292 __ bind(initialize_header);
3293 if (UseBiasedLocking) {
3294 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3295 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3296 } else {
3297 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3298 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3299 }
3300 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3301 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3302 __ store_klass(rax, rsi); // store klass last
3304 {
3305 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3306 // Trigger dtrace event for fastpath
3307 __ push(atos); // save the return value
3308 __ call_VM_leaf(
3309 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3310 __ pop(atos); // restore the return value
3312 }
3313 __ jmp(done);
3314 }
3317 // slow case
3318 __ bind(slow_case);
3319 __ get_constant_pool(c_rarg1);
3320 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3321 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3322 __ verify_oop(rax);
3324 // continue
3325 __ bind(done);
3326 }
3328 void TemplateTable::newarray() {
3329 transition(itos, atos);
3330 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3331 __ movl(c_rarg2, rax);
3332 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3333 c_rarg1, c_rarg2);
3334 }
3336 void TemplateTable::anewarray() {
3337 transition(itos, atos);
3338 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3339 __ get_constant_pool(c_rarg1);
3340 __ movl(c_rarg3, rax);
3341 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3342 c_rarg1, c_rarg2, c_rarg3);
3343 }
3345 void TemplateTable::arraylength() {
3346 transition(atos, itos);
3347 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3348 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3349 }
3351 void TemplateTable::checkcast() {
3352 transition(atos, atos);
3353 Label done, is_null, ok_is_subtype, quicked, resolved;
3354 __ testptr(rax, rax); // object is in rax
3355 __ jcc(Assembler::zero, is_null);
3357 // Get cpool & tags index
3358 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3359 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3360 // See if bytecode has already been quicked
3361 __ cmpb(Address(rdx, rbx,
3362 Address::times_1,
3363 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3364 JVM_CONSTANT_Class);
3365 __ jcc(Assembler::equal, quicked);
3366 __ push(atos); // save receiver for result, and for GC
3367 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3368 __ pop_ptr(rdx); // restore receiver
3369 __ jmpb(resolved);
3371 // Get superklass in rax and subklass in rbx
3372 __ bind(quicked);
3373 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3374 __ movptr(rax, Address(rcx, rbx,
3375 Address::times_8, sizeof(constantPoolOopDesc)));
3377 __ bind(resolved);
3378 __ load_klass(rbx, rdx);
3380 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3381 // Superklass in rax. Subklass in rbx.
3382 __ gen_subtype_check(rbx, ok_is_subtype);
3384 // Come here on failure
3385 __ push_ptr(rdx);
3386 // object is at TOS
3387 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3389 // Come here on success
3390 __ bind(ok_is_subtype);
3391 __ mov(rax, rdx); // Restore object in rdx
3393 // Collect counts on whether this check-cast sees NULLs a lot or not.
3394 if (ProfileInterpreter) {
3395 __ jmp(done);
3396 __ bind(is_null);
3397 __ profile_null_seen(rcx);
3398 } else {
3399 __ bind(is_null); // same as 'done'
3400 }
3401 __ bind(done);
3402 }
3404 void TemplateTable::instanceof() {
3405 transition(atos, itos);
3406 Label done, is_null, ok_is_subtype, quicked, resolved;
3407 __ testptr(rax, rax);
3408 __ jcc(Assembler::zero, is_null);
3410 // Get cpool & tags index
3411 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3412 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3413 // See if bytecode has already been quicked
3414 __ cmpb(Address(rdx, rbx,
3415 Address::times_1,
3416 typeArrayOopDesc::header_size(T_BYTE) * wordSize),
3417 JVM_CONSTANT_Class);
3418 __ jcc(Assembler::equal, quicked);
3420 __ push(atos); // save receiver for result, and for GC
3421 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3422 __ pop_ptr(rdx); // restore receiver
3423 __ verify_oop(rdx);
3424 __ load_klass(rdx, rdx);
3425 __ jmpb(resolved);
3427 // Get superklass in rax and subklass in rdx
3428 __ bind(quicked);
3429 __ load_klass(rdx, rax);
3430 __ movptr(rax, Address(rcx, rbx,
3431 Address::times_8, sizeof(constantPoolOopDesc)));
3433 __ bind(resolved);
3435 // Generate subtype check. Blows rcx, rdi
3436 // Superklass in rax. Subklass in rdx.
3437 __ gen_subtype_check(rdx, ok_is_subtype);
3439 // Come here on failure
3440 __ xorl(rax, rax);
3441 __ jmpb(done);
3442 // Come here on success
3443 __ bind(ok_is_subtype);
3444 __ movl(rax, 1);
3446 // Collect counts on whether this test sees NULLs a lot or not.
3447 if (ProfileInterpreter) {
3448 __ jmp(done);
3449 __ bind(is_null);
3450 __ profile_null_seen(rcx);
3451 } else {
3452 __ bind(is_null); // same as 'done'
3453 }
3454 __ bind(done);
3455 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3456 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3457 }
3459 //-----------------------------------------------------------------------------
3460 // Breakpoints
3461 void TemplateTable::_breakpoint() {
3462 // Note: We get here even if we are single stepping.
3463 // jbug insists on setting breakpoints at every bytecode
3464 // even if we are in single-step mode.
3466 transition(vtos, vtos);
3468 // get the unpatched byte code
3469 __ get_method(c_rarg1);
3470 __ call_VM(noreg,
3471 CAST_FROM_FN_PTR(address,
3472 InterpreterRuntime::get_original_bytecode_at),
3473 c_rarg1, r13);
3474 __ mov(rbx, rax);
3476 // post the breakpoint event
3477 __ get_method(c_rarg1);
3478 __ call_VM(noreg,
3479 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3480 c_rarg1, r13);
3482 // complete the execution of original bytecode
3483 __ dispatch_only_normal(vtos);
3484 }
3486 //-----------------------------------------------------------------------------
3487 // Exceptions
3489 void TemplateTable::athrow() {
3490 transition(atos, vtos);
3491 __ null_check(rax);
3492 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3493 }
3495 //-----------------------------------------------------------------------------
3496 // Synchronization
3497 //
3498 // Note: monitorenter & exit are symmetric routines, which is reflected
3499 // in the assembly code structure as well.
3500 //
3501 // Stack layout:
3502 //
3503 // [expressions ] <--- rsp = expression stack top
3504 // ..
3505 // [expressions ]
3506 // [monitor entry] <--- monitor block top = expression stack bot
3507 // ..
3508 // [monitor entry]
3509 // [frame data ] <--- monitor block bot
3510 // ...
3511 // [saved rbp ] <--- rbp
3512 void TemplateTable::monitorenter() {
3513 transition(atos, vtos);
3515 // check for NULL object
3516 __ null_check(rax);
3518 const Address monitor_block_top(
3519 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3520 const Address monitor_block_bot(
3521 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3522 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3524 Label allocated;
3526 // initialize entry pointer
3527 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3529 // find a free slot in the monitor block (result in c_rarg1)
3530 {
3531 Label entry, loop, exit;
3532 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3533 // starting with top-most entry
3534 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3535 // of monitor block
3536 __ jmpb(entry);
3538 __ bind(loop);
3539 // check if current entry is used
3540 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3541 // if not used then remember entry in c_rarg1
3542 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3543 // check if current entry is for same object
3544 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3545 // if same object then stop searching
3546 __ jccb(Assembler::equal, exit);
3547 // otherwise advance to next entry
3548 __ addptr(c_rarg3, entry_size);
3549 __ bind(entry);
3550 // check if bottom reached
3551 __ cmpptr(c_rarg3, c_rarg2);
3552 // if not at bottom then check this entry
3553 __ jcc(Assembler::notEqual, loop);
3554 __ bind(exit);
3555 }
3557 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3558 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3560 // allocate one if there's no free slot
3561 {
3562 Label entry, loop;
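// A sketch of the frame before/after (drawn from rsp upward, as in the
// stack-layout diagram above): the expression stack is shifted down by
// entry_size bytes and the vacated gap becomes the new monitor entry,
// whose address is left in c_rarg1:
//
//   before: [expressions][monitors][frame data]
//   after:  [expressions][new entry][monitors][frame data]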
3563 // 1. compute new pointers // rsp: old expression stack top
3564 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3565 __ subptr(rsp, entry_size); // move expression stack top
3566 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3567 __ mov(c_rarg3, rsp); // set start value for copy loop
3568 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3569 __ jmp(entry);
3570 // 2. move expression stack contents
3571 __ bind(loop);
3572 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3573 // word from old location
3574 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3575 __ addptr(c_rarg3, wordSize); // advance to next word
3576 __ bind(entry);
3577 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3578 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3579 // copy next word
3580 }
3582 // call run-time routine
3583 // c_rarg1: points to monitor entry
3584 __ bind(allocated);
3586 // Increment bcp to point to the next bytecode, so exception
3587 // handling for async exceptions works correctly.
3588 // The object has already been popped from the stack, so the
3589 // expression stack looks correct.
3590 __ increment(r13);
3592 // store object
3593 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3594 __ lock_object(c_rarg1);
3596 // check to make sure this monitor doesn't cause stack overflow after locking
3597 __ save_bcp(); // in case of exception
3598 __ generate_stack_overflow_check(0);
3600 // The bcp has already been incremented. Just need to dispatch to
3601 // next instruction.
3602 __ dispatch_next(vtos);
3603 }
3606 void TemplateTable::monitorexit() {
3607 transition(atos, vtos);
3609 // check for NULL object
3610 __ null_check(rax);
3612 const Address monitor_block_top(
3613 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3614 const Address monitor_block_bot(
3615 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3616 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3618 Label found;
3620 // find matching slot
3621 {
3622 Label entry, loop;
3623 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3624 // starting with top-most entry
3625 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3626 // of monitor block
3627 __ jmpb(entry);
3629 __ bind(loop);
3630 // check if current entry is for same object
3631 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3632 // if same object then stop searching
3633 __ jcc(Assembler::equal, found);
3634 // otherwise advance to next entry
3635 __ addptr(c_rarg1, entry_size);
3636 __ bind(entry);
3637 // check if bottom reached
3638 __ cmpptr(c_rarg1, c_rarg2);
3639 // if not at bottom then check this entry
3640 __ jcc(Assembler::notEqual, loop);
3641 }
3643 // error handling: unlocking was not block-structured
3644 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3645 InterpreterRuntime::throw_illegal_monitor_state_exception));
3646 __ should_not_reach_here();
3648 // call run-time routine
3649 // c_rarg1: points to monitor entry
3650 __ bind(found);
3651 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3652 __ unlock_object(c_rarg1);
3653 __ pop_ptr(rax); // discard object
3654 }
3657 // Wide instructions
3658 void TemplateTable::wide() {
3659 transition(vtos, vtos);
3660 __ load_unsigned_byte(rbx, at_bcp(1));
3661 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3662 __ jmp(Address(rscratch1, rbx, Address::times_8));
3663 // Note: the r13 increment step is part of the individual wide
3664 // bytecode implementations
3665 }
3668 // Multi arrays
3669 void TemplateTable::multianewarray() {
3670 transition(vtos, atos);
3671 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3672 // last dim is on top of stack; we want address of first one:
3673 // first_addr = last_addr + (ndims - 1) * wordSize
3674 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
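// For example, with ndims == 3 the expression stack holds, from the top
// down: dim3, dim2, dim1; the lea above computes rsp + 3*8 - 8, the
// address of dim1, the first dimension.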
3675 call_VM(rax,
3676 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3677 c_rarg1);
3678 __ load_unsigned_byte(rbx, at_bcp(3));
3679 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3680 }
3681 #endif // !CC_INTERP