Thu, 03 Nov 2011 04:12:49 -0700
7106944: assert(_pc == *pc_addr) failed may be too strong
Reviewed-by: kvn, never
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "interpreter/templateTable.hpp"
30 #include "memory/universe.inline.hpp"
31 #include "oops/methodDataOop.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/synchronizer.hpp"
39 #ifndef CC_INTERP
40 #define __ _masm->
42 //----------------------------------------------------------------------------------------------------
43 // Platform-dependent initialization
45 void TemplateTable::pd_initialize() {
46 // No i486 specific initialization
47 }
49 //----------------------------------------------------------------------------------------------------
50 // Address computation
52 // local variables
53 static inline Address iaddress(int n) {
54 return Address(rdi, Interpreter::local_offset_in_bytes(n));
55 }
57 static inline Address laddress(int n) { return iaddress(n + 1); }
58 static inline Address haddress(int n) { return iaddress(n + 0); }
59 static inline Address faddress(int n) { return iaddress(n); }
60 static inline Address daddress(int n) { return laddress(n); }
61 static inline Address aaddress(int n) { return iaddress(n); }
63 static inline Address iaddress(Register r) {
64 return Address(rdi, r, Interpreter::stackElementScale());
65 }
66 static inline Address laddress(Register r) {
67 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
68 }
69 static inline Address haddress(Register r) {
70 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
71 }
73 static inline Address faddress(Register r) { return iaddress(r); }
74 static inline Address daddress(Register r) { return laddress(r); }
75 static inline Address aaddress(Register r) { return iaddress(r); }
77 // expression stack
78 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
79 // data beyond rsp, which is potentially unsafe in an MT environment;
80 // an interrupt may overwrite that data.)
81 static inline Address at_rsp () {
82 return Address(rsp, 0);
83 }
85 // At top of Java expression stack, which may be different from rsp(). It
86 // isn't for category 1 objects.
87 static inline Address at_tos () {
88 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
89 return tos;
90 }
92 static inline Address at_tos_p1() {
93 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
94 }
96 static inline Address at_tos_p2() {
97 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
98 }
100 // Condition conversion
101 static Assembler::Condition j_not(TemplateTable::Condition cc) {
102 switch (cc) {
103 case TemplateTable::equal : return Assembler::notEqual;
104 case TemplateTable::not_equal : return Assembler::equal;
105 case TemplateTable::less : return Assembler::greaterEqual;
106 case TemplateTable::less_equal : return Assembler::greater;
107 case TemplateTable::greater : return Assembler::lessEqual;
108 case TemplateTable::greater_equal: return Assembler::less;
109 }
110 ShouldNotReachHere();
111 return Assembler::zero;
112 }
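// Annotation (not in the original source): j_not maps a Java comparison to
// the x86 condition for its negation, so a branch template can fall through
// to the not-taken path. A typical use, matching if_icmp below:
//
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(cc), not_taken);   // cc == less  =>  jge not_taken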
115 //----------------------------------------------------------------------------------------------------
116 // Miscellaneous helper routines
118 // Store an oop (or NULL) at the address described by obj.
119 // If val == noreg this means store a NULL
121 static void do_oop_store(InterpreterMacroAssembler* _masm,
122 Address obj,
123 Register val,
124 BarrierSet::Name barrier,
125 bool precise) {
126 assert(val == noreg || val == rax, "parameter is just for looks");
127 switch (barrier) {
128 #ifndef SERIALGC
129 case BarrierSet::G1SATBCT:
130 case BarrierSet::G1SATBCTLogging:
131 {
132 // flatten object address if needed
133 // We do it regardless of precise because we need the registers
134 if (obj.index() == noreg && obj.disp() == 0) {
135 if (obj.base() != rdx) {
136 __ movl(rdx, obj.base());
137 }
138 } else {
139 __ leal(rdx, obj);
140 }
141 __ get_thread(rcx);
142 __ save_bcp();
143 __ g1_write_barrier_pre(rdx /* obj */,
144 rbx /* pre_val */,
145 rcx /* thread */,
146 rsi /* tmp */,
147 val != noreg /* tosca_live */,
148 false /* expand_call */);
150 // Do the actual store
151 // noreg means NULL
152 if (val == noreg) {
153 __ movptr(Address(rdx, 0), NULL_WORD);
154 // No post barrier for NULL
155 } else {
156 __ movl(Address(rdx, 0), val);
157 __ g1_write_barrier_post(rdx /* store_adr */,
158 val /* new_val */,
159 rcx /* thread */,
160 rbx /* tmp */,
161 rsi /* tmp2 */);
162 }
163 __ restore_bcp();
165 }
166 break;
167 #endif // SERIALGC
168 case BarrierSet::CardTableModRef:
169 case BarrierSet::CardTableExtension:
170 {
171 if (val == noreg) {
172 __ movptr(obj, NULL_WORD);
173 } else {
174 __ movl(obj, val);
175 // flatten object address if needed
176 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
177 __ store_check(obj.base());
178 } else {
179 __ leal(rdx, obj);
180 __ store_check(rdx);
181 }
182 }
183 }
184 break;
185 case BarrierSet::ModRef:
186 case BarrierSet::Other:
187 if (val == noreg) {
188 __ movptr(obj, NULL_WORD);
189 } else {
190 __ movl(obj, val);
191 }
192 break;
193 default :
194 ShouldNotReachHere();
196 }
197 }
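// Annotation: a sketch of the two call shapes used later in this file.
// aastore stores the oop in rax through a flattened element address with a
// precise card mark, and passes noreg to store a NULL without a post barrier:
//
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
//   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);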
199 Address TemplateTable::at_bcp(int offset) {
200 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
201 return Address(rsi, offset);
202 }
205 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
206 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
207 int byte_no) {
208 if (!RewriteBytecodes) return;
209 Label L_patch_done;
211 switch (bc) {
212 case Bytecodes::_fast_aputfield:
213 case Bytecodes::_fast_bputfield:
214 case Bytecodes::_fast_cputfield:
215 case Bytecodes::_fast_dputfield:
216 case Bytecodes::_fast_fputfield:
217 case Bytecodes::_fast_iputfield:
218 case Bytecodes::_fast_lputfield:
219 case Bytecodes::_fast_sputfield:
220 {
221 // We skip bytecode quickening for putfield instructions when
222 // the put_code written to the constant pool cache is zero.
223 // This is required so that every execution of this instruction
224 // calls out to InterpreterRuntime::resolve_get_put to do
225 // additional, required work.
226 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
227 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
228 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
229 __ movl(bc_reg, bc);
230 __ cmpl(temp_reg, (int) 0);
231 __ jcc(Assembler::zero, L_patch_done); // don't patch
232 }
233 break;
234 default:
235 assert(byte_no == -1, "sanity");
236 // the pair bytecodes have already done the load.
237 if (load_bc_into_bc_reg) {
238 __ movl(bc_reg, bc);
239 }
240 }
242 if (JvmtiExport::can_post_breakpoint()) {
243 Label L_fast_patch;
244 // if a breakpoint is present we can't rewrite the stream directly
245 __ movzbl(temp_reg, at_bcp(0));
246 __ cmpl(temp_reg, Bytecodes::_breakpoint);
247 __ jcc(Assembler::notEqual, L_fast_patch);
248 __ get_method(temp_reg);
249 // Let breakpoint table handling rewrite to quicker bytecode
250 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
251 #ifndef ASSERT
252 __ jmpb(L_patch_done);
253 #else
254 __ jmp(L_patch_done);
255 #endif
256 __ bind(L_fast_patch);
257 }
259 #ifdef ASSERT
260 Label L_okay;
261 __ load_unsigned_byte(temp_reg, at_bcp(0));
262 __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
263 __ jccb(Assembler::equal, L_okay);
264 __ cmpl(temp_reg, bc_reg);
265 __ jcc(Assembler::equal, L_okay);
266 __ stop("patching the wrong bytecode");
267 __ bind(L_okay);
268 #endif
270 // patch bytecode
271 __ movb(at_bcp(0), bc_reg);
272 __ bind(L_patch_done);
273 }
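// Annotation: callers that have already selected the fast bytecode pass
// load_bc_into_bc_reg == false, e.g. the pair rewriting in iload below:
//
//   __ movl(rcx, Bytecodes::_fast_iload);            // rcx: fast bytecode
//   patch_bytecode(Bytecodes::_iload, rcx, rbx, false);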
275 //----------------------------------------------------------------------------------------------------
276 // Individual instructions
278 void TemplateTable::nop() {
279 transition(vtos, vtos);
280 // nothing to do
281 }
283 void TemplateTable::shouldnotreachhere() {
284 transition(vtos, vtos);
285 __ stop("shouldnotreachhere bytecode");
286 }
290 void TemplateTable::aconst_null() {
291 transition(vtos, atos);
292 __ xorptr(rax, rax);
293 }
296 void TemplateTable::iconst(int value) {
297 transition(vtos, itos);
298 if (value == 0) {
299 __ xorptr(rax, rax);
300 } else {
301 __ movptr(rax, value);
302 }
303 }
306 void TemplateTable::lconst(int value) {
307 transition(vtos, ltos);
308 if (value == 0) {
309 __ xorptr(rax, rax);
310 } else {
311 __ movptr(rax, value);
312 }
313 assert(value >= 0, "check this code");
314 __ xorptr(rdx, rdx);
315 }
318 void TemplateTable::fconst(int value) {
319 transition(vtos, ftos);
320 if (value == 0) { __ fldz();
321 } else if (value == 1) { __ fld1();
322 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // TODO: load 2.0 more directly
323 } else { ShouldNotReachHere();
324 }
325 }
328 void TemplateTable::dconst(int value) {
329 transition(vtos, dtos);
330 if (value == 0) { __ fldz();
331 } else if (value == 1) { __ fld1();
332 } else { ShouldNotReachHere();
333 }
334 }
337 void TemplateTable::bipush() {
338 transition(vtos, itos);
339 __ load_signed_byte(rax, at_bcp(1));
340 }
343 void TemplateTable::sipush() {
344 transition(vtos, itos);
345 __ load_unsigned_short(rax, at_bcp(1));
346 __ bswapl(rax);
347 __ sarl(rax, 16);
348 }
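// Annotation: the operand bytes are big-endian in the bytecode stream, so a
// little-endian 16-bit load needs fixing up; the bswapl/sarl pair both
// reorders the bytes and sign-extends. For the operand bytes 0xFF 0xFE
// (value -2):
//
//   load_unsigned_short:  rax = 0x0000FEFF
//   bswapl:               rax = 0xFFFE0000
//   sarl(16):             rax = 0xFFFFFFFE  (= -2)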
350 void TemplateTable::ldc(bool wide) {
351 transition(vtos, vtos);
352 Label call_ldc, notFloat, notClass, Done;
354 if (wide) {
355 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
356 } else {
357 __ load_unsigned_byte(rbx, at_bcp(1));
358 }
359 __ get_cpool_and_tags(rcx, rax);
360 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
361 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
363 // get type
364 __ xorptr(rdx, rdx);
365 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
367 // unresolved string - get the resolved string
368 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
369 __ jccb(Assembler::equal, call_ldc);
371 // unresolved class - get the resolved class
372 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
373 __ jccb(Assembler::equal, call_ldc);
375 // unresolved class in error (resolution failed) - call into runtime
376 // so that the same error from first resolution attempt is thrown.
377 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
378 __ jccb(Assembler::equal, call_ldc);
380 // resolved class - need to call vm to get java mirror of the class
381 __ cmpl(rdx, JVM_CONSTANT_Class);
382 __ jcc(Assembler::notEqual, notClass);
384 __ bind(call_ldc);
385 __ movl(rcx, wide);
386 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
387 __ push(atos);
388 __ jmp(Done);
390 __ bind(notClass);
391 __ cmpl(rdx, JVM_CONSTANT_Float);
392 __ jccb(Assembler::notEqual, notFloat);
393 // ftos
394 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
395 __ push(ftos);
396 __ jmp(Done);
398 __ bind(notFloat);
399 #ifdef ASSERT
400 { Label L;
401 __ cmpl(rdx, JVM_CONSTANT_Integer);
402 __ jcc(Assembler::equal, L);
403 __ cmpl(rdx, JVM_CONSTANT_String);
404 __ jcc(Assembler::equal, L);
405 __ cmpl(rdx, JVM_CONSTANT_Object);
406 __ jcc(Assembler::equal, L);
407 __ stop("unexpected tag type in ldc");
408 __ bind(L);
409 }
410 #endif
411 Label isOop;
412 // atos and itos
413 // Integer is the only non-oop type we will see here
414 __ cmpl(rdx, JVM_CONSTANT_Integer);
415 __ jccb(Assembler::notEqual, isOop);
416 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
417 __ push(itos);
418 __ jmp(Done);
419 __ bind(isOop);
420 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
421 __ push(atos);
423 if (VerifyOops) {
424 __ verify_oop(rax);
425 }
426 __ bind(Done);
427 }
429 // Fast path for caching oop constants.
430 // %%% We should use this to handle Class and String constants also.
431 // %%% It will simplify the ldc/primitive path considerably.
432 void TemplateTable::fast_aldc(bool wide) {
433 transition(vtos, atos);
435 if (!EnableInvokeDynamic) {
436 // We should not encounter this bytecode if !EnableInvokeDynamic.
437 // The verifier will stop it. However, if we get past the verifier,
438 // this will stop the thread in a reasonable way, without crashing the JVM.
439 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
440 InterpreterRuntime::throw_IncompatibleClassChangeError));
441 // the call_VM checks for exception, so we should never return here.
442 __ should_not_reach_here();
443 return;
444 }
446 const Register cache = rcx;
447 const Register index = rdx;
449 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
450 if (VerifyOops) {
451 __ verify_oop(rax);
452 }
454 Label L_done, L_throw_exception;
455 const Register con_klass_temp = rcx; // same as Rcache
456 __ load_klass(con_klass_temp, rax);
457 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
458 __ jcc(Assembler::notEqual, L_done);
459 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
460 __ jcc(Assembler::notEqual, L_throw_exception);
461 __ xorptr(rax, rax);
462 __ jmp(L_done);
464 // Load the exception from the system-array which wraps it:
465 __ bind(L_throw_exception);
466 __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
467 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
469 __ bind(L_done);
470 }
472 void TemplateTable::ldc2_w() {
473 transition(vtos, vtos);
474 Label Long, Done;
475 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
477 __ get_cpool_and_tags(rcx, rax);
478 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
479 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
481 // get type
482 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
483 __ jccb(Assembler::notEqual, Long);
484 // dtos
485 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
486 __ push(dtos);
487 __ jmpb(Done);
489 __ bind(Long);
490 // ltos
491 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
492 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
494 __ push(ltos);
496 __ bind(Done);
497 }
500 void TemplateTable::locals_index(Register reg, int offset) {
501 __ load_unsigned_byte(reg, at_bcp(offset));
502 __ negptr(reg);
503 }
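// Annotation (a rough sketch, not authoritative): the index is negated
// because locals live at decreasing addresses below the locals pointer rdi,
// so iaddress(rbx) resolves to approximately
//
//   rdi - index * Interpreter::stackElementSize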
506 void TemplateTable::iload() {
507 transition(vtos, itos);
508 if (RewriteFrequentPairs) {
509 Label rewrite, done;
511 // get next byte
512 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
513 // if _iload, wait to rewrite. We only want to rewrite the last two
514 // iloads in a run. If the next bytecode is _fast_iload, the current
515 // and next loads form an iload pair, so the current bytecode can be
516 // rewritten to _fast_iload2.
517 __ cmpl(rbx, Bytecodes::_iload);
518 __ jcc(Assembler::equal, done);
520 __ cmpl(rbx, Bytecodes::_fast_iload);
521 __ movl(rcx, Bytecodes::_fast_iload2);
522 __ jccb(Assembler::equal, rewrite);
524 // if _caload, rewrite to fast_icaload
525 __ cmpl(rbx, Bytecodes::_caload);
526 __ movl(rcx, Bytecodes::_fast_icaload);
527 __ jccb(Assembler::equal, rewrite);
529 // rewrite so iload doesn't check again.
530 __ movl(rcx, Bytecodes::_fast_iload);
532 // rewrite
533 // rcx: fast bytecode
534 __ bind(rewrite);
535 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
536 __ bind(done);
537 }
539 // Get the local value into tos
540 locals_index(rbx);
541 __ movl(rax, iaddress(rbx));
542 }
545 void TemplateTable::fast_iload2() {
546 transition(vtos, itos);
547 locals_index(rbx);
548 __ movl(rax, iaddress(rbx));
549 __ push(itos);
550 locals_index(rbx, 3);
551 __ movl(rax, iaddress(rbx));
552 }
554 void TemplateTable::fast_iload() {
555 transition(vtos, itos);
556 locals_index(rbx);
557 __ movl(rax, iaddress(rbx));
558 }
561 void TemplateTable::lload() {
562 transition(vtos, ltos);
563 locals_index(rbx);
564 __ movptr(rax, laddress(rbx));
565 NOT_LP64(__ movl(rdx, haddress(rbx)));
566 }
569 void TemplateTable::fload() {
570 transition(vtos, ftos);
571 locals_index(rbx);
572 __ fld_s(faddress(rbx));
573 }
576 void TemplateTable::dload() {
577 transition(vtos, dtos);
578 locals_index(rbx);
579 __ fld_d(daddress(rbx));
580 }
583 void TemplateTable::aload() {
584 transition(vtos, atos);
585 locals_index(rbx);
586 __ movptr(rax, aaddress(rbx));
587 }
590 void TemplateTable::locals_index_wide(Register reg) {
591 __ movl(reg, at_bcp(2));
592 __ bswapl(reg);
593 __ shrl(reg, 16);
594 __ negptr(reg);
595 }
598 void TemplateTable::wide_iload() {
599 transition(vtos, itos);
600 locals_index_wide(rbx);
601 __ movl(rax, iaddress(rbx));
602 }
605 void TemplateTable::wide_lload() {
606 transition(vtos, ltos);
607 locals_index_wide(rbx);
608 __ movptr(rax, laddress(rbx));
609 NOT_LP64(__ movl(rdx, haddress(rbx)));
610 }
613 void TemplateTable::wide_fload() {
614 transition(vtos, ftos);
615 locals_index_wide(rbx);
616 __ fld_s(faddress(rbx));
617 }
620 void TemplateTable::wide_dload() {
621 transition(vtos, dtos);
622 locals_index_wide(rbx);
623 __ fld_d(daddress(rbx));
624 }
627 void TemplateTable::wide_aload() {
628 transition(vtos, atos);
629 locals_index_wide(rbx);
630 __ movptr(rax, aaddress(rbx));
631 }
633 void TemplateTable::index_check(Register array, Register index) {
634 // Pop ptr into array
635 __ pop_ptr(array);
636 index_check_without_pop(array, index);
637 }
639 void TemplateTable::index_check_without_pop(Register array, Register index) {
640 // destroys rbx,
641 // check array
642 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
643 LP64_ONLY(__ movslq(index, index));
644 // check index
645 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
646 if (index != rbx) {
647 // ??? convention: move aberrant index into rbx for exception message
648 assert(rbx != array, "different registers");
649 __ mov(rbx, index);
650 }
651 __ jump_cc(Assembler::aboveEqual,
652 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
653 }
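// Annotation: the jump_cc(aboveEqual) after a signed cmpl acts as an
// unsigned compare, so a single branch catches both negative indices and
// indices beyond the array length. Equivalent C sketch:
//
//   if ((juint)index >= (juint)array->length) goto throw_AIOOBE;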
656 void TemplateTable::iaload() {
657 transition(itos, itos);
658 // rdx: array
659 index_check(rdx, rax); // kills rbx,
660 // rax,: index
661 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
662 }
665 void TemplateTable::laload() {
666 transition(itos, ltos);
667 // rax,: index
668 // rdx: array
669 index_check(rdx, rax);
670 __ mov(rbx, rax);
671 // rbx,: index
672 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
673 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
674 }
677 void TemplateTable::faload() {
678 transition(itos, ftos);
679 // rdx: array
680 index_check(rdx, rax); // kills rbx,
681 // rax,: index
682 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
683 }
686 void TemplateTable::daload() {
687 transition(itos, dtos);
688 // rdx: array
689 index_check(rdx, rax); // kills rbx,
690 // rax,: index
691 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
692 }
695 void TemplateTable::aaload() {
696 transition(itos, atos);
697 // rdx: array
698 index_check(rdx, rax); // kills rbx,
699 // rax,: index
700 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
701 }
704 void TemplateTable::baload() {
705 transition(itos, itos);
706 // rdx: array
707 index_check(rdx, rax); // kills rbx,
708 // rax,: index
709 // can do better code for P5 - fix this at some point
710 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
711 __ mov(rax, rbx);
712 }
715 void TemplateTable::caload() {
716 transition(itos, itos);
717 // rdx: array
718 index_check(rdx, rax); // kills rbx,
719 // rax,: index
720 // can do better code for P5 - may want to improve this at some point
721 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
722 __ mov(rax, rbx);
723 }
725 // iload followed by caload frequent pair
726 void TemplateTable::fast_icaload() {
727 transition(vtos, itos);
728 // load index out of locals
729 locals_index(rbx);
730 __ movl(rax, iaddress(rbx));
732 // rdx: array
733 index_check(rdx, rax);
734 // rax,: index
735 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
736 __ mov(rax, rbx);
737 }
739 void TemplateTable::saload() {
740 transition(itos, itos);
741 // rdx: array
742 index_check(rdx, rax); // kills rbx,
743 // rax,: index
744 // can do better code for P5 - may want to improve this at some point
745 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
746 __ mov(rax, rbx);
747 }
750 void TemplateTable::iload(int n) {
751 transition(vtos, itos);
752 __ movl(rax, iaddress(n));
753 }
756 void TemplateTable::lload(int n) {
757 transition(vtos, ltos);
758 __ movptr(rax, laddress(n));
759 NOT_LP64(__ movptr(rdx, haddress(n)));
760 }
763 void TemplateTable::fload(int n) {
764 transition(vtos, ftos);
765 __ fld_s(faddress(n));
766 }
769 void TemplateTable::dload(int n) {
770 transition(vtos, dtos);
771 __ fld_d(daddress(n));
772 }
775 void TemplateTable::aload(int n) {
776 transition(vtos, atos);
777 __ movptr(rax, aaddress(n));
778 }
781 void TemplateTable::aload_0() {
782 transition(vtos, atos);
783 // According to bytecode histograms, the pairs:
784 //
785 // _aload_0, _fast_igetfield
786 // _aload_0, _fast_agetfield
787 // _aload_0, _fast_fgetfield
788 //
789 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
790 // bytecode checks if the next bytecode is either _fast_igetfield,
791 // _fast_agetfield or _fast_fgetfield and then rewrites the
792 // current bytecode into a pair bytecode; otherwise it rewrites the current
793 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
794 //
795 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
796 // otherwise we may miss an opportunity for a pair.
797 //
798 // Also rewrite frequent pairs
799 // aload_0, aload_1
800 // aload_0, iload_1
801 // These pairs are short enough that rewriting them is most profitable.
802 if (RewriteFrequentPairs) {
803 Label rewrite, done;
804 // get next byte
805 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
807 // do actual aload_0
808 aload(0);
810 // if _getfield then wait with rewrite
811 __ cmpl(rbx, Bytecodes::_getfield);
812 __ jcc(Assembler::equal, done);
814 // if _igetfield then rewrite to _fast_iaccess_0
815 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
816 __ cmpl(rbx, Bytecodes::_fast_igetfield);
817 __ movl(rcx, Bytecodes::_fast_iaccess_0);
818 __ jccb(Assembler::equal, rewrite);
820 // if _agetfield then rewrite to _fast_aaccess_0
821 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
822 __ cmpl(rbx, Bytecodes::_fast_agetfield);
823 __ movl(rcx, Bytecodes::_fast_aaccess_0);
824 __ jccb(Assembler::equal, rewrite);
826 // if _fgetfield then rewrite to _fast_faccess_0
827 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
828 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
829 __ movl(rcx, Bytecodes::_fast_faccess_0);
830 __ jccb(Assembler::equal, rewrite);
832 // else rewrite to _fast_aload0
833 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
834 __ movl(rcx, Bytecodes::_fast_aload_0);
836 // rewrite
837 // rcx: fast bytecode
838 __ bind(rewrite);
839 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
841 __ bind(done);
842 } else {
843 aload(0);
844 }
845 }
847 void TemplateTable::istore() {
848 transition(itos, vtos);
849 locals_index(rbx);
850 __ movl(iaddress(rbx), rax);
851 }
854 void TemplateTable::lstore() {
855 transition(ltos, vtos);
856 locals_index(rbx);
857 __ movptr(laddress(rbx), rax);
858 NOT_LP64(__ movptr(haddress(rbx), rdx));
859 }
862 void TemplateTable::fstore() {
863 transition(ftos, vtos);
864 locals_index(rbx);
865 __ fstp_s(faddress(rbx));
866 }
869 void TemplateTable::dstore() {
870 transition(dtos, vtos);
871 locals_index(rbx);
872 __ fstp_d(daddress(rbx));
873 }
876 void TemplateTable::astore() {
877 transition(vtos, vtos);
878 __ pop_ptr(rax);
879 locals_index(rbx);
880 __ movptr(aaddress(rbx), rax);
881 }
884 void TemplateTable::wide_istore() {
885 transition(vtos, vtos);
886 __ pop_i(rax);
887 locals_index_wide(rbx);
888 __ movl(iaddress(rbx), rax);
889 }
892 void TemplateTable::wide_lstore() {
893 transition(vtos, vtos);
894 __ pop_l(rax, rdx);
895 locals_index_wide(rbx);
896 __ movptr(laddress(rbx), rax);
897 NOT_LP64(__ movl(haddress(rbx), rdx));
898 }
901 void TemplateTable::wide_fstore() {
902 wide_istore();
903 }
906 void TemplateTable::wide_dstore() {
907 wide_lstore();
908 }
911 void TemplateTable::wide_astore() {
912 transition(vtos, vtos);
913 __ pop_ptr(rax);
914 locals_index_wide(rbx);
915 __ movptr(aaddress(rbx), rax);
916 }
919 void TemplateTable::iastore() {
920 transition(itos, vtos);
921 __ pop_i(rbx);
922 // rax,: value
923 // rdx: array
924 index_check(rdx, rbx); // prefer index in rbx,
925 // rbx,: index
926 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
927 }
930 void TemplateTable::lastore() {
931 transition(ltos, vtos);
932 __ pop_i(rbx);
933 // rax,: low(value)
934 // rcx: array
935 // rdx: high(value)
936 index_check(rcx, rbx); // prefer index in rbx,
937 // rbx,: index
938 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
939 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
940 }
943 void TemplateTable::fastore() {
944 transition(ftos, vtos);
945 __ pop_i(rbx);
946 // rdx: array
947 // st0: value
948 index_check(rdx, rbx); // prefer index in rbx,
949 // rbx,: index
950 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
951 }
954 void TemplateTable::dastore() {
955 transition(dtos, vtos);
956 __ pop_i(rbx);
957 // rdx: array
958 // st0: value
959 index_check(rdx, rbx); // prefer index in rbx,
960 // rbx,: index
961 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
962 }
965 void TemplateTable::aastore() {
966 Label is_null, ok_is_subtype, done;
967 transition(vtos, vtos);
968 // stack: ..., array, index, value
969 __ movptr(rax, at_tos()); // Value
970 __ movl(rcx, at_tos_p1()); // Index
971 __ movptr(rdx, at_tos_p2()); // Array
973 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
974 index_check_without_pop(rdx, rcx); // kills rbx,
975 // do array store check - check for NULL value first
976 __ testptr(rax, rax);
977 __ jcc(Assembler::zero, is_null);
979 // Move subklass into EBX
980 __ load_klass(rbx, rax);
981 // Move superklass into EAX
982 __ load_klass(rax, rdx);
983 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
984 // Compress array+index*wordSize+12 into a single register. Frees ECX.
985 __ lea(rdx, element_address);
987 // Generate subtype check. Blows ECX. Resets EDI to locals.
988 // Superklass in EAX. Subklass in EBX.
989 __ gen_subtype_check( rbx, ok_is_subtype );
991 // Come here on failure
992 // object is at TOS
993 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
995 // Come here on success
996 __ bind(ok_is_subtype);
998 // Get the value to store
999 __ movptr(rax, at_rsp());
1000 // and store it with appropriate barrier
1001 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
1003 __ jmp(done);
1005 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
1006 __ bind(is_null);
1007 __ profile_null_seen(rbx);
1009 // Store NULL, (noreg means NULL to do_oop_store)
1010 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1012 // Pop stack arguments
1013 __ bind(done);
1014 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1015 }
1018 void TemplateTable::bastore() {
1019 transition(itos, vtos);
1020 __ pop_i(rbx);
1021 // rax,: value
1022 // rdx: array
1023 index_check(rdx, rbx); // prefer index in rbx,
1024 // rbx,: index
1025 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
1026 }
1029 void TemplateTable::castore() {
1030 transition(itos, vtos);
1031 __ pop_i(rbx);
1032 // rax,: value
1033 // rdx: array
1034 index_check(rdx, rbx); // prefer index in rbx,
1035 // rbx,: index
1036 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
1037 }
1040 void TemplateTable::sastore() {
1041 castore();
1042 }
1045 void TemplateTable::istore(int n) {
1046 transition(itos, vtos);
1047 __ movl(iaddress(n), rax);
1048 }
1051 void TemplateTable::lstore(int n) {
1052 transition(ltos, vtos);
1053 __ movptr(laddress(n), rax);
1054 NOT_LP64(__ movptr(haddress(n), rdx));
1055 }
1058 void TemplateTable::fstore(int n) {
1059 transition(ftos, vtos);
1060 __ fstp_s(faddress(n));
1061 }
1064 void TemplateTable::dstore(int n) {
1065 transition(dtos, vtos);
1066 __ fstp_d(daddress(n));
1067 }
1070 void TemplateTable::astore(int n) {
1071 transition(vtos, vtos);
1072 __ pop_ptr(rax);
1073 __ movptr(aaddress(n), rax);
1074 }
1077 void TemplateTable::pop() {
1078 transition(vtos, vtos);
1079 __ addptr(rsp, Interpreter::stackElementSize);
1080 }
1083 void TemplateTable::pop2() {
1084 transition(vtos, vtos);
1085 __ addptr(rsp, 2*Interpreter::stackElementSize);
1086 }
1089 void TemplateTable::dup() {
1090 transition(vtos, vtos);
1091 // stack: ..., a
1092 __ load_ptr(0, rax);
1093 __ push_ptr(rax);
1094 // stack: ..., a, a
1095 }
1098 void TemplateTable::dup_x1() {
1099 transition(vtos, vtos);
1100 // stack: ..., a, b
1101 __ load_ptr( 0, rax); // load b
1102 __ load_ptr( 1, rcx); // load a
1103 __ store_ptr(1, rax); // store b
1104 __ store_ptr(0, rcx); // store a
1105 __ push_ptr(rax); // push b
1106 // stack: ..., b, a, b
1107 }
1110 void TemplateTable::dup_x2() {
1111 transition(vtos, vtos);
1112 // stack: ..., a, b, c
1113 __ load_ptr( 0, rax); // load c
1114 __ load_ptr( 2, rcx); // load a
1115 __ store_ptr(2, rax); // store c in a
1116 __ push_ptr(rax); // push c
1117 // stack: ..., c, b, c, c
1118 __ load_ptr( 2, rax); // load b
1119 __ store_ptr(2, rcx); // store a in b
1120 // stack: ..., c, a, c, c
1121 __ store_ptr(1, rax); // store b in c
1122 // stack: ..., c, a, b, c
1123 }
1126 void TemplateTable::dup2() {
1127 transition(vtos, vtos);
1128 // stack: ..., a, b
1129 __ load_ptr(1, rax); // load a
1130 __ push_ptr(rax); // push a
1131 __ load_ptr(1, rax); // load b
1132 __ push_ptr(rax); // push b
1133 // stack: ..., a, b, a, b
1134 }
1137 void TemplateTable::dup2_x1() {
1138 transition(vtos, vtos);
1139 // stack: ..., a, b, c
1140 __ load_ptr( 0, rcx); // load c
1141 __ load_ptr( 1, rax); // load b
1142 __ push_ptr(rax); // push b
1143 __ push_ptr(rcx); // push c
1144 // stack: ..., a, b, c, b, c
1145 __ store_ptr(3, rcx); // store c in b
1146 // stack: ..., a, c, c, b, c
1147 __ load_ptr( 4, rcx); // load a
1148 __ store_ptr(2, rcx); // store a in 2nd c
1149 // stack: ..., a, c, a, b, c
1150 __ store_ptr(4, rax); // store b in a
1151 // stack: ..., b, c, a, b, c
1152 // stack: ..., b, c, a, b, c
1153 }
1156 void TemplateTable::dup2_x2() {
1157 transition(vtos, vtos);
1158 // stack: ..., a, b, c, d
1159 __ load_ptr( 0, rcx); // load d
1160 __ load_ptr( 1, rax); // load c
1161 __ push_ptr(rax); // push c
1162 __ push_ptr(rcx); // push d
1163 // stack: ..., a, b, c, d, c, d
1164 __ load_ptr( 4, rax); // load b
1165 __ store_ptr(2, rax); // store b in d
1166 __ store_ptr(4, rcx); // store d in b
1167 // stack: ..., a, d, c, b, c, d
1168 __ load_ptr( 5, rcx); // load a
1169 __ load_ptr( 3, rax); // load c
1170 __ store_ptr(3, rcx); // store a in c
1171 __ store_ptr(5, rax); // store c in a
1172 // stack: ..., c, d, a, b, c, d
1173 // stack: ..., c, d, a, b, c, d
1174 }
1177 void TemplateTable::swap() {
1178 transition(vtos, vtos);
1179 // stack: ..., a, b
1180 __ load_ptr( 1, rcx); // load a
1181 __ load_ptr( 0, rax); // load b
1182 __ store_ptr(0, rcx); // store a in b
1183 __ store_ptr(1, rax); // store b in a
1184 // stack: ..., b, a
1185 }
1188 void TemplateTable::iop2(Operation op) {
1189 transition(itos, itos);
1190 switch (op) {
1191 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1192 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1193 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1194 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1195 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1196 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1197 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1198 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1199 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1200 default : ShouldNotReachHere();
1201 }
1202 }
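// Annotation: relying on the implicit masking is correct here because the
// JVM spec defines ishl/ishr/iushr to use only the low 5 bits of the shift
// count, matching the hardware, e.g.
//
//   1 << 35  ==  1 << (35 & 0x1f)  ==  1 << 3  ==  8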
1205 void TemplateTable::lop2(Operation op) {
1206 transition(ltos, ltos);
1207 __ pop_l(rbx, rcx);
1208 switch (op) {
1209 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1210 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1211 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1212 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1213 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1214 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1215 default : ShouldNotReachHere();
1216 }
1217 }
1220 void TemplateTable::idiv() {
1221 transition(itos, itos);
1222 __ mov(rcx, rax);
1223 __ pop_i(rax);
1224 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1225 // they are not equal, one could do a normal division (no correction
1226 // needed), which may speed up this implementation for the common case.
1227 // (see also JVM spec., p.243 & p.271)
1228 __ corrected_idivl(rcx);
1229 }
1232 void TemplateTable::irem() {
1233 transition(itos, itos);
1234 __ mov(rcx, rax);
1235 __ pop_i(rax);
1236 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1237 // they are not equal, one could do a normal division (no correction
1238 // needed), which may speed up this implementation for the common case.
1239 // (see also JVM spec., p.243 & p.271)
1240 __ corrected_idivl(rcx);
1241 __ mov(rax, rdx);
1242 }
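// Annotation: the "correction" handles min_int. On x86, idiv raises a
// divide error for min_int / -1, while the JVM spec requires
//
//   Integer.MIN_VALUE / -1  ==  Integer.MIN_VALUE
//   Integer.MIN_VALUE % -1  ==  0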
1245 void TemplateTable::lmul() {
1246 transition(ltos, ltos);
1247 __ pop_l(rbx, rcx);
1248 __ push(rcx); __ push(rbx);
1249 __ push(rdx); __ push(rax);
1250 __ lmul(2 * wordSize, 0);
1251 __ addptr(rsp, 4 * wordSize); // take off temporaries
1252 }
1255 void TemplateTable::ldiv() {
1256 transition(ltos, ltos);
1257 __ pop_l(rbx, rcx);
1258 __ push(rcx); __ push(rbx);
1259 __ push(rdx); __ push(rax);
1260 // check if y = 0
1261 __ orl(rax, rdx);
1262 __ jump_cc(Assembler::zero,
1263 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1264 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1265 __ addptr(rsp, 4 * wordSize); // take off temporaries
1266 }
1269 void TemplateTable::lrem() {
1270 transition(ltos, ltos);
1271 __ pop_l(rbx, rcx);
1272 __ push(rcx); __ push(rbx);
1273 __ push(rdx); __ push(rax);
1274 // check if y = 0
1275 __ orl(rax, rdx);
1276 __ jump_cc(Assembler::zero,
1277 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1278 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1279 __ addptr(rsp, 4 * wordSize);
1280 }
1283 void TemplateTable::lshl() {
1284 transition(itos, ltos);
1285 __ movl(rcx, rax); // get shift count
1286 __ pop_l(rax, rdx); // get shift value
1287 __ lshl(rdx, rax);
1288 }
1291 void TemplateTable::lshr() {
1292 transition(itos, ltos);
1293 __ mov(rcx, rax); // get shift count
1294 __ pop_l(rax, rdx); // get shift value
1295 __ lshr(rdx, rax, true);
1296 }
1299 void TemplateTable::lushr() {
1300 transition(itos, ltos);
1301 __ mov(rcx, rax); // get shift count
1302 __ pop_l(rax, rdx); // get shift value
1303 __ lshr(rdx, rax);
1304 }
1307 void TemplateTable::fop2(Operation op) {
1308 transition(ftos, ftos);
1309 switch (op) {
1310 case add: __ fadd_s (at_rsp()); break;
1311 case sub: __ fsubr_s(at_rsp()); break;
1312 case mul: __ fmul_s (at_rsp()); break;
1313 case div: __ fdivr_s(at_rsp()); break;
1314 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1315 default : ShouldNotReachHere();
1316 }
1317 __ f2ieee();
1318 __ pop(rax); // pop float thing off
1319 }
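// Annotation: the reversed x87 forms are used because the second operand
// arrived in ST0 (ftos) while the first is still at the stack top. For sub,
// with b in ST0 and a at rsp:
//
//   __ fsubr_s(at_rsp());   // ST0 = a - b, the Java operand order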
1322 void TemplateTable::dop2(Operation op) {
1323 transition(dtos, dtos);
1325 switch (op) {
1326 case add: __ fadd_d (at_rsp()); break;
1327 case sub: __ fsubr_d(at_rsp()); break;
1328 case mul: {
1329 Label L_strict;
1330 Label L_join;
1331 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1332 __ get_method(rcx);
1333 __ movl(rcx, access_flags);
1334 __ testl(rcx, JVM_ACC_STRICT);
1335 __ jccb(Assembler::notZero, L_strict);
1336 __ fmul_d (at_rsp());
1337 __ jmpb(L_join);
1338 __ bind(L_strict);
1339 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1340 __ fmulp();
1341 __ fmul_d (at_rsp());
1342 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1343 __ fmulp();
1344 __ bind(L_join);
1345 break;
1346 }
1347 case div: {
1348 Label L_strict;
1349 Label L_join;
1350 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1351 __ get_method(rcx);
1352 __ movl(rcx, access_flags);
1353 __ testl(rcx, JVM_ACC_STRICT);
1354 __ jccb(Assembler::notZero, L_strict);
1355 __ fdivr_d(at_rsp());
1356 __ jmp(L_join);
1357 __ bind(L_strict);
1358 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1359 __ fmul_d (at_rsp());
1360 __ fdivrp();
1361 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1362 __ fmulp();
1363 __ bind(L_join);
1364 break;
1365 }
1366 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1367 default : ShouldNotReachHere();
1368 }
1369 __ d2ieee();
1370 // Pop double precision number from rsp.
1371 __ pop(rax);
1372 __ pop(rdx);
1373 }
1376 void TemplateTable::ineg() {
1377 transition(itos, itos);
1378 __ negl(rax);
1379 }
1382 void TemplateTable::lneg() {
1383 transition(ltos, ltos);
1384 __ lneg(rdx, rax);
1385 }
1388 void TemplateTable::fneg() {
1389 transition(ftos, ftos);
1390 __ fchs();
1391 }
1394 void TemplateTable::dneg() {
1395 transition(dtos, dtos);
1396 __ fchs();
1397 }
1400 void TemplateTable::iinc() {
1401 transition(vtos, vtos);
1402 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1403 locals_index(rbx);
1404 __ addl(iaddress(rbx), rdx);
1405 }
1408 void TemplateTable::wide_iinc() {
1409 transition(vtos, vtos);
1410 __ movl(rdx, at_bcp(4)); // get constant
1411 locals_index_wide(rbx);
1412 __ bswapl(rdx); // swap bytes & sign-extend constant
1413 __ sarl(rdx, 16);
1414 __ addl(iaddress(rbx), rdx);
1415 // Note: should probably use only one movl to get both
1416 // the index and the constant -> fix this
1417 }
1420 void TemplateTable::convert() {
1421 // Checking
1422 #ifdef ASSERT
1423 { TosState tos_in = ilgl;
1424 TosState tos_out = ilgl;
1425 switch (bytecode()) {
1426 case Bytecodes::_i2l: // fall through
1427 case Bytecodes::_i2f: // fall through
1428 case Bytecodes::_i2d: // fall through
1429 case Bytecodes::_i2b: // fall through
1430 case Bytecodes::_i2c: // fall through
1431 case Bytecodes::_i2s: tos_in = itos; break;
1432 case Bytecodes::_l2i: // fall through
1433 case Bytecodes::_l2f: // fall through
1434 case Bytecodes::_l2d: tos_in = ltos; break;
1435 case Bytecodes::_f2i: // fall through
1436 case Bytecodes::_f2l: // fall through
1437 case Bytecodes::_f2d: tos_in = ftos; break;
1438 case Bytecodes::_d2i: // fall through
1439 case Bytecodes::_d2l: // fall through
1440 case Bytecodes::_d2f: tos_in = dtos; break;
1441 default : ShouldNotReachHere();
1442 }
1443 switch (bytecode()) {
1444 case Bytecodes::_l2i: // fall through
1445 case Bytecodes::_f2i: // fall through
1446 case Bytecodes::_d2i: // fall through
1447 case Bytecodes::_i2b: // fall through
1448 case Bytecodes::_i2c: // fall through
1449 case Bytecodes::_i2s: tos_out = itos; break;
1450 case Bytecodes::_i2l: // fall through
1451 case Bytecodes::_f2l: // fall through
1452 case Bytecodes::_d2l: tos_out = ltos; break;
1453 case Bytecodes::_i2f: // fall through
1454 case Bytecodes::_l2f: // fall through
1455 case Bytecodes::_d2f: tos_out = ftos; break;
1456 case Bytecodes::_i2d: // fall through
1457 case Bytecodes::_l2d: // fall through
1458 case Bytecodes::_f2d: tos_out = dtos; break;
1459 default : ShouldNotReachHere();
1460 }
1461 transition(tos_in, tos_out);
1462 }
1463 #endif // ASSERT
1465 // Conversion
1466 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1467 switch (bytecode()) {
1468 case Bytecodes::_i2l:
1469 __ extend_sign(rdx, rax);
1470 break;
1471 case Bytecodes::_i2f:
1472 __ push(rax); // store int on tos
1473 __ fild_s(at_rsp()); // load int to ST0
1474 __ f2ieee(); // truncate to float size
1475 __ pop(rcx); // adjust rsp
1476 break;
1477 case Bytecodes::_i2d:
1478 __ push(rax); // add one slot for d2ieee()
1479 __ push(rax); // store int on tos
1480 __ fild_s(at_rsp()); // load int to ST0
1481 __ d2ieee(); // truncate to double size
1482 __ pop(rcx); // adjust rsp
1483 __ pop(rcx);
1484 break;
1485 case Bytecodes::_i2b:
1486 __ shll(rax, 24); // truncate upper 24 bits
1487 __ sarl(rax, 24); // and sign-extend byte
1488 LP64_ONLY(__ movsbl(rax, rax));
1489 break;
1490 case Bytecodes::_i2c:
1491 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1492 LP64_ONLY(__ movzwl(rax, rax));
1493 break;
1494 case Bytecodes::_i2s:
1495 __ shll(rax, 16); // truncate upper 16 bits
1496 __ sarl(rax, 16); // and sign-extend short
1497 LP64_ONLY(__ movswl(rax, rax));
1498 break;
1499 case Bytecodes::_l2i:
1500 /* nothing to do */
1501 break;
1502 case Bytecodes::_l2f:
1503 __ push(rdx); // store long on tos
1504 __ push(rax);
1505 __ fild_d(at_rsp()); // load long to ST0
1506 __ f2ieee(); // truncate to float size
1507 __ pop(rcx); // adjust rsp
1508 __ pop(rcx);
1509 break;
1510 case Bytecodes::_l2d:
1511 __ push(rdx); // store long on tos
1512 __ push(rax);
1513 __ fild_d(at_rsp()); // load long to ST0
1514 __ d2ieee(); // truncate to double size
1515 __ pop(rcx); // adjust rsp
1516 __ pop(rcx);
1517 break;
1518 case Bytecodes::_f2i:
1519 __ push(rcx); // reserve space for argument
1520 __ fstp_s(at_rsp()); // pass float argument on stack
1521 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1522 break;
1523 case Bytecodes::_f2l:
1524 __ push(rcx); // reserve space for argument
1525 __ fstp_s(at_rsp()); // pass float argument on stack
1526 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1527 break;
1528 case Bytecodes::_f2d:
1529 /* nothing to do */
1530 break;
1531 case Bytecodes::_d2i:
1532 __ push(rcx); // reserve space for argument
1533 __ push(rcx);
1534 __ fstp_d(at_rsp()); // pass double argument on stack
1535 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1536 break;
1537 case Bytecodes::_d2l:
1538 __ push(rcx); // reserve space for argument
1539 __ push(rcx);
1540 __ fstp_d(at_rsp()); // pass double argument on stack
1541 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1542 break;
1543 case Bytecodes::_d2f:
1544 __ push(rcx); // reserve space for f2ieee()
1545 __ f2ieee(); // truncate to float size
1546 __ pop(rcx); // adjust rsp
1547 break;
1548 default :
1549 ShouldNotReachHere();
1550 }
1551 }
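// Annotation: the shift pairs are narrowing casts with sign-extension; for
// _i2b and rax = 0x1FF:
//
//   shll(rax, 24):  rax = 0xFF000000
//   sarl(rax, 24):  rax = 0xFFFFFFFF  (= -1, the value of (byte)0x1FF)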
1554 void TemplateTable::lcmp() {
1555 transition(ltos, itos);
1556 // y = rdx:rax
1557 __ pop_l(rbx, rcx); // get x = rcx:rbx
1558 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1559 __ mov(rax, rcx);
1560 }
1563 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1564 if (is_float) {
1565 __ fld_s(at_rsp());
1566 } else {
1567 __ fld_d(at_rsp());
1568 __ pop(rdx);
1569 }
1570 __ pop(rcx);
1571 __ fcmp2int(rax, unordered_result < 0);
1572 }
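// Annotation: unordered_result selects between the fcmpl and fcmpg
// bytecodes. Per the JVM spec, when either operand is NaN, fcmpl
// (unordered_result == -1) produces -1 and fcmpg produces +1; the two
// forms agree on all ordered inputs:
//
//   result = (x > y) ? 1 : (x == y) ? 0 : (x < y) ? -1 : unordered_result;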
1575 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1576 __ get_method(rcx); // ECX holds method
1577 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1579 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
1580 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
1581 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1583 // Load up EDX with the branch displacement
1584 __ movl(rdx, at_bcp(1));
1585 __ bswapl(rdx);
1586 if (!is_wide) __ sarl(rdx, 16);
1587 LP64_ONLY(__ movslq(rdx, rdx));
1590 // Handle all the JSR stuff here, then exit.
1591 // It's much shorter and cleaner than intermingling with the
1592 // non-JSR normal-branch stuff occurring below.
1593 if (is_jsr) {
1594 // Pre-load the next target bytecode into EBX
1595 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1597 // compute return address as bci in rax,
1598 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
1599 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1600 // Adjust the bcp in RSI by the displacement in EDX
1601 __ addptr(rsi, rdx);
1602 // Push return address
1603 __ push_i(rax);
1604 // jsr returns vtos
1605 __ dispatch_only_noverify(vtos);
1606 return;
1607 }
1609 // Normal (non-jsr) branch handling
1611 // Adjust the bcp in RSI by the displacement in EDX
1612 __ addptr(rsi, rdx);
1614 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1615 Label backedge_counter_overflow;
1616 Label profile_method;
1617 Label dispatch;
1618 if (UseLoopCounter) {
1619 // increment backedge counter for backward branches
1620 // rax,: MDO
1621 // rbx,: MDO bumped taken-count
1622 // rcx: method
1623 // rdx: target offset
1624 // rsi: target bcp
1625 // rdi: locals pointer
1626 __ testl(rdx, rdx); // check if forward or backward branch
1627 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1629 if (TieredCompilation) {
1630 Label no_mdo;
1631 int increment = InvocationCounter::count_increment;
1632 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1633 if (ProfileInterpreter) {
1634 // Are we profiling?
1635 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1636 __ testptr(rbx, rbx);
1637 __ jccb(Assembler::zero, no_mdo);
1638 // Increment the MDO backedge counter
1639 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1640 in_bytes(InvocationCounter::counter_offset()));
1641 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1642 rax, false, Assembler::zero, &backedge_counter_overflow);
1643 __ jmp(dispatch);
1644 }
1645 __ bind(no_mdo);
1646 // Increment backedge counter in methodOop
1647 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1648 rax, false, Assembler::zero, &backedge_counter_overflow);
1649 } else {
1650 // increment counter
1651 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1652 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1653 __ movl(Address(rcx, be_offset), rax); // store counter
1655 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1656 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1657 __ addl(rax, Address(rcx, be_offset)); // add both counters
1659 if (ProfileInterpreter) {
1660 // Test to see if we should create a method data oop
1661 __ cmp32(rax,
1662 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1663 __ jcc(Assembler::less, dispatch);
1665 // if no method data exists, go to profile method
1666 __ test_method_data_pointer(rax, profile_method);
1668 if (UseOnStackReplacement) {
1669 // check for overflow against rbx, which is the MDO taken count
1670 __ cmp32(rbx,
1671 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1672 __ jcc(Assembler::below, dispatch);
1674 // When ProfileInterpreter is on, the backedge_count comes from the
1675 // methodDataOop, whose value does not get reset on the call to
1676 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1677 // routine while the method is being compiled, add a second test to make
1678 // sure the overflow function is called only once every overflow_frequency.
1679 const int overflow_frequency = 1024;
1680 __ andptr(rbx, overflow_frequency-1);
1681 __ jcc(Assembler::zero, backedge_counter_overflow);
1682 }
1683 } else {
1684 if (UseOnStackReplacement) {
1685 // check for overflow against rax, which is the sum of the counters
1686 __ cmp32(rax,
1687 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1688 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1690 }
1691 }
1692 }
1693 __ bind(dispatch);
1694 }
1696 // Pre-load the next target bytecode into EBX
1697 __ load_unsigned_byte(rbx, Address(rsi, 0));
1699 // continue with the bytecode @ target
1700 // rax,: return bci for jsr's, unused otherwise
1701 // rbx,: target bytecode
1702 // rsi: target bcp
1703 __ dispatch_only(vtos);
1705 if (UseLoopCounter) {
1706 if (ProfileInterpreter) {
1707 // Out-of-line code to allocate method data oop.
1708 __ bind(profile_method);
1709 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1710 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1711 __ set_method_data_pointer_for_bcp();
1712 __ jmp(dispatch);
1713 }
1715 if (UseOnStackReplacement) {
1717 // invocation counter overflow
1718 __ bind(backedge_counter_overflow);
1719 __ negptr(rdx);
1720 __ addptr(rdx, rsi); // branch bcp
1721 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1722 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1724 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1725 // rbx,: target bytecode
1726 // rdx: scratch
1727 // rdi: locals pointer
1728 // rsi: bcp
1729 __ testptr(rax, rax); // test result
1730 __ jcc(Assembler::zero, dispatch); // no osr if null
1731 // nmethod may have been invalidated (VM may block upon call_VM return)
1732 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1733 __ cmpl(rcx, InvalidOSREntryBci);
1734 __ jcc(Assembler::equal, dispatch);
1736 // We have the address of an on stack replacement routine in rax.
1737 // We need to prepare to execute the OSR method. First we must
1738 // migrate the locals and monitors off of the stack.
1740 __ mov(rbx, rax); // save the nmethod
1742 const Register thread = rcx;
1743 __ get_thread(thread);
1744 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1745 // rax, is OSR buffer, move it to expected parameter location
1746 __ mov(rcx, rax);
1748 // pop the interpreter frame
1749 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1750 __ leave(); // remove frame anchor
1751 __ pop(rdi); // get return address
1752 __ mov(rsp, rdx); // set sp to sender sp
1754 // Align stack pointer for compiled code (note that caller is
1755 // responsible for undoing this fixup by remembering the old SP
1756 // in an rbp,-relative location)
1757 __ andptr(rsp, -(StackAlignmentInBytes));
1759 // push the (possibly adjusted) return address
1760 __ push(rdi);
1762 // and begin the OSR nmethod
1763 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1764 }
1765 }
1766 }
1769 void TemplateTable::if_0cmp(Condition cc) {
1770 transition(itos, vtos);
1771 // assume branch is more often taken than not (loops use backward branches)
1772 Label not_taken;
1773 __ testl(rax, rax);
1774 __ jcc(j_not(cc), not_taken);
1775 branch(false, false);
1776 __ bind(not_taken);
1777 __ profile_not_taken_branch(rax);
1778 }
1781 void TemplateTable::if_icmp(Condition cc) {
1782 transition(itos, vtos);
1783 // assume branch is more often taken than not (loops use backward branches)
1784 Label not_taken;
1785 __ pop_i(rdx);
1786 __ cmpl(rdx, rax);
1787 __ jcc(j_not(cc), not_taken);
1788 branch(false, false);
1789 __ bind(not_taken);
1790 __ profile_not_taken_branch(rax);
1791 }
1794 void TemplateTable::if_nullcmp(Condition cc) {
1795 transition(atos, vtos);
1796 // assume branch is more often taken than not (loops use backward branches)
1797 Label not_taken;
1798 __ testptr(rax, rax);
1799 __ jcc(j_not(cc), not_taken);
1800 branch(false, false);
1801 __ bind(not_taken);
1802 __ profile_not_taken_branch(rax);
1803 }
1806 void TemplateTable::if_acmp(Condition cc) {
1807 transition(atos, vtos);
1808 // assume branch is more often taken than not (loops use backward branches)
1809 Label not_taken;
1810 __ pop_ptr(rdx);
1811 __ cmpptr(rdx, rax);
1812 __ jcc(j_not(cc), not_taken);
1813 branch(false, false);
1814 __ bind(not_taken);
1815 __ profile_not_taken_branch(rax);
1816 }
1819 void TemplateTable::ret() {
1820 transition(vtos, vtos);
1821 locals_index(rbx);
1822 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1823 __ profile_ret(rbx, rcx);
1824 __ get_method(rax);
1825 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1826 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1827 constMethodOopDesc::codes_offset()));
1828 __ dispatch_next(vtos);
1829 }
1832 void TemplateTable::wide_ret() {
1833 transition(vtos, vtos);
1834 locals_index_wide(rbx);
1835 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1836 __ profile_ret(rbx, rcx);
1837 __ get_method(rax);
1838 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1839 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1840 __ dispatch_next(vtos);
1841 }
1844 void TemplateTable::tableswitch() {
1845 Label default_case, continue_execution;
1846 transition(itos, vtos);
1847 // compute the aligned switch table base (from rsi, the bcp) in rbx
1848 __ lea(rbx, at_bcp(wordSize));
1849 __ andptr(rbx, -wordSize);
1850 // load lo & hi
1851 __ movl(rcx, Address(rbx, 1 * wordSize));
1852 __ movl(rdx, Address(rbx, 2 * wordSize));
1853 __ bswapl(rcx);
1854 __ bswapl(rdx);
1855 // check against lo & hi
1856 __ cmpl(rax, rcx);
1857 __ jccb(Assembler::less, default_case);
1858 __ cmpl(rax, rdx);
1859 __ jccb(Assembler::greater, default_case);
1860 // lookup dispatch offset
1861 __ subl(rax, rcx);
1862 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1863 __ profile_switch_case(rax, rbx, rcx);
1864 // continue execution
1865 __ bind(continue_execution);
1866 __ bswapl(rdx);
1867 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1868 __ addptr(rsi, rdx);
1869 __ dispatch_only(vtos);
1870 // handle default
1871 __ bind(default_case);
1872 __ profile_switch_default(rax);
1873 __ movl(rdx, Address(rbx, 0));
1874 __ jmp(continue_execution);
1875 }
1878 void TemplateTable::lookupswitch() {
1879 transition(itos, itos);
1880 __ stop("lookupswitch bytecode should have been rewritten");
1881 }
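// The lookupswitch operands (again 4-byte aligned) are:
//
//   default offset  (4 bytes)
//   npairs          (4 bytes)
//   match/offset pairs (npairs entries of 8 bytes each, sorted by match value)
//
// fast_linearswitch below scans the pairs linearly (after byte-swapping the
// key once, so the table entries need no swapping); fast_binaryswitch relies
// on the sort order to binary-search them.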
1884 void TemplateTable::fast_linearswitch() {
1885 transition(itos, vtos);
1886 Label loop_entry, loop, found, continue_execution;
1887 // bswapl rax, so we can avoid bswapping the table entries
1888 __ bswapl(rax);
1889 // align rsi
1890 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1891 __ andptr(rbx, -wordSize);
1892 // set counter
1893 __ movl(rcx, Address(rbx, wordSize));
1894 __ bswapl(rcx);
1895 __ jmpb(loop_entry);
1896 // table search
1897 __ bind(loop);
1898 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1899 __ jccb(Assembler::equal, found);
1900 __ bind(loop_entry);
1901 __ decrementl(rcx);
1902 __ jcc(Assembler::greaterEqual, loop);
1903 // default case
1904 __ profile_switch_default(rax);
1905 __ movl(rdx, Address(rbx, 0));
1906 __ jmpb(continue_execution);
1907 // entry found -> get offset
1908 __ bind(found);
1909 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1910 __ profile_switch_case(rcx, rax, rbx);
1911 // continue execution
1912 __ bind(continue_execution);
1913 __ bswapl(rdx);
1914 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1915 __ addptr(rsi, rdx);
1916 __ dispatch_only(vtos);
1917 }
1920 void TemplateTable::fast_binaryswitch() {
1921 transition(itos, vtos);
1922 // Implementation using the following core algorithm:
1923 //
1924 // int binary_search(int key, LookupswitchPair* array, int n) {
1925 // // Binary search according to "Methodik des Programmierens" by
1926 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1927 // int i = 0;
1928 // int j = n;
1929 // while (i+1 < j) {
1930 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1931 // // with Q: for all i: 0 <= i < n: key < a[i]
1932 //     // where a stands for the array and assuming that the (nonexistent)
1933 //     // element a[n] is infinitely big.
1934 // int h = (i + j) >> 1;
1935 // // i < h < j
1936 // if (key < array[h].fast_match()) {
1937 // j = h;
1938 // } else {
1939 // i = h;
1940 // }
1941 // }
1942 // // R: a[i] <= key < a[i+1] or Q
1943 // // (i.e., if key is within array, i is the correct index)
1944 // return i;
1945 // }
1947 // register allocation
1948 const Register key = rax; // already set (tosca)
1949 const Register array = rbx;
1950 const Register i = rcx;
1951 const Register j = rdx;
1952 const Register h = rdi; // needs to be restored
1953 const Register temp = rsi;
1954 // setup array
1955 __ save_bcp();
1957 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1958 __ andptr(array, -wordSize);
1959 // initialize i & j
1960 __ xorl(i, i); // i = 0;
1961 __ movl(j, Address(array, -wordSize)); // j = length(array);
1962   // Convert j into native byte ordering
1963 __ bswapl(j);
1964 // and start
1965 Label entry;
1966 __ jmp(entry);
1968 // binary search loop
1969 { Label loop;
1970 __ bind(loop);
1971 // int h = (i + j) >> 1;
1972 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1973 __ sarl(h, 1); // h = (i + j) >> 1;
1974 // if (key < array[h].fast_match()) {
1975 // j = h;
1976 // } else {
1977 // i = h;
1978 // }
1979 // Convert array[h].match to native byte-ordering before compare
1980 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1981 __ bswapl(temp);
1982 __ cmpl(key, temp);
1983 // j = h if (key < array[h].fast_match())
1984 __ cmov32(Assembler::less , j, h);
1985 // i = h if (key >= array[h].fast_match())
1986 __ cmov32(Assembler::greaterEqual, i, h);
1987 // while (i+1 < j)
1988 __ bind(entry);
1989 __ leal(h, Address(i, 1)); // i+1
1990 __ cmpl(h, j); // i+1 < j
1991 __ jcc(Assembler::less, loop);
1992 }
1994 // end of binary search, result index is i (must check again!)
1995 Label default_case;
1996 // Convert array[i].match to native byte-ordering before compare
1997 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1998 __ bswapl(temp);
1999 __ cmpl(key, temp);
2000 __ jcc(Assembler::notEqual, default_case);
2002 // entry found -> j = offset
2003 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
2004 __ profile_switch_case(i, key, array);
2005 __ bswapl(j);
2006 LP64_ONLY(__ movslq(j, j));
2007 __ restore_bcp();
2008 __ restore_locals(); // restore rdi
2009 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2011 __ addptr(rsi, j);
2012 __ dispatch_only(vtos);
2014 // default case -> j = default offset
2015 __ bind(default_case);
2016 __ profile_switch_default(i);
2017 __ movl(j, Address(array, -2*wordSize));
2018 __ bswapl(j);
2019 LP64_ONLY(__ movslq(j, j));
2020 __ restore_bcp();
2021 __ restore_locals(); // restore rdi
2022 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2023 __ addptr(rsi, j);
2024 __ dispatch_only(vtos);
2025 }
2028 void TemplateTable::_return(TosState state) {
2029 transition(state, state);
2030 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2032 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2033 assert(state == vtos, "only valid state");
2034 __ movptr(rax, aaddress(0));
2035 __ load_klass(rdi, rax);
2036 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2037 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2038 Label skip_register_finalizer;
2039 __ jcc(Assembler::zero, skip_register_finalizer);
2041 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2043 __ bind(skip_register_finalizer);
2044 }
2046 __ remove_activation(state, rsi);
2047 __ jmp(rsi);
2048 }
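// A rough Java-level equivalent of the _return_register_finalizer path above
// (an illustrative sketch only, not code the VM actually runs):
//
//   if (this' klass has JVM_ACC_HAS_FINALIZER set) {
//     InterpreterRuntime::register_finalizer(this);   // may throw, hence call_VM
//   }
//
// The receiver is reloaded from local 0 because 'this' is always in local
// slot 0 of an instance method.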
2051 // ----------------------------------------------------------------------------
2052 // Volatile variables demand their effects be made known to all CPUs in
2053 // order. Store buffers on most chips allow reads & writes to reorder; the
2054 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2055 // memory barrier (i.e., it's not sufficient that the interpreter does not
2056 // reorder volatile references, the hardware also must not reorder them).
2057 //
2058 // According to the new Java Memory Model (JMM):
2059 // (1) All volatiles are serialized with respect to each other.
2060 // ALSO reads & writes act as acquire & release, so:
2061 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2062 // the read float up to before the read. It's OK for non-volatile memory refs
2063 // that happen before the volatile read to float down below it.
2064 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2065 // that happen BEFORE the write float down to after the write. It's OK for
2066 // non-volatile memory refs that happen after the volatile write to float up
2067 // before it.
2068 //
2069 // We only put in barriers around volatile refs (they are expensive), not
2070 // _between_ memory refs (that would require us to track the flavor of the
2071 // previous memory refs). Requirements (2) and (3) require some barriers
2072 // before volatile stores and after volatile loads. These nearly cover
2073 // requirement (1) but miss the volatile-store-volatile-load case. This final
2074 // case is placed after volatile-stores although it could just as well go
2075 // before volatile-loads.
2076 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2077   // Helper function to insert a memory barrier; the callers test for volatility
2078 if( !os::is_MP() ) return; // Not needed on single CPU
2079 __ membar(order_constraint);
2080 }
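// A sketch of how callers apply these rules (illustrative only; see
// putfield_or_static and fast_storefield below for the real pattern):
//
//   // volatile store: the store itself, then a barrier covering (1) and (3)
//   __ movl(field_addr, rax);
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
//
//   // volatile load: no trailing barrier is emitted here, since x86 loads
//   // already have the acquire semantics required by (2).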
2082 void TemplateTable::resolve_cache_and_index(int byte_no,
2083 Register result,
2084 Register Rcache,
2085 Register index,
2086 size_t index_size) {
2087 Register temp = rbx;
2089 assert_different_registers(result, Rcache, index, temp);
2091 Label resolved;
2092 if (byte_no == f1_oop) {
2093 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2094 // This kind of CP cache entry does not need to match the flags byte, because
2095 // there is a 1-1 relation between bytecode type and CP entry type.
2096 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2097 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2098 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2099 __ testptr(result, result);
2100 __ jcc(Assembler::notEqual, resolved);
2101 } else {
2102 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2103 assert(result == noreg, ""); //else change code for setting result
2104 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2105 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2106 __ jcc(Assembler::equal, resolved);
2107 }
2109 // resolve first time through
2110 address entry;
2111 switch (bytecode()) {
2112 case Bytecodes::_getstatic : // fall through
2113 case Bytecodes::_putstatic : // fall through
2114 case Bytecodes::_getfield : // fall through
2115 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2116 case Bytecodes::_invokevirtual : // fall through
2117 case Bytecodes::_invokespecial : // fall through
2118 case Bytecodes::_invokestatic : // fall through
2119 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2120 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2121 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2122 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2123 default : ShouldNotReachHere(); break;
2124 }
2125 __ movl(temp, (int)bytecode());
2126 __ call_VM(noreg, entry, temp);
2127 // Update registers with resolved info
2128 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2129 if (result != noreg)
2130 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2131 __ bind(resolved);
2132 }
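// In outline, the resolution protocol above is (a sketch, not VM code):
//
//   if (entry already resolved)        // f1 non-null, or bytecode byte matches
//     use the cached entry;
//   else {
//     call_VM(InterpreterRuntime::resolve_*);   // fills in the CP cache entry
//     reload Rcache/index (and f1 into result, if requested);
//   }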
2135 // The cache and index registers must be set before the call
2136 void TemplateTable::load_field_cp_cache_entry(Register obj,
2137 Register cache,
2138 Register index,
2139 Register off,
2140 Register flags,
2141 bool is_static = false) {
2142 assert_different_registers(cache, index, flags, off);
2144 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2145 // Field offset
2146 __ movptr(off, Address(cache, index, Address::times_ptr,
2147 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2148 // Flags
2149 __ movl(flags, Address(cache, index, Address::times_ptr,
2150 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2152   // klass: overwrite the obj register with the klass (static fields only)
2153 if (is_static) {
2154 __ movptr(obj, Address(cache, index, Address::times_ptr,
2155 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2156 }
2157 }
2159 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2160 Register method,
2161 Register itable_index,
2162 Register flags,
2163 bool is_invokevirtual,
2164 bool is_invokevfinal /*unused*/,
2165 bool is_invokedynamic) {
2166 // setup registers
2167 const Register cache = rcx;
2168 const Register index = rdx;
2169 assert_different_registers(method, flags);
2170 assert_different_registers(method, cache, index);
2171 assert_different_registers(itable_index, flags);
2172 assert_different_registers(itable_index, cache, index);
2173 // determine constant pool cache field offsets
2174 const int method_offset = in_bytes(
2175 constantPoolCacheOopDesc::base_offset() +
2176 (is_invokevirtual
2177 ? ConstantPoolCacheEntry::f2_offset()
2178 : ConstantPoolCacheEntry::f1_offset()
2179 )
2180 );
2181 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2182 ConstantPoolCacheEntry::flags_offset());
2183 // access constant pool cache fields
2184 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2185 ConstantPoolCacheEntry::f2_offset());
2187 if (byte_no == f1_oop) {
2188 // Resolved f1_oop goes directly into 'method' register.
2189 assert(is_invokedynamic, "");
2190 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2191 } else {
2192 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2193 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2194 }
2195 if (itable_index != noreg) {
2196 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2197 }
2198 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2199 }
2202 // The cache and index registers are expected to be set before the call.
2203 // Correct values of the cache and index registers are preserved.
2204 void TemplateTable::jvmti_post_field_access(Register cache,
2205 Register index,
2206 bool is_static,
2207 bool has_tos) {
2208 if (JvmtiExport::can_post_field_access()) {
2209 // Check to see if a field access watch has been set before we take
2210 // the time to call into the VM.
2211 Label L1;
2212 assert_different_registers(cache, index, rax);
2213 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2214 __ testl(rax,rax);
2215 __ jcc(Assembler::zero, L1);
2217 // cache entry pointer
2218 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2219 __ shll(index, LogBytesPerWord);
2220 __ addptr(cache, index);
2221 if (is_static) {
2222 __ xorptr(rax, rax); // NULL object reference
2223 } else {
2224 __ pop(atos); // Get the object
2225 __ verify_oop(rax);
2226 __ push(atos); // Restore stack state
2227 }
2228     // rax: object pointer or NULL
2229 // cache: cache entry pointer
2230 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2231 rax, cache);
2232 __ get_cache_and_index_at_bcp(cache, index, 1);
2233 __ bind(L1);
2234 }
2235 }
2237 void TemplateTable::pop_and_check_object(Register r) {
2238 __ pop_ptr(r);
2239 __ null_check(r); // for field access must check obj.
2240 __ verify_oop(r);
2241 }
2243 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2244 transition(vtos, vtos);
2246 const Register cache = rcx;
2247 const Register index = rdx;
2248 const Register obj = rcx;
2249 const Register off = rbx;
2250 const Register flags = rax;
2252 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2253 jvmti_post_field_access(cache, index, is_static, false);
2254 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2256 if (!is_static) pop_and_check_object(obj);
2258 const Address lo(obj, off, Address::times_1, 0*wordSize);
2259 const Address hi(obj, off, Address::times_1, 1*wordSize);
2261 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2263 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2264 assert(btos == 0, "change code, btos != 0");
2265 // btos
2266 __ andptr(flags, 0x0f);
2267 __ jcc(Assembler::notZero, notByte);
2269 __ load_signed_byte(rax, lo );
2270 __ push(btos);
2271 // Rewrite bytecode to be faster
2272 if (!is_static) {
2273 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2274 }
2275 __ jmp(Done);
2277 __ bind(notByte);
2278 // itos
2279 __ cmpl(flags, itos );
2280 __ jcc(Assembler::notEqual, notInt);
2282 __ movl(rax, lo );
2283 __ push(itos);
2284 // Rewrite bytecode to be faster
2285 if (!is_static) {
2286 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2287 }
2288 __ jmp(Done);
2290 __ bind(notInt);
2291 // atos
2292 __ cmpl(flags, atos );
2293 __ jcc(Assembler::notEqual, notObj);
2295 __ movl(rax, lo );
2296 __ push(atos);
2297 if (!is_static) {
2298 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2299 }
2300 __ jmp(Done);
2302 __ bind(notObj);
2303 // ctos
2304 __ cmpl(flags, ctos );
2305 __ jcc(Assembler::notEqual, notChar);
2307 __ load_unsigned_short(rax, lo );
2308 __ push(ctos);
2309 if (!is_static) {
2310 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2311 }
2312 __ jmp(Done);
2314 __ bind(notChar);
2315 // stos
2316 __ cmpl(flags, stos );
2317 __ jcc(Assembler::notEqual, notShort);
2319 __ load_signed_short(rax, lo );
2320 __ push(stos);
2321 if (!is_static) {
2322 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2323 }
2324 __ jmp(Done);
2326 __ bind(notShort);
2327 // ltos
2328 __ cmpl(flags, ltos );
2329 __ jcc(Assembler::notEqual, notLong);
2331 // Generate code as if volatile. There just aren't enough registers to
2332 // save that information and this code is faster than the test.
2333 __ fild_d(lo); // Must load atomically
2334 __ subptr(rsp,2*wordSize); // Make space for store
2335 __ fistp_d(Address(rsp,0));
2336 __ pop(rax);
2337 __ pop(rdx);
2339 __ push(ltos);
2340 // Don't rewrite to _fast_lgetfield for potential volatile case.
2341 __ jmp(Done);
2343 __ bind(notLong);
2344 // ftos
2345 __ cmpl(flags, ftos );
2346 __ jcc(Assembler::notEqual, notFloat);
2348 __ fld_s(lo);
2349 __ push(ftos);
2350 if (!is_static) {
2351 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2352 }
2353 __ jmp(Done);
2355 __ bind(notFloat);
2356 // dtos
2357 __ cmpl(flags, dtos );
2358 __ jcc(Assembler::notEqual, notDouble);
2360 __ fld_d(lo);
2361 __ push(dtos);
2362 if (!is_static) {
2363 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2364 }
2365 __ jmpb(Done);
2367 __ bind(notDouble);
2369 __ stop("Bad state");
2371 __ bind(Done);
2372 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2373 // volatile_barrier( );
2374 }
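// Note on the ltos path above: 32-bit x86 has no 64-bit integer load, so the
// value is copied through the FPU (fild_d/fistp_d), which moves 8 aligned
// bytes atomically. Since the volatile bit is not checked here, the long case
// is always generated as if volatile, and is never rewritten to
// _fast_lgetfield.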
2377 void TemplateTable::getfield(int byte_no) {
2378 getfield_or_static(byte_no, false);
2379 }
2382 void TemplateTable::getstatic(int byte_no) {
2383 getfield_or_static(byte_no, true);
2384 }
2386 // The cache and index registers are expected to be set before the call.
2387 // The function may destroy various registers, just not the cache and index registers.
2388 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2390 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2392 if (JvmtiExport::can_post_field_modification()) {
2393 // Check to see if a field modification watch has been set before we take
2394 // the time to call into the VM.
2395 Label L1;
2396 assert_different_registers(cache, index, rax);
2397 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2398 __ testl(rax, rax);
2399 __ jcc(Assembler::zero, L1);
2401     // The cache and index registers have already been set.
2402     // This would allow us to eliminate this call, but the cache and index
2403     // registers would then have to be used consistently after this line.
2404 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2406 if (is_static) {
2407 // Life is simple. Null out the object pointer.
2408 __ xorptr(rbx, rbx);
2409 } else {
2410 // Life is harder. The stack holds the value on top, followed by the object.
2411 // We don't know the size of the value, though; it could be one or two words
2412 // depending on its type. As a result, we must find the type to determine where
2413 // the object is.
2414 Label two_word, valsize_known;
2415 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2416 ConstantPoolCacheEntry::flags_offset())));
2417 __ mov(rbx, rsp);
2418 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2419 // Make sure we don't need to mask rcx for tosBits after the above shift
2420 ConstantPoolCacheEntry::verify_tosBits();
2421 __ cmpl(rcx, ltos);
2422 __ jccb(Assembler::equal, two_word);
2423 __ cmpl(rcx, dtos);
2424 __ jccb(Assembler::equal, two_word);
2425 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2426 __ jmpb(valsize_known);
2428 __ bind(two_word);
2429 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2431 __ bind(valsize_known);
2432 // setup object pointer
2433 __ movptr(rbx, Address(rbx, 0));
2434 }
2435 // cache entry pointer
2436 __ addptr(rax, in_bytes(cp_base_offset));
2437 __ shll(rdx, LogBytesPerWord);
2438 __ addptr(rax, rdx);
2439 // object (tos)
2440 __ mov(rcx, rsp);
2441     // rbx: object pointer set up above (NULL if static)
2442     // rax: cache entry pointer
2443 // rcx: jvalue object on the stack
2444 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2445 rbx, rax, rcx);
2446 __ get_cache_and_index_at_bcp(cache, index, 1);
2447 __ bind(L1);
2448 }
2449 }
2452 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2453 transition(vtos, vtos);
2455 const Register cache = rcx;
2456 const Register index = rdx;
2457 const Register obj = rcx;
2458 const Register off = rbx;
2459 const Register flags = rax;
2461 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2462 jvmti_post_field_mod(cache, index, is_static);
2463 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2465 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2466 // volatile_barrier( );
2468 Label notVolatile, Done;
2469 __ movl(rdx, flags);
2470 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2471 __ andl(rdx, 0x1);
2473 // field addresses
2474 const Address lo(obj, off, Address::times_1, 0*wordSize);
2475 const Address hi(obj, off, Address::times_1, 1*wordSize);
2477 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2479 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2480 assert(btos == 0, "change code, btos != 0");
2481 __ andl(flags, 0x0f);
2482 __ jcc(Assembler::notZero, notByte);
2484 // btos
2485 {
2486 __ pop(btos);
2487 if (!is_static) pop_and_check_object(obj);
2488 __ movb(lo, rax);
2489 if (!is_static) {
2490 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2491 }
2492 __ jmp(Done);
2493 }
2495 __ bind(notByte);
2496 __ cmpl(flags, itos);
2497 __ jcc(Assembler::notEqual, notInt);
2499 // itos
2500 {
2501 __ pop(itos);
2502 if (!is_static) pop_and_check_object(obj);
2503 __ movl(lo, rax);
2504 if (!is_static) {
2505 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2506 }
2507 __ jmp(Done);
2508 }
2510 __ bind(notInt);
2511 __ cmpl(flags, atos);
2512 __ jcc(Assembler::notEqual, notObj);
2514 // atos
2515 {
2516 __ pop(atos);
2517 if (!is_static) pop_and_check_object(obj);
2518 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2519 if (!is_static) {
2520 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2521 }
2522 __ jmp(Done);
2523 }
2525 __ bind(notObj);
2526 __ cmpl(flags, ctos);
2527 __ jcc(Assembler::notEqual, notChar);
2529 // ctos
2530 {
2531 __ pop(ctos);
2532 if (!is_static) pop_and_check_object(obj);
2533 __ movw(lo, rax);
2534 if (!is_static) {
2535 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2536 }
2537 __ jmp(Done);
2538 }
2540 __ bind(notChar);
2541 __ cmpl(flags, stos);
2542 __ jcc(Assembler::notEqual, notShort);
2544 // stos
2545 {
2546 __ pop(stos);
2547 if (!is_static) pop_and_check_object(obj);
2548 __ movw(lo, rax);
2549 if (!is_static) {
2550 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2551 }
2552 __ jmp(Done);
2553 }
2555 __ bind(notShort);
2556 __ cmpl(flags, ltos);
2557 __ jcc(Assembler::notEqual, notLong);
2559 // ltos
2560 {
2561 Label notVolatileLong;
2562 __ testl(rdx, rdx);
2563 __ jcc(Assembler::zero, notVolatileLong);
2565 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2566 if (!is_static) pop_and_check_object(obj);
2568 // Replace with real volatile test
2569 __ push(rdx);
2570 __ push(rax); // Must update atomically with FIST
2571 __ fild_d(Address(rsp,0)); // So load into FPU register
2572 __ fistp_d(lo); // and put into memory atomically
2573 __ addptr(rsp, 2*wordSize);
2574 // volatile_barrier();
2575 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2576 Assembler::StoreStore));
2577 // Don't rewrite volatile version
2578 __ jmp(notVolatile);
2580 __ bind(notVolatileLong);
2582 __ pop(ltos); // overwrites rdx
2583 if (!is_static) pop_and_check_object(obj);
2584 NOT_LP64(__ movptr(hi, rdx));
2585 __ movptr(lo, rax);
2586 if (!is_static) {
2587 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2588 }
2589 __ jmp(notVolatile);
2590 }
2592 __ bind(notLong);
2593 __ cmpl(flags, ftos);
2594 __ jcc(Assembler::notEqual, notFloat);
2596 // ftos
2597 {
2598 __ pop(ftos);
2599 if (!is_static) pop_and_check_object(obj);
2600 __ fstp_s(lo);
2601 if (!is_static) {
2602 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2603 }
2604 __ jmp(Done);
2605 }
2607 __ bind(notFloat);
2608 #ifdef ASSERT
2609 __ cmpl(flags, dtos);
2610 __ jcc(Assembler::notEqual, notDouble);
2611 #endif
2613 // dtos
2614 {
2615 __ pop(dtos);
2616 if (!is_static) pop_and_check_object(obj);
2617 __ fstp_d(lo);
2618 if (!is_static) {
2619 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2620 }
2621 __ jmp(Done);
2622 }
2624 #ifdef ASSERT
2625 __ bind(notDouble);
2626 __ stop("Bad state");
2627 #endif
2629 __ bind(Done);
2631 // Check for volatile store
2632 __ testl(rdx, rdx);
2633 __ jcc(Assembler::zero, notVolatile);
2634 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2635 Assembler::StoreStore));
2636 __ bind(notVolatile);
2637 }
2640 void TemplateTable::putfield(int byte_no) {
2641 putfield_or_static(byte_no, false);
2642 }
2645 void TemplateTable::putstatic(int byte_no) {
2646 putfield_or_static(byte_no, true);
2647 }
2649 void TemplateTable::jvmti_post_fast_field_mod() {
2650 if (JvmtiExport::can_post_field_modification()) {
2651 // Check to see if a field modification watch has been set before we take
2652 // the time to call into the VM.
2653 Label L2;
2654 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2655 __ testl(rcx,rcx);
2656 __ jcc(Assembler::zero, L2);
2657 __ pop_ptr(rbx); // copy the object pointer from tos
2658 __ verify_oop(rbx);
2659 __ push_ptr(rbx); // put the object pointer back on tos
2660 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2661 __ mov(rcx, rsp);
2662     __ push_ptr(rbx);                 // save object pointer so we can steal rbx
2663 __ xorptr(rbx, rbx);
2664 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2665 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2666 switch (bytecode()) { // load values into the jvalue object
2667 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2668 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2669 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2670 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2671 case Bytecodes::_fast_lputfield:
2672 NOT_LP64(__ movptr(hi_value, rdx));
2673 __ movptr(lo_value, rax);
2674 break;
2676 // need to call fld_s() after fstp_s() to restore the value for below
2677 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2679 // need to call fld_d() after fstp_d() to restore the value for below
2680 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2682 // since rcx is not an object we don't call store_check() here
2683 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2685 default: ShouldNotReachHere();
2686 }
2687 __ pop_ptr(rbx); // restore copy of object pointer
2689 // Save rax, and sometimes rdx because call_VM() will clobber them,
2690 // then use them for JVM/DI purposes
2691 __ push(rax);
2692 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2693 // access constant pool cache entry
2694 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2695 __ verify_oop(rbx);
2696     // rbx: object pointer copied above
2697     // rax: cache entry pointer
2698 // rcx: jvalue object on the stack
2699 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2700 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2701 __ pop(rax); // restore lower value
2702 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2703 __ bind(L2);
2704 }
2705 }
2707 void TemplateTable::fast_storefield(TosState state) {
2708 transition(state, vtos);
2710 ByteSize base = constantPoolCacheOopDesc::base_offset();
2712 jvmti_post_fast_field_mod();
2714 // access constant pool cache
2715 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2717   // test for volatile with rdx, but rdx is the tos register for lputfield.
2718 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2719 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2720 ConstantPoolCacheEntry::flags_offset())));
2722 // replace index with field offset from cache entry
2723 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2725 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2726 // volatile_barrier( );
2728 Label notVolatile, Done;
2729 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2730 __ andl(rdx, 0x1);
2731 // Check for volatile store
2732 __ testl(rdx, rdx);
2733 __ jcc(Assembler::zero, notVolatile);
2735 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2737 // Get object from stack
2738 pop_and_check_object(rcx);
2740 // field addresses
2741 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2742 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2744 // access field
2745 switch (bytecode()) {
2746 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2747 case Bytecodes::_fast_sputfield: // fall through
2748 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2749 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2750 case Bytecodes::_fast_lputfield:
2751 NOT_LP64(__ movptr(hi, rdx));
2752 __ movptr(lo, rax);
2753 break;
2754 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2755 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2756 case Bytecodes::_fast_aputfield: {
2757 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2758 break;
2759 }
2760 default:
2761 ShouldNotReachHere();
2762 }
2764 Label done;
2765 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2766 Assembler::StoreStore));
2767   // Barriers are so large that a short branch doesn't reach!
2768 __ jmp(done);
2770 // Same code as above, but don't need rdx to test for volatile.
2771 __ bind(notVolatile);
2773 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2775 // Get object from stack
2776 pop_and_check_object(rcx);
2778 // access field
2779 switch (bytecode()) {
2780 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2781 case Bytecodes::_fast_sputfield: // fall through
2782 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2783 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2784 case Bytecodes::_fast_lputfield:
2785 NOT_LP64(__ movptr(hi, rdx));
2786 __ movptr(lo, rax);
2787 break;
2788 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2789 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2790 case Bytecodes::_fast_aputfield: {
2791 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2792 break;
2793 }
2794 default:
2795 ShouldNotReachHere();
2796 }
2797 __ bind(done);
2798 }
2801 void TemplateTable::fast_accessfield(TosState state) {
2802 transition(atos, state);
2804 // do the JVMTI work here to avoid disturbing the register state below
2805 if (JvmtiExport::can_post_field_access()) {
2806 // Check to see if a field access watch has been set before we take
2807 // the time to call into the VM.
2808 Label L1;
2809 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2810 __ testl(rcx,rcx);
2811 __ jcc(Assembler::zero, L1);
2812 // access constant pool cache entry
2813 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2814 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2815 __ verify_oop(rax);
2816     // rax: object pointer copied above
2817 // rcx: cache entry pointer
2818 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2819 __ pop_ptr(rax); // restore object pointer
2820 __ bind(L1);
2821 }
2823 // access constant pool cache
2824 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2825 // replace index with field offset from cache entry
2826 __ movptr(rbx, Address(rcx,
2827 rbx,
2828 Address::times_ptr,
2829 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2832   // rax: object
2833 __ verify_oop(rax);
2834 __ null_check(rax);
2835 // field addresses
2836 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2837 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2839 // access field
2840 switch (bytecode()) {
2841 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2842 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2843 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2844 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2845 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2846 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2847 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2848 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2849 default:
2850 ShouldNotReachHere();
2851 }
2853 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2854 // volatile_barrier( );
2855 }
2857 void TemplateTable::fast_xaccess(TosState state) {
2858 transition(vtos, state);
2859 // get receiver
2860 __ movptr(rax, aaddress(0));
2861 // access constant pool cache
2862 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2863 __ movptr(rbx, Address(rcx,
2864 rdx,
2865 Address::times_ptr,
2866 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2867 // make sure exception is reported in correct bcp range (getfield is next instruction)
2868 __ increment(rsi);
2869 __ null_check(rax);
2870 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2871 if (state == itos) {
2872 __ movl(rax, lo);
2873 } else if (state == atos) {
2874 __ movptr(rax, lo);
2875 __ verify_oop(rax);
2876 } else if (state == ftos) {
2877 __ fld_s(lo);
2878 } else {
2879 ShouldNotReachHere();
2880 }
2881 __ decrement(rsi);
2882 }
2886 //----------------------------------------------------------------------------------------------------
2887 // Calls
2889 void TemplateTable::count_calls(Register method, Register temp) {
2890 // implemented elsewhere
2891 ShouldNotReachHere();
2892 }
2895 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2896 // determine flags
2897 Bytecodes::Code code = bytecode();
2898 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2899 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2900 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2901 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2902 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2903 const bool receiver_null_check = is_invokespecial;
2904 const bool save_flags = is_invokeinterface || is_invokevirtual;
2905 // setup registers & access constant pool cache
2906 const Register recv = rcx;
2907 const Register flags = rdx;
2908 assert_different_registers(method, index, recv, flags);
2910 // save 'interpreter return address'
2911 __ save_bcp();
2913 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2915 // load receiver if needed (note: no return address pushed yet)
2916 if (load_receiver) {
2917 assert(!is_invokedynamic, "");
2918 __ movl(recv, flags);
2919 __ andl(recv, 0xFF);
2920 // recv count is 0 based?
2921 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2922 __ movptr(recv, recv_addr);
2923 __ verify_oop(recv);
2924 }
2926 // do null check if needed
2927 if (receiver_null_check) {
2928 __ null_check(recv);
2929 }
2931 if (save_flags) {
2932 __ mov(rsi, flags);
2933 }
2935 // compute return type
2936 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2937 // Make sure we don't need to mask flags for tosBits after the above shift
2938 ConstantPoolCacheEntry::verify_tosBits();
2939 // load return address
2940 {
2941 address table_addr;
2942 if (is_invokeinterface || is_invokedynamic)
2943 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2944 else
2945 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2946 ExternalAddress table(table_addr);
2947 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2948 }
2950 // push return address
2951 __ push(flags);
2953 // Restore flag value from the constant pool cache, and restore rsi
2954 // for later null checks. rsi is the bytecode pointer
2955 if (save_flags) {
2956 __ mov(flags, rsi);
2957 __ restore_bcp();
2958 }
2959 }
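// Note on the flags word used above, as implied by the masks and shifts in
// this file (a summary, not an authoritative layout): the return-type tos
// state sits in the topmost bits (extracted by shifting right by tosBits),
// option bits such as volatileField, vfinalMethod and methodInterface sit in
// the middle, and the parameter/receiver count occupies the low eight bits
// (hence the 0xFF mask when locating the receiver).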
2962 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2963 Register flags) {
2965 // Uses temporary registers rax, rdx
2966 assert_different_registers(index, recv, rax, rdx);
2968 // Test for an invoke of a final method
2969 Label notFinal;
2970 __ movl(rax, flags);
2971 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2972 __ jcc(Assembler::zero, notFinal);
2974   Register method = index;  // method must be rbx
2975 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2977 // do the call - the index is actually the method to call
2978 __ verify_oop(method);
2980 // It's final, need a null check here!
2981 __ null_check(recv);
2983 // profile this call
2984 __ profile_final_call(rax);
2986 __ jump_from_interpreted(method, rax);
2988 __ bind(notFinal);
2990 // get receiver klass
2991 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2992   // Keep recv in rcx; the callee expects it there
2993 __ load_klass(rax, recv);
2994 __ verify_oop(rax);
2996 // profile this call
2997 __ profile_virtual_call(rax, rdi, rdx);
2999 // get target methodOop & entry point
3000 const int base = instanceKlass::vtable_start_offset() * wordSize;
3001 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
3002 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
3003 __ jump_from_interpreted(method, rdx);
3004 }
3007 void TemplateTable::invokevirtual(int byte_no) {
3008 transition(vtos, vtos);
3009 assert(byte_no == f2_byte, "use this argument");
3010 prepare_invoke(rbx, noreg, byte_no);
3012   // rbx: index
3013 // rcx: receiver
3014 // rdx: flags
3016 invokevirtual_helper(rbx, rcx, rdx);
3017 }
3020 void TemplateTable::invokespecial(int byte_no) {
3021 transition(vtos, vtos);
3022 assert(byte_no == f1_byte, "use this argument");
3023 prepare_invoke(rbx, noreg, byte_no);
3024 // do the call
3025 __ verify_oop(rbx);
3026 __ profile_call(rax);
3027 __ jump_from_interpreted(rbx, rax);
3028 }
3031 void TemplateTable::invokestatic(int byte_no) {
3032 transition(vtos, vtos);
3033 assert(byte_no == f1_byte, "use this argument");
3034 prepare_invoke(rbx, noreg, byte_no);
3035 // do the call
3036 __ verify_oop(rbx);
3037 __ profile_call(rax);
3038 __ jump_from_interpreted(rbx, rax);
3039 }
3042 void TemplateTable::fast_invokevfinal(int byte_no) {
3043 transition(vtos, vtos);
3044 assert(byte_no == f2_byte, "use this argument");
3045 __ stop("fast_invokevfinal not used on x86");
3046 }
3049 void TemplateTable::invokeinterface(int byte_no) {
3050 transition(vtos, vtos);
3051 assert(byte_no == f1_byte, "use this argument");
3052 prepare_invoke(rax, rbx, byte_no);
3054   // rax: Interface
3055   // rbx: index
3056 // rcx: receiver
3057 // rdx: flags
3059 // Special case of invokeinterface called for virtual method of
3060 // java.lang.Object. See cpCacheOop.cpp for details.
3061 // This code isn't produced by javac, but could be produced by
3062 // another compliant java compiler.
3063 Label notMethod;
3064 __ movl(rdi, rdx);
3065 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3066 __ jcc(Assembler::zero, notMethod);
3068 invokevirtual_helper(rbx, rcx, rdx);
3069 __ bind(notMethod);
3071 // Get receiver klass into rdx - also a null check
3072 __ restore_locals(); // restore rdi
3073 __ load_klass(rdx, rcx);
3074 __ verify_oop(rdx);
3076 // profile this call
3077 __ profile_virtual_call(rdx, rsi, rdi);
3079 Label no_such_interface, no_such_method;
3081 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3082 rdx, rax, rbx,
3083 // outputs: method, scan temp. reg
3084 rbx, rsi,
3085 no_such_interface);
3087   // rbx: methodOop to call
3088 // rcx: receiver
3089 // Check for abstract method error
3090 // Note: This should be done more efficiently via a throw_abstract_method_error
3091 // interpreter entry point and a conditional jump to it in case of a null
3092 // method.
3093 __ testptr(rbx, rbx);
3094 __ jcc(Assembler::zero, no_such_method);
3096 // do the call
3097 // rcx: receiver
3098   // rbx: methodOop
3099 __ jump_from_interpreted(rbx, rdx);
3100 __ should_not_reach_here();
3102 // exception handling code follows...
3103 // note: must restore interpreter registers to canonical
3104 // state for exception handling to work correctly!
3106 __ bind(no_such_method);
3107 // throw exception
3108 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3109 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3110 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3111 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3112 // the call_VM checks for exception, so we should never return here.
3113 __ should_not_reach_here();
3115 __ bind(no_such_interface);
3116 // throw exception
3117 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3118 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3119 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3120 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3121 InterpreterRuntime::throw_IncompatibleClassChangeError));
3122 // the call_VM checks for exception, so we should never return here.
3123 __ should_not_reach_here();
3124 }
3126 void TemplateTable::invokedynamic(int byte_no) {
3127 transition(vtos, vtos);
3128 assert(byte_no == f1_oop, "use this argument");
3130 if (!EnableInvokeDynamic) {
3131 // We should not encounter this bytecode if !EnableInvokeDynamic.
3132 // The verifier will stop it. However, if we get past the verifier,
3133 // this will stop the thread in a reasonable way, without crashing the JVM.
3134 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3135 InterpreterRuntime::throw_IncompatibleClassChangeError));
3136 // the call_VM checks for exception, so we should never return here.
3137 __ should_not_reach_here();
3138 return;
3139 }
3141 prepare_invoke(rax, rbx, byte_no);
3143 // rax: CallSite object (f1)
3144 // rbx: unused (f2)
3145 // rcx: receiver address
3146 // rdx: flags (unused)
3148 Register rax_callsite = rax;
3149 Register rcx_method_handle = rcx;
3151 // %%% should make a type profile for any invokedynamic that takes a ref argument
3152 // profile this call
3153 __ profile_call(rsi);
3155 __ verify_oop(rax_callsite);
3156 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
3157 __ null_check(rcx_method_handle);
3158 __ verify_oop(rcx_method_handle);
3159 __ prepare_to_jump_from_interpreted();
3160 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3161 }
3163 //----------------------------------------------------------------------------------------------------
3164 // Allocation
3166 void TemplateTable::_new() {
3167 transition(vtos, atos);
3168 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3169 Label slow_case;
3170 Label slow_case_no_pop;
3171 Label done;
3172 Label initialize_header;
3173 Label initialize_object; // including clearing the fields
3174 Label allocate_shared;
3176 __ get_cpool_and_tags(rcx, rax);
3178 // Make sure the class we're about to instantiate has been resolved.
3179 // This is done before loading instanceKlass to be consistent with the order
3180   // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3181 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3182 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3183 __ jcc(Assembler::notEqual, slow_case_no_pop);
3185 // get instanceKlass
3186 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3187   __ push(rcx);  // save the klass for initializing the header
3189 // make sure klass is initialized & doesn't have finalizer
3190 // make sure klass is fully initialized
3191 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3192 __ jcc(Assembler::notEqual, slow_case);
3194 // get instance_size in instanceKlass (scaled to a count of bytes)
3195 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3196 // test to see if it has a finalizer or is malformed in some way
3197 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3198 __ jcc(Assembler::notZero, slow_case);
3200 //
3201 // Allocate the instance
3202 // 1) Try to allocate in the TLAB
3203 // 2) if fail and the object is large allocate in the shared Eden
3204 // 3) if the above fails (or is not applicable), go to a slow case
3205 // (creates a new TLAB, etc.)
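// In C-like pseudocode, the fast paths generated below look roughly like this
// (a sketch under the assumptions of the comment above, not actual VM code):
//
//   obj = thread->tlab_top;                                    // 1) TLAB bump
//   if (obj + size <= thread->tlab_end) {
//     thread->tlab_top = obj + size;
//   } else if (allow_shared_alloc) {                           // 2) shared eden
//     do {
//       obj = *Universe::heap()->top_addr();
//       if (obj + size > *Universe::heap()->end_addr()) goto slow_case;
//     } while (!CAS(Universe::heap()->top_addr(), obj, obj + size));
//   } else {
//     goto slow_case;                                          // 3) runtime call
//   }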
3207 const bool allow_shared_alloc =
3208 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3210 const Register thread = rcx;
3211 if (UseTLAB || allow_shared_alloc) {
3212 __ get_thread(thread);
3213 }
3215 if (UseTLAB) {
3216 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3217 __ lea(rbx, Address(rax, rdx, Address::times_1));
3218 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3219 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3220 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3221 if (ZeroTLAB) {
3222 // the fields have been already cleared
3223 __ jmp(initialize_header);
3224 } else {
3225 // initialize both the header and fields
3226 __ jmp(initialize_object);
3227 }
3228 }
3230 // Allocation in the shared Eden, if allowed.
3231 //
3232 // rdx: instance size in bytes
3233 if (allow_shared_alloc) {
3234 __ bind(allocate_shared);
3236 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3238 Label retry;
3239 __ bind(retry);
3240 __ movptr(rax, heap_top);
3241 __ lea(rbx, Address(rax, rdx, Address::times_1));
3242 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3243 __ jcc(Assembler::above, slow_case);
3245     // Compare rax with the top addr, and if still equal, store the new
3246     // top addr (in rbx) at the address of the top addr pointer. Sets ZF if they were
3247     // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3248 //
3249     // rax: object begin
3250     // rbx: object end
3251 // rdx: instance size in bytes
3252 __ locked_cmpxchgptr(rbx, heap_top);
3254 // if someone beat us on the allocation, try again, otherwise continue
3255 __ jcc(Assembler::notEqual, retry);
3257 __ incr_allocated_bytes(thread, rdx, 0);
3258 }
3260 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3261 // The object is initialized before the header. If the object size is
3262 // zero, go directly to the header initialization.
3263 __ bind(initialize_object);
3264 __ decrement(rdx, sizeof(oopDesc));
3265 __ jcc(Assembler::zero, initialize_header);
3267 // Initialize topmost object field, divide rdx by 8, check if odd and
3268 // test if zero.
3269 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3270 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3272     // rdx must have been a multiple of 8
3273 #ifdef ASSERT
3274     // make sure rdx was a multiple of 8
3275 Label L;
3276 // Ignore partial flag stall after shrl() since it is debug VM
3277 __ jccb(Assembler::carryClear, L);
3278 __ stop("object size is not multiple of 2 - adjust this code");
3279 __ bind(L);
3280 // rdx must be > 0, no extra check needed here
3281 #endif
3283 // initialize remaining object fields: rdx was a multiple of 8
3284 { Label loop;
3285 __ bind(loop);
3286 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3287 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3288 __ decrement(rdx);
3289 __ jcc(Assembler::notZero, loop);
3290 }
3292 // initialize object header only.
3293 __ bind(initialize_header);
3294 if (UseBiasedLocking) {
3295 __ pop(rcx); // get saved klass back in the register.
3296 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3297 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3298 } else {
3299 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3300 (int32_t)markOopDesc::prototype()); // header
3301 __ pop(rcx); // get saved klass back in the register.
3302 }
3303 __ store_klass(rax, rcx); // klass
3305 {
3306 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3307 // Trigger dtrace event for fastpath
3308 __ push(atos);
3309 __ call_VM_leaf(
3310 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3311 __ pop(atos);
3312 }
3314 __ jmp(done);
3315 }
3317 // slow case
3318 __ bind(slow_case);
3319 __ pop(rcx); // restore stack pointer to what it was when we came in.
3320 __ bind(slow_case_no_pop);
3321 __ get_constant_pool(rax);
3322 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3323 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3325 // continue
3326 __ bind(done);
3327 }
3330 void TemplateTable::newarray() {
3331 transition(itos, atos);
3332 __ push_i(rax); // make sure everything is on the stack
3333 __ load_unsigned_byte(rdx, at_bcp(1));
3334 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3335 __ pop_i(rdx); // discard size
3336 }
3339 void TemplateTable::anewarray() {
3340 transition(itos, atos);
3341 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3342 __ get_constant_pool(rcx);
3343 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3344 }
3347 void TemplateTable::arraylength() {
3348 transition(atos, itos);
3349 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3350 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3351 }
3354 void TemplateTable::checkcast() {
3355 transition(atos, atos);
3356 Label done, is_null, ok_is_subtype, quicked, resolved;
3357 __ testptr(rax, rax); // Object is in EAX
3358 __ jcc(Assembler::zero, is_null);
3360 // Get cpool & tags index
3361 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3362 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3363 // See if bytecode has already been quicked
3364 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3365 __ jcc(Assembler::equal, quicked);
3367 __ push(atos);
3368 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3369 __ pop_ptr(rdx);
3370 __ jmpb(resolved);
3372 // Get superklass in EAX and subklass in EBX
3373 __ bind(quicked);
3374 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3375 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3377 __ bind(resolved);
3378 __ load_klass(rbx, rdx);
3380 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3381 // Superklass in EAX. Subklass in EBX.
3382 __ gen_subtype_check( rbx, ok_is_subtype );
3384 // Come here on failure
3385 __ push(rdx);
3386 // object is at TOS
3387 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3389 // Come here on success
3390 __ bind(ok_is_subtype);
3391 __ mov(rax,rdx); // Restore object in EDX
3393 // Collect counts on whether this check-cast sees NULLs a lot or not.
3394 if (ProfileInterpreter) {
3395 __ jmp(done);
3396 __ bind(is_null);
3397 __ profile_null_seen(rcx);
3398 } else {
3399 __ bind(is_null); // same as 'done'
3400 }
3401 __ bind(done);
3402 }
3405 void TemplateTable::instanceof() {
3406 transition(atos, itos);
3407 Label done, is_null, ok_is_subtype, quicked, resolved;
3408 __ testptr(rax, rax);
3409 __ jcc(Assembler::zero, is_null);
3411 // Get cpool & tags index
3412 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3413 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3414 // See if bytecode has already been quicked
3415 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3416 __ jcc(Assembler::equal, quicked);
3418 __ push(atos);
3419 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3420 __ pop_ptr(rdx);
3421 __ load_klass(rdx, rdx);
3422 __ jmp(resolved);
3424 // Get superklass in EAX and subklass in EDX
3425 __ bind(quicked);
3426 __ load_klass(rdx, rax);
3427 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3429 __ bind(resolved);
3431 // Generate subtype check. Blows ECX. Resets EDI.
3432 // Superklass in EAX. Subklass in EDX.
3433 __ gen_subtype_check( rdx, ok_is_subtype );
3435 // Come here on failure
3436 __ xorl(rax,rax);
3437 __ jmpb(done);
3438 // Come here on success
3439 __ bind(ok_is_subtype);
3440 __ movl(rax, 1);
3442 // Collect counts on whether this test sees NULLs a lot or not.
3443 if (ProfileInterpreter) {
3444 __ jmp(done);
3445 __ bind(is_null);
3446 __ profile_null_seen(rcx);
3447 } else {
3448 __ bind(is_null); // same as 'done'
3449 }
3450 __ bind(done);
3451   // rax = 0: obj == NULL or obj is not an instance of the specified klass
3452   // rax = 1: obj != NULL and obj is an instance of the specified klass
3453 }
3456 //----------------------------------------------------------------------------------------------------
3457 // Breakpoints
3458 void TemplateTable::_breakpoint() {
3460   // Note: We get here even if we are single stepping.
3461   // jbug insists on setting breakpoints at every bytecode
3462   // even if we are in single step mode.
3464 transition(vtos, vtos);
3466 // get the unpatched byte code
3467 __ get_method(rcx);
3468 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3469 __ mov(rbx, rax);
3471 // post the breakpoint event
3472 __ get_method(rcx);
3473 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3475 // complete the execution of original bytecode
3476 __ dispatch_only_normal(vtos);
3477 }
3480 //----------------------------------------------------------------------------------------------------
3481 // Exceptions
3483 void TemplateTable::athrow() {
3484 transition(atos, vtos);
3485 __ null_check(rax);
3486 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3487 }
3490 //----------------------------------------------------------------------------------------------------
3491 // Synchronization
3492 //
3493 // Note: monitorenter & exit are symmetric routines, which is reflected
3494 // in the assembly code structure as well
3495 //
3496 // Stack layout:
3497 //
3498 // [expressions ] <--- rsp = expression stack top
3499 // ..
3500 // [expressions ]
3501 // [monitor entry] <--- monitor block top = expression stack bot
3502 // ..
3503 // [monitor entry]
3504 // [frame data ] <--- monitor block bot
3505 // ...
3506 // [saved rbp  ] <--- rbp
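// The free-slot search in monitorenter below, in C-like pseudocode (a sketch
// for illustration, not actual VM code):
//
//   BasicObjectLock* free = NULL;
//   for (e = monitor block top; e != monitor block bot; e += entry_size) {
//     if (e->obj() == NULL)    free = e;    // remember a free slot
//     if (e->obj() == lockee)  break;       // same object: stop searching
//   }
//   if (free == NULL) grow the monitor block by one entry, copying the
//                     expression stack down by entry_size;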
3509 void TemplateTable::monitorenter() {
3510 transition(atos, vtos);
3512 // check for NULL object
3513 __ null_check(rax);
3515 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3516 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3517 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3518 Label allocated;
3520 // initialize entry pointer
3521 __ xorl(rdx, rdx); // points to free slot or NULL
3523 // find a free slot in the monitor block (result in rdx)
3524 { Label entry, loop, exit;
3525 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3527 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3528 __ jmpb(entry);
3530 __ bind(loop);
3531 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3532 __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3533 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3534 __ jccb(Assembler::equal, exit); // if same object then stop searching
3535 __ addptr(rcx, entry_size); // otherwise advance to next entry
3536 __ bind(entry);
3537 __ cmpptr(rcx, rbx); // check if bottom reached
3538 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3539 __ bind(exit);
3540 }
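// Illustrative C++ equivalent of the search above (exposition only):
//
//   BasicObjectLock* free_slot = NULL;
//   for (BasicObjectLock* cur = block_top; cur != block_bot; cur++) {
//     if (cur->obj() == NULL)     free_slot = cur;   // remember an unused slot
//     if (cur->obj() == lock_obj) break;             // a recursive lock must get a fresh
//   }                                                // entry above the existing one
//   // free_slot == NULL means no reusable slot; one is allocated below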
3542 __ testptr(rdx, rdx); // check if a slot has been found
3543 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3545 // allocate one if there's no free slot
3546 { Label entry, loop;
3547 // 1. compute new pointers // rsp: old expression stack top
3548 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3549 __ subptr(rsp, entry_size); // move expression stack top
3550 __ subptr(rdx, entry_size); // move expression stack bottom
3551 __ mov(rcx, rsp); // set start value for copy loop
3552 __ movptr(monitor_block_bot, rdx); // set new monitor block top (= new expression stack bottom)
3553 __ jmp(entry);
3554 // 2. move expression stack contents
3555 __ bind(loop);
3556 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3557 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3558 __ addptr(rcx, wordSize); // advance to next word
3559 __ bind(entry);
3560 __ cmpptr(rcx, rdx); // check if bottom reached
3561 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3562 }
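// Illustrative equivalent of the allocation above (exposition only): the whole
// expression stack is slid down by one monitor entry, opening a slot just
// above the old monitor block:
//
//   rsp        -= entry_size;                  // new expression stack top
//   new_bottom  = old_bottom - entry_size;     // new stack bottom / monitor block top
//   for (intptr_t* p = (intptr_t*)rsp; p != new_bottom; p++)
//     *p = *(p + entry_size / wordSize);       // copy each stack word to its new place
//   // rdx (== new_bottom) now addresses the freshly opened monitor entry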
3564 // call run-time routine
3565 // rdx: points to monitor entry
3566 __ bind(allocated);
3568 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3569 // The object has already been popped from the stack, so the expression stack looks correct.
3570 __ increment(rsi);
3572 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3573 __ lock_object(rdx);
3575 // check to make sure this monitor doesn't cause stack overflow after locking
3576 __ save_bcp(); // in case of exception
3577 __ generate_stack_overflow_check(0);
3579 // The bcp has already been incremented. Just need to dispatch to next instruction.
3580 __ dispatch_next(vtos);
3581 }
3584 void TemplateTable::monitorexit() {
3585 transition(atos, vtos);
3587 // check for NULL object
3588 __ null_check(rax);
3590 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3591 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3592 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3593 Label found;
3595 // find matching slot
3596 { Label entry, loop;
3597 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3598 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3599 __ jmpb(entry);
3601 __ bind(loop);
3602 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3603 __ jcc(Assembler::equal, found); // if same object then stop searching
3604 __ addptr(rdx, entry_size); // otherwise advance to next entry
3605 __ bind(entry);
3606 __ cmpptr(rdx, rbx); // check if bottom reached
3607 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3608 }
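// Note the fall-through: if the loop above exhausts the monitor block without
// a match, control reaches the error path below. Illustrative control flow
// (exposition only):
//
//   BasicObjectLock* entry = NULL;
//   for (BasicObjectLock* cur = block_top; cur != block_bot; cur++)
//     if (cur->obj() == obj) { entry = cur; break; }
//   if (entry == NULL)
//     InterpreterRuntime::throw_illegal_monitor_state_exception(thread); // never returns
//   unlock(entry);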
3610 // Error handling: falling through from the loop above means unlocking was not block-structured.
3611 Label end;
3612 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3613 __ should_not_reach_here();
3615 // call run-time routine
3616 // rdx: points to monitor entry
3617 __ bind(found);
3618 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3619 __ unlock_object(rdx);
3620 __ pop_ptr(rax); // discard object
3621 __ bind(end);
3622 }
3625 //----------------------------------------------------------------------------------------------------
3626 // Wide instructions
3628 void TemplateTable::wide() {
3629 transition(vtos, vtos);
3630 __ load_unsigned_byte(rbx, at_bcp(1));
3631 ExternalAddress wtable((address)Interpreter::_wentry_point);
3632 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3633 // Note: the rsi increment step is part of the individual wide bytecode implementations
3634 }
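// Illustrative sketch of the dispatch above (exposition only): the byte after
// the wide prefix selects the wide variant's entry from a parallel table.
// jump_to is a hypothetical helper standing in for the indirect jump:
//
//   int wide_bc = *(bcp + 1);                        // bytecode following 'wide'
//   jump_to(Interpreter::_wentry_point[wide_bc]);    // enter the wide template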
3637 //----------------------------------------------------------------------------------------------------
3638 // Multi arrays
3640 void TemplateTable::multianewarray() {
3641 transition(vtos, atos);
3642 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3643 // last dim is on top of stack; we want address of first one:
3644 // first_addr = last_addr + (ndims - 1) * stackElementSize
3645 // (the lea below computes this as rsp + ndims * stackElementSize - wordSize)
3646 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3647 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax
3648 __ load_unsigned_byte(rbx, at_bcp(3));
3649 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3650 }
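// Illustrative arithmetic for the lea above (exposition only; stackElementSize
// equals wordSize on this port): with the last dimension word at the stack top,
//
//   first_dim_addr = rsp + ndims * stackElementSize - wordSize
//                  = rsp + (ndims - 1) * stackElementSize
//
// and the same ndims count is reloaded after the call to pop all dimension words.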
3652 #endif /* !CC_INTERP */