Thu, 27 May 2010 19:08:38 -0700
6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_32.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp()
// (it is not different for category 1 objects).
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
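
// Illustrative usage (a sketch; mirrors if_icmp below): conditional bytecodes
// jump to not_taken on the negated condition, so that execution falls through
// into the branch code only when cc actually holds:
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(cc), not_taken);  // skip the branch code when cc is false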

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
        }
        __ restore_bcp();
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
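
// Example call site (see aastore below), storing a checked oop element with a
// precise card mark:
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
// Passing val == noreg stores NULL and skips the post barrier.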

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {

  if (!RewriteBytecodes) return;
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
  __ jccb(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
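
// Example call site (see iload below): rcx already holds the fast bytecode,
// so the load into the bc register is suppressed:
//   patch_bytecode(Bytecodes::_iload, rcx, rbx, false);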

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}


void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // could use a better solution here
  } else                 { ShouldNotReachHere();
  }
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}


void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
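
// Worked example for the sequence above: for operand bytes 0xFF 0xFE at
// bcp + 1, load_unsigned_short yields 0xFEFF (little-endian), bswapl turns
// that into 0xFFFE0000, and sarl by 16 sign-extends to 0xFFFFFFFE, i.e. the
// big-endian short -2.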

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from the first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  { Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  Label isOop;
  // atos and itos
  // String is the only oop type we will see here
  __ cmpl(rdx, JVM_CONSTANT_String);
  __ jccb(Assembler::equal, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);
  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(atos);

  if (VerifyOops) {
    __ verify_oop(rax);
  }
  __ bind(Done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
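
// Note (assumed frame layout): locals live at decreasing addresses below rdi,
// so the unsigned local index is negated here before being scaled by the
// iaddress(Register)/laddress(Register) helpers above.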

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}
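
// Illustrative rewrite outcomes for the checks above (the current bytecode is
// the one that gets patched):
//   next == _iload      -> wait; the later iload of the pair is rewritten first
//   next == _fast_iload -> patch current to _fast_iload2 (loads both locals)
//   next == _caload     -> patch current to _fast_icaload
//   otherwise           -> patch current to _fast_iload (skips this check)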

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}


void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for the exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
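
// Note: the single jump_cc(aboveEqual) above relies on an unsigned compare; a
// negative index looks like a huge unsigned value, so "index < 0" and
// "index >= length" are both caught by one branch.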

void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}


void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ fld_d(daddress(n));
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes, having little code of their own, are the most profitable
  // to rewrite.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then delay the rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ fstp_d(daddress(rbx));
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}


void TemplateTable::wide_fstore() {
  wide_istore();
}


void TemplateTable::wide_dstore() {
  wide_lstore();
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
  // Move superklass into EAX
  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array+index*wordSize+12 into a single register. Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows ECX. Resets EDI to locals.
  // Superklass in EAX. Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL (noreg means NULL to do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}


void TemplateTable::sastore() {
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ fstp_d(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}


void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    default   : ShouldNotReachHere();
  }
}
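
// The masking notes above match the JVM spec: int shifts use only the low
// 5 bits of the count (so e.g. x << 33 == x << 1), which is exactly the
// masking the x86 shift instructions apply to the count in CL.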

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}
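
// The correction in corrected_idivl handles the one case where x86 idiv traps
// but Java defines a result: min_int / -1 must yield min_int (and min_int % -1
// must yield 0) instead of raising a division overflow.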

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}


void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}


void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}


void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ fadd_s (at_rsp()); break;
    case sub: __ fsubr_s(at_rsp()); break;
    case mul: __ fmul_s (at_rsp()); break;
    case div: __ fdivr_s(at_rsp()); break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop the float operand off the stack
}
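
// Note on the reversed forms above: the right operand y sits in ST0 and the
// left operand x at rsp, so e.g. sub uses fsubr_s to compute mem - ST0 = x - y
// rather than ST0 - mem.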

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}
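
// Sketch of the strict-fp path above (the usual x87 double-rounding fix):
// scaling by subnormal_bias1 before the operation and by subnormal_bias2
// afterwards makes results near the double subnormal range round as strictfp
// requires, despite the 80-bit x87 intermediate precision.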

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);  // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  { TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);         // add one slot for d2ieee()
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);     // truncate upper 24 bits
      __ sarl(rax, 24);     // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF); // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);     // truncate upper 16 bits
      __ sarl(rax, 16);     // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);         // reserve space for f2ieee()
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    default:
      ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ fld_s(at_rsp());
  } else {
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}
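
// unordered_result encodes the bytecode flavor: fcmpl/dcmpl pass a negative
// value (NaN compares as -1) and fcmpg/dcmpg a positive one (NaN compares as
// +1); fcmp2int uses that to pick the result for unordered operands.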

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                 // ECX holds method
  __ profile_taken_branch(rax, rbx);  // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up EDX with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));


  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // increment counter
    __ movl(rax, Address(rcx, be_offset));                   // load backedge counter
    __ incrementl(rax, InvocationCounter::count_increment);  // increment counter
    __ movl(Address(rcx, be_offset), rax);                   // store counter

    __ movl(rax, Address(rcx, inv_offset));             // load invocation counter
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
    __ addl(rax, Address(rcx, be_offset));              // add both counters

    if (ProfileInterpreter) {
      // Test to see if we should create a method data oop
      __ cmp32(rax,
               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, dispatch);

      // if no method data exists, go to profile method
      __ test_method_data_pointer(rax, profile_method);

      if (UseOnStackReplacement) {
        // check for overflow against rbx, which is the MDO taken count
        __ cmp32(rbx,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::below, dispatch);

        // When ProfileInterpreter is on, the backedge_count comes from the
        // methodDataOop, whose value does not get reset on the call to
        // frequency_counter_overflow(). To avoid excessive calls to the overflow
        // routine while the method is being compiled, add a second test to make
        // sure the overflow function is called only once every overflow_frequency.
        const int overflow_frequency = 1024;
        __ andptr(rbx, overflow_frequency - 1);
        __ jcc(Assembler::zero, backedge_counter_overflow);

      }
    } else {
      if (UseOnStackReplacement) {
        // check for overflow against rax, which is the sum of the counters
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {

      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      const Register thread = rcx;
      __ get_thread(thread);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax is the OSR buffer, move it to the expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));  // get sender sp
      __ leave();        // remove frame anchor
      __ pop(rdi);       // get return address
      __ mov(rsp, rdx);  // set sp to sender sp


      Label skip;
      Label chkint;

      // The interpreter frame we have removed may be returning to
      // either the call stub or the interpreter. Since we will
      // now be returning from a compiled (OSR) nmethod we must
      // adjust the return address to one where it can handle compiled
      // results and clean the FPU stack. This is very similar to
      // what an i2c adapter must do.

      // Are we returning to the call stub?

      __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
      __ jcc(Assembler::notEqual, chkint);

      // yes, adjust to the specialized call stub return.
      assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
      __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
      __ jmp(skip);

      __ bind(chkint);

      // Are we returning to the interpreter? Look for sentinel

      __ cmpl(Address(rdi, -2 * wordSize), Interpreter::return_sentinel);
      __ jcc(Assembler::notEqual, skip);

      // Adjust to compiled return back to interpreter

      __ movptr(rdi, Address(rdi, -wordSize));
      __ bind(skip);

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rsi
  __ lea(rbx, at_bcp(wordSize));
  __ andptr(rbx, -wordSize);
  // load lo & hi
  __ movl(rcx, Address(rbx, 1 * wordSize));
  __ movl(rdx, Address(rbx, 2 * wordSize));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jccb(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jccb(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}
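
// Operand layout assumed above (per the JVM spec): after padding bcp up to a
// 4-byte boundary, tableswitch carries the default offset, low, high, and then
// (high - low + 1) 32-bit jump offsets, all big-endian; hence the bswapl calls
// and the entry lookup at 3 * BytesPerInt.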

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}


void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswapl rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align rsi
  __ lea(rbx, at_bcp(wordSize));  // btw: should be able to get rid of this instruction (change offsets below)
  __ andptr(rbx, -wordSize);
  // set counter
  __ movl(rcx, Address(rbx, wordSize));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
  __ jccb(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmpb(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }
1872 // register allocation
1873 const Register key = rax; // already set (tosca)
1874 const Register array = rbx;
1875 const Register i = rcx;
1876 const Register j = rdx;
1877 const Register h = rdi; // needs to be restored
1878 const Register temp = rsi;
1879 // setup array
1880 __ save_bcp();
1882 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1883 __ andptr(array, -wordSize);
1884 // initialize i & j
1885 __ xorl(i, i); // i = 0;
1886 __ movl(j, Address(array, -wordSize)); // j = length(array);
1887   // Convert j into native byte ordering
1888 __ bswapl(j);
1889 // and start
1890 Label entry;
1891 __ jmp(entry);
1893 // binary search loop
1894 { Label loop;
1895 __ bind(loop);
1896 // int h = (i + j) >> 1;
1897 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1898 __ sarl(h, 1); // h = (i + j) >> 1;
1899 // if (key < array[h].fast_match()) {
1900 // j = h;
1901 // } else {
1902 // i = h;
1903 // }
1904 // Convert array[h].match to native byte-ordering before compare
1905 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1906 __ bswapl(temp);
1907 __ cmpl(key, temp);
1908 if (VM_Version::supports_cmov()) {
1909 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
1910 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
1911 } else {
1912 Label set_i, end_of_if;
1913 __ jccb(Assembler::greaterEqual, set_i); // {
1914 __ mov(j, h); // j = h;
1915 __ jmp(end_of_if); // }
1916 __ bind(set_i); // else {
1917 __ mov(i, h); // i = h;
1918 __ bind(end_of_if); // }
1919 }
1920 // while (i+1 < j)
1921 __ bind(entry);
1922 __ leal(h, Address(i, 1)); // i+1
1923 __ cmpl(h, j); // i+1 < j
1924 __ jcc(Assembler::less, loop);
1925 }
1927 // end of binary search, result index is i (must check again!)
1928 Label default_case;
1929 // Convert array[i].match to native byte-ordering before compare
1930 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1931 __ bswapl(temp);
1932 __ cmpl(key, temp);
1933 __ jcc(Assembler::notEqual, default_case);
1935 // entry found -> j = offset
1936 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1937 __ profile_switch_case(i, key, array);
1938 __ bswapl(j);
1939 LP64_ONLY(__ movslq(j, j));
1940 __ restore_bcp();
1941 __ restore_locals(); // restore rdi
1942 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1944 __ addptr(rsi, j);
1945 __ dispatch_only(vtos);
1947 // default case -> j = default offset
1948 __ bind(default_case);
1949 __ profile_switch_default(i);
1950 __ movl(j, Address(array, -2*wordSize));
1951 __ bswapl(j);
1952 LP64_ONLY(__ movslq(j, j));
1953 __ restore_bcp();
1954 __ restore_locals(); // restore rdi
1955 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1956 __ addptr(rsi, j);
1957 __ dispatch_only(vtos);
1958 }
1961 void TemplateTable::_return(TosState state) {
1962 transition(state, state);
1963 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
1965 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1966 assert(state == vtos, "only valid state");
1967 __ movptr(rax, aaddress(0));
1968 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
1969 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1970 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
1971 Label skip_register_finalizer;
1972 __ jcc(Assembler::zero, skip_register_finalizer);
1974 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
1976 __ bind(skip_register_finalizer);
1977 }
1979 __ remove_activation(state, rsi);
1980 __ jmp(rsi);
1981 }
1984 // ----------------------------------------------------------------------------
1985 // Volatile variables demand their effects be made known to all CPUs in
1986 // order. Store buffers on most chips allow reads & writes to reorder; the
1987 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1988 // memory barrier (i.e., it's not sufficient that the interpreter does not
1989 // reorder volatile references; the hardware also must not reorder them).
1990 //
1991 // According to the new Java Memory Model (JMM):
1992 // (1) All volatiles are serialized wrt each other.
1993 // ALSO reads & writes act as acquire & release, so:
1994 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1995 // the read float up to before the read. It's OK for non-volatile memory refs
1996 // that happen before the volatile read to float down below it.
1997 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1998 // that happen BEFORE the write float down to after the write. It's OK for
1999 // non-volatile memory refs that happen after the volatile write to float up
2000 // before it.
2001 //
2002 // We only put in barriers around volatile refs (they are expensive), not
2003 // _between_ memory refs (that would require us to track the flavor of the
2004 // previous memory refs). Requirements (2) and (3) require some barriers
2005 // before volatile stores and after volatile loads. These nearly cover
2006 // requirement (1) but miss the volatile-store-volatile-load case. This final
2007 // case is placed after volatile-stores although it could just as well go
2008 // before volatile-loads.
2009 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2010   // Helper function to insert an is-volatile test and memory barrier
2011 if( !os::is_MP() ) return; // Not needed on single CPU
2012 __ membar(order_constraint);
2013 }
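// A sketch of the placement those rules imply (illustrative pseudocode only,
// not generated code; `v` stands for a hypothetical volatile field):
//
//   x = v;                                    // volatile read
//   volatile_barrier(LoadLoad | LoadStore);   // barrier after volatile loads
//   ...
//   volatile_barrier(LoadStore | StoreStore); // barrier before volatile stores
//   v = x;                                    // volatile write
//   volatile_barrier(StoreLoad | StoreStore); // covers the store-load case
//
// On x86 most of these are no-ops; the code below only emits an explicit
// barrier after volatile stores (see putfield_or_static and fast_storefield).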
2015 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
2016 assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
2017 bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
2019 Register temp = rbx;
2021 assert_different_registers(Rcache, index, temp);
2023 const int shift_count = (1 + byte_no)*BitsPerByte;
2024 Label resolved;
2025 __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
2026 if (is_invokedynamic) {
2027 // we are resolved if the f1 field contains a non-null CallSite object
2028 __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
2029 __ jcc(Assembler::notEqual, resolved);
2030 } else {
2031 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2032 __ shrl(temp, shift_count);
2033 // have we resolved this bytecode?
2034 __ andl(temp, 0xFF);
2035 __ cmpl(temp, (int)bytecode());
2036 __ jcc(Assembler::equal, resolved);
2037 }
2039 // resolve first time through
2040 address entry;
2041 switch (bytecode()) {
2042 case Bytecodes::_getstatic : // fall through
2043 case Bytecodes::_putstatic : // fall through
2044 case Bytecodes::_getfield : // fall through
2045 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2046 case Bytecodes::_invokevirtual : // fall through
2047 case Bytecodes::_invokespecial : // fall through
2048 case Bytecodes::_invokestatic : // fall through
2049 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2050 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2051 default : ShouldNotReachHere(); break;
2052 }
2053 __ movl(temp, (int)bytecode());
2054 __ call_VM(noreg, entry, temp);
2055 // Update registers with resolved info
2056 __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
2057 __ bind(resolved);
2058 }
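// Conceptually, the fast-path check above amounts to (a hedged C sketch,
// not the generated code; `e` is the constant pool cache entry):
//
//   if (((e->indices >> ((1 + byte_no) * BitsPerByte)) & 0xFF) == bytecode())
//     goto resolved;                     // already resolved for this bytecode
//   InterpreterRuntime::resolve_*(...);  // slow path: fill in the entry
//
// For invokedynamic the test is instead whether f1 already holds a non-null
// CallSite object.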
2061 // The cache and index registers must be set before the call
2062 void TemplateTable::load_field_cp_cache_entry(Register obj,
2063 Register cache,
2064 Register index,
2065 Register off,
2066 Register flags,
2067 bool is_static = false) {
2068 assert_different_registers(cache, index, flags, off);
2070 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2071 // Field offset
2072 __ movptr(off, Address(cache, index, Address::times_ptr,
2073 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2074 // Flags
2075 __ movl(flags, Address(cache, index, Address::times_ptr,
2076 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2078   // klass (overwrites the obj register for static fields)
2079 if (is_static) {
2080 __ movptr(obj, Address(cache, index, Address::times_ptr,
2081 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2082 }
2083 }
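// Callers decode the loaded flags word roughly as follows (illustrative):
//
//   tos_state   = (flags >> ConstantPoolCacheEntry::tosBits) & 0x0f;
//   is_volatile = (flags >> ConstantPoolCacheEntry::volatileField) & 1;
//
// See getfield_or_static and putfield_or_static below.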
2085 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2086 Register method,
2087 Register itable_index,
2088 Register flags,
2089 bool is_invokevirtual,
2090 bool is_invokevfinal /*unused*/) {
2091 // setup registers
2092 const Register cache = rcx;
2093 const Register index = rdx;
2094 assert_different_registers(method, flags);
2095 assert_different_registers(method, cache, index);
2096 assert_different_registers(itable_index, flags);
2097 assert_different_registers(itable_index, cache, index);
2098 // determine constant pool cache field offsets
2099 const int method_offset = in_bytes(
2100 constantPoolCacheOopDesc::base_offset() +
2101 (is_invokevirtual
2102 ? ConstantPoolCacheEntry::f2_offset()
2103 : ConstantPoolCacheEntry::f1_offset()
2104 )
2105 );
2106 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2107 ConstantPoolCacheEntry::flags_offset());
2108 // access constant pool cache fields
2109 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2110 ConstantPoolCacheEntry::f2_offset());
2112 resolve_cache_and_index(byte_no, cache, index);
2114 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2115 if (itable_index != noreg) {
2116 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2117 }
2118 __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
2119 }
2122 // The cache and index registers are expected to be set before the call.
2123 // Correct values of the cache and index registers are preserved.
2124 void TemplateTable::jvmti_post_field_access(Register cache,
2125 Register index,
2126 bool is_static,
2127 bool has_tos) {
2128 if (JvmtiExport::can_post_field_access()) {
2129 // Check to see if a field access watch has been set before we take
2130 // the time to call into the VM.
2131 Label L1;
2132 assert_different_registers(cache, index, rax);
2133 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2134 __ testl(rax,rax);
2135 __ jcc(Assembler::zero, L1);
2137 // cache entry pointer
2138 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2139 __ shll(index, LogBytesPerWord);
2140 __ addptr(cache, index);
2141 if (is_static) {
2142 __ xorptr(rax, rax); // NULL object reference
2143 } else {
2144 __ pop(atos); // Get the object
2145 __ verify_oop(rax);
2146 __ push(atos); // Restore stack state
2147 }
2148     // rax: object pointer or NULL
2149 // cache: cache entry pointer
2150 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2151 rax, cache);
2152 __ get_cache_and_index_at_bcp(cache, index, 1);
2153 __ bind(L1);
2154 }
2155 }
2157 void TemplateTable::pop_and_check_object(Register r) {
2158 __ pop_ptr(r);
2159 __ null_check(r); // for field access must check obj.
2160 __ verify_oop(r);
2161 }
2163 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2164 transition(vtos, vtos);
2166 const Register cache = rcx;
2167 const Register index = rdx;
2168 const Register obj = rcx;
2169 const Register off = rbx;
2170 const Register flags = rax;
2172 resolve_cache_and_index(byte_no, cache, index);
2173 jvmti_post_field_access(cache, index, is_static, false);
2174 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2176 if (!is_static) pop_and_check_object(obj);
2178 const Address lo(obj, off, Address::times_1, 0*wordSize);
2179 const Address hi(obj, off, Address::times_1, 1*wordSize);
2181 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2183 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2184 assert(btos == 0, "change code, btos != 0");
2185 // btos
2186 __ andptr(flags, 0x0f);
2187 __ jcc(Assembler::notZero, notByte);
2189 __ load_signed_byte(rax, lo );
2190 __ push(btos);
2191 // Rewrite bytecode to be faster
2192 if (!is_static) {
2193 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2194 }
2195 __ jmp(Done);
2197 __ bind(notByte);
2198 // itos
2199 __ cmpl(flags, itos );
2200 __ jcc(Assembler::notEqual, notInt);
2202 __ movl(rax, lo );
2203 __ push(itos);
2204 // Rewrite bytecode to be faster
2205 if (!is_static) {
2206 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2207 }
2208 __ jmp(Done);
2210 __ bind(notInt);
2211 // atos
2212 __ cmpl(flags, atos );
2213 __ jcc(Assembler::notEqual, notObj);
2215 __ movl(rax, lo );
2216 __ push(atos);
2217 if (!is_static) {
2218 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2219 }
2220 __ jmp(Done);
2222 __ bind(notObj);
2223 // ctos
2224 __ cmpl(flags, ctos );
2225 __ jcc(Assembler::notEqual, notChar);
2227 __ load_unsigned_short(rax, lo );
2228 __ push(ctos);
2229 if (!is_static) {
2230 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2231 }
2232 __ jmp(Done);
2234 __ bind(notChar);
2235 // stos
2236 __ cmpl(flags, stos );
2237 __ jcc(Assembler::notEqual, notShort);
2239 __ load_signed_short(rax, lo );
2240 __ push(stos);
2241 if (!is_static) {
2242 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2243 }
2244 __ jmp(Done);
2246 __ bind(notShort);
2247 // ltos
2248 __ cmpl(flags, ltos );
2249 __ jcc(Assembler::notEqual, notLong);
2251 // Generate code as if volatile. There just aren't enough registers to
2252   // save that information, and this code is faster than the test.
2253 __ fild_d(lo); // Must load atomically
2254 __ subptr(rsp,2*wordSize); // Make space for store
2255 __ fistp_d(Address(rsp,0));
2256 __ pop(rax);
2257 __ pop(rdx);
2259 __ push(ltos);
2260 // Don't rewrite to _fast_lgetfield for potential volatile case.
2261 __ jmp(Done);
2263 __ bind(notLong);
2264 // ftos
2265 __ cmpl(flags, ftos );
2266 __ jcc(Assembler::notEqual, notFloat);
2268 __ fld_s(lo);
2269 __ push(ftos);
2270 if (!is_static) {
2271 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2272 }
2273 __ jmp(Done);
2275 __ bind(notFloat);
2276 // dtos
2277 __ cmpl(flags, dtos );
2278 __ jcc(Assembler::notEqual, notDouble);
2280 __ fld_d(lo);
2281 __ push(dtos);
2282 if (!is_static) {
2283 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2284 }
2285 __ jmpb(Done);
2287 __ bind(notDouble);
2289 __ stop("Bad state");
2291 __ bind(Done);
2292 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2293 // volatile_barrier( );
2294 }
2297 void TemplateTable::getfield(int byte_no) {
2298 getfield_or_static(byte_no, false);
2299 }
2302 void TemplateTable::getstatic(int byte_no) {
2303 getfield_or_static(byte_no, true);
2304 }
2306 // The cache and index registers are expected to be set before the call.
2307 // The function may destroy various registers, just not the cache and index registers.
2308 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2310 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2312 if (JvmtiExport::can_post_field_modification()) {
2313 // Check to see if a field modification watch has been set before we take
2314 // the time to call into the VM.
2315 Label L1;
2316 assert_different_registers(cache, index, rax);
2317 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2318 __ testl(rax, rax);
2319 __ jcc(Assembler::zero, L1);
2321     // The cache and index registers have already been set. This would
2322     // allow us to eliminate this call, but the cache and index registers
2323     // would then have to be used consistently after this line.
2324 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2326 if (is_static) {
2327 // Life is simple. Null out the object pointer.
2328 __ xorptr(rbx, rbx);
2329 } else {
2330 // Life is harder. The stack holds the value on top, followed by the object.
2331 // We don't know the size of the value, though; it could be one or two words
2332 // depending on its type. As a result, we must find the type to determine where
2333 // the object is.
2334 Label two_word, valsize_known;
2335 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2336 ConstantPoolCacheEntry::flags_offset())));
2337 __ mov(rbx, rsp);
2338 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2339 // Make sure we don't need to mask rcx for tosBits after the above shift
2340 ConstantPoolCacheEntry::verify_tosBits();
2341 __ cmpl(rcx, ltos);
2342 __ jccb(Assembler::equal, two_word);
2343 __ cmpl(rcx, dtos);
2344 __ jccb(Assembler::equal, two_word);
2345 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2346 __ jmpb(valsize_known);
2348 __ bind(two_word);
2349 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2351 __ bind(valsize_known);
2352 // setup object pointer
2353 __ movptr(rbx, Address(rbx, 0));
2354 }
2355 // cache entry pointer
2356 __ addptr(rax, in_bytes(cp_base_offset));
2357 __ shll(rdx, LogBytesPerWord);
2358 __ addptr(rax, rdx);
2359 // object (tos)
2360 __ mov(rcx, rsp);
2361     // rbx: object pointer set up above (NULL if static)
2362     // rax: cache entry pointer
2363 // rcx: jvalue object on the stack
2364 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2365 rbx, rax, rcx);
2366 __ get_cache_and_index_at_bcp(cache, index, 1);
2367 __ bind(L1);
2368 }
2369 }
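// The object-pointer computation above is, in effect (illustrative only):
//
//   tos   = (flags >> tosBits) & mask;
//   slots = (tos == ltos || tos == dtos) ? 2 : 1;  // value size on the stack
//   obj   = *(oop*)(rsp + Interpreter::expr_offset_in_bytes(slots));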
2372 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2373 transition(vtos, vtos);
2375 const Register cache = rcx;
2376 const Register index = rdx;
2377 const Register obj = rcx;
2378 const Register off = rbx;
2379 const Register flags = rax;
2381 resolve_cache_and_index(byte_no, cache, index);
2382 jvmti_post_field_mod(cache, index, is_static);
2383 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2385 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2386 // volatile_barrier( );
2388 Label notVolatile, Done;
2389 __ movl(rdx, flags);
2390 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2391 __ andl(rdx, 0x1);
2393 // field addresses
2394 const Address lo(obj, off, Address::times_1, 0*wordSize);
2395 const Address hi(obj, off, Address::times_1, 1*wordSize);
2397 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2399 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2400 assert(btos == 0, "change code, btos != 0");
2401 // btos
2402 __ andl(flags, 0x0f);
2403 __ jcc(Assembler::notZero, notByte);
2405 __ pop(btos);
2406 if (!is_static) pop_and_check_object(obj);
2407 __ movb(lo, rax );
2408 if (!is_static) {
2409 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2410 }
2411 __ jmp(Done);
2413 __ bind(notByte);
2414 // itos
2415 __ cmpl(flags, itos );
2416 __ jcc(Assembler::notEqual, notInt);
2418 __ pop(itos);
2419 if (!is_static) pop_and_check_object(obj);
2421 __ movl(lo, rax );
2422 if (!is_static) {
2423 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2424 }
2425 __ jmp(Done);
2427 __ bind(notInt);
2428 // atos
2429 __ cmpl(flags, atos );
2430 __ jcc(Assembler::notEqual, notObj);
2432 __ pop(atos);
2433 if (!is_static) pop_and_check_object(obj);
2435 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2437 if (!is_static) {
2438 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2439 }
2441 __ jmp(Done);
2443 __ bind(notObj);
2444 // ctos
2445 __ cmpl(flags, ctos );
2446 __ jcc(Assembler::notEqual, notChar);
2448 __ pop(ctos);
2449 if (!is_static) pop_and_check_object(obj);
2450 __ movw(lo, rax );
2451 if (!is_static) {
2452 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2453 }
2454 __ jmp(Done);
2456 __ bind(notChar);
2457 // stos
2458 __ cmpl(flags, stos );
2459 __ jcc(Assembler::notEqual, notShort);
2461 __ pop(stos);
2462 if (!is_static) pop_and_check_object(obj);
2463 __ movw(lo, rax );
2464 if (!is_static) {
2465 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2466 }
2467 __ jmp(Done);
2469 __ bind(notShort);
2470 // ltos
2471 __ cmpl(flags, ltos );
2472 __ jcc(Assembler::notEqual, notLong);
2474 Label notVolatileLong;
2475 __ testl(rdx, rdx);
2476 __ jcc(Assembler::zero, notVolatileLong);
2478 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2479 if (!is_static) pop_and_check_object(obj);
2481 // Replace with real volatile test
2482 __ push(rdx);
2483 __ push(rax); // Must update atomically with FIST
2484 __ fild_d(Address(rsp,0)); // So load into FPU register
2485 __ fistp_d(lo); // and put into memory atomically
2486 __ addptr(rsp, 2*wordSize);
2487 // volatile_barrier();
2488 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2489 Assembler::StoreStore));
2490 // Don't rewrite volatile version
2491 __ jmp(notVolatile);
2493 __ bind(notVolatileLong);
2495 __ pop(ltos); // overwrites rdx
2496 if (!is_static) pop_and_check_object(obj);
2497 NOT_LP64(__ movptr(hi, rdx));
2498 __ movptr(lo, rax);
2499 if (!is_static) {
2500 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2501 }
2502 __ jmp(notVolatile);
2504 __ bind(notLong);
2505 // ftos
2506 __ cmpl(flags, ftos );
2507 __ jcc(Assembler::notEqual, notFloat);
2509 __ pop(ftos);
2510 if (!is_static) pop_and_check_object(obj);
2511 __ fstp_s(lo);
2512 if (!is_static) {
2513 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2514 }
2515 __ jmp(Done);
2517 __ bind(notFloat);
2518 // dtos
2519 __ cmpl(flags, dtos );
2520 __ jcc(Assembler::notEqual, notDouble);
2522 __ pop(dtos);
2523 if (!is_static) pop_and_check_object(obj);
2524 __ fstp_d(lo);
2525 if (!is_static) {
2526 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2527 }
2528 __ jmp(Done);
2530 __ bind(notDouble);
2532 __ stop("Bad state");
2534 __ bind(Done);
2536 // Check for volatile store
2537 __ testl(rdx, rdx);
2538 __ jcc(Assembler::zero, notVolatile);
2539 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2540 Assembler::StoreStore));
2541 __ bind(notVolatile);
2542 }
2545 void TemplateTable::putfield(int byte_no) {
2546 putfield_or_static(byte_no, false);
2547 }
2550 void TemplateTable::putstatic(int byte_no) {
2551 putfield_or_static(byte_no, true);
2552 }
2554 void TemplateTable::jvmti_post_fast_field_mod() {
2555 if (JvmtiExport::can_post_field_modification()) {
2556 // Check to see if a field modification watch has been set before we take
2557 // the time to call into the VM.
2558 Label L2;
2559 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2560 __ testl(rcx,rcx);
2561 __ jcc(Assembler::zero, L2);
2562 __ pop_ptr(rbx); // copy the object pointer from tos
2563 __ verify_oop(rbx);
2564 __ push_ptr(rbx); // put the object pointer back on tos
2565 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2566 __ mov(rcx, rsp);
2567     __ push_ptr(rbx);  // save object pointer so we can steal rbx
2568 __ xorptr(rbx, rbx);
2569 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2570 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2571 switch (bytecode()) { // load values into the jvalue object
2572 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2573 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2574 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2575 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2576 case Bytecodes::_fast_lputfield:
2577 NOT_LP64(__ movptr(hi_value, rdx));
2578 __ movptr(lo_value, rax);
2579 break;
2581   // need to call fld_s() after fstp_s() to restore the value for use below
2582 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2584   // need to call fld_d() after fstp_d() to restore the value for use below
2585 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2587 // since rcx is not an object we don't call store_check() here
2588 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2590 default: ShouldNotReachHere();
2591 }
2592 __ pop_ptr(rbx); // restore copy of object pointer
2594 // Save rax, and sometimes rdx because call_VM() will clobber them,
2595 // then use them for JVM/DI purposes
2596 __ push(rax);
2597 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2598 // access constant pool cache entry
2599 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2600 __ verify_oop(rbx);
2601     // rbx: object pointer copied above
2602     // rax: cache entry pointer
2603 // rcx: jvalue object on the stack
2604 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2605 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2606 __ pop(rax); // restore lower value
2607 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2608 __ bind(L2);
2609 }
2610 }
2612 void TemplateTable::fast_storefield(TosState state) {
2613 transition(state, vtos);
2615 ByteSize base = constantPoolCacheOopDesc::base_offset();
2617 jvmti_post_fast_field_mod();
2619 // access constant pool cache
2620 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2622   // Test for volatile with rdx, but rdx is the tos register for lputfield.
2623 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2624 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2625 ConstantPoolCacheEntry::flags_offset())));
2627 // replace index with field offset from cache entry
2628 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2630 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2631 // volatile_barrier( );
2633 Label notVolatile, Done;
2634 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2635 __ andl(rdx, 0x1);
2636 // Check for volatile store
2637 __ testl(rdx, rdx);
2638 __ jcc(Assembler::zero, notVolatile);
2640 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2642 // Get object from stack
2643 pop_and_check_object(rcx);
2645 // field addresses
2646 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2647 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2649 // access field
2650 switch (bytecode()) {
2651 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2652 case Bytecodes::_fast_sputfield: // fall through
2653 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2654 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2655 case Bytecodes::_fast_lputfield:
2656 NOT_LP64(__ movptr(hi, rdx));
2657 __ movptr(lo, rax);
2658 break;
2659 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2660 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2661 case Bytecodes::_fast_aputfield: {
2662 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2663 break;
2664 }
2665 default:
2666 ShouldNotReachHere();
2667 }
2669 Label done;
2670 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2671 Assembler::StoreStore));
2672 // Barriers are so large that short branch doesn't reach!
2673 __ jmp(done);
2675   // Same code as above, but we don't need rdx to test for volatile.
2676 __ bind(notVolatile);
2678 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2680 // Get object from stack
2681 pop_and_check_object(rcx);
2683 // access field
2684 switch (bytecode()) {
2685 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2686 case Bytecodes::_fast_sputfield: // fall through
2687 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2688 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2689 case Bytecodes::_fast_lputfield:
2690 NOT_LP64(__ movptr(hi, rdx));
2691 __ movptr(lo, rax);
2692 break;
2693 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2694 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2695 case Bytecodes::_fast_aputfield: {
2696 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2697 break;
2698 }
2699 default:
2700 ShouldNotReachHere();
2701 }
2702 __ bind(done);
2703 }
2706 void TemplateTable::fast_accessfield(TosState state) {
2707 transition(atos, state);
2709 // do the JVMTI work here to avoid disturbing the register state below
2710 if (JvmtiExport::can_post_field_access()) {
2711 // Check to see if a field access watch has been set before we take
2712 // the time to call into the VM.
2713 Label L1;
2714 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2715 __ testl(rcx,rcx);
2716 __ jcc(Assembler::zero, L1);
2717 // access constant pool cache entry
2718 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2719 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2720 __ verify_oop(rax);
2721     // rax: object pointer copied above
2722 // rcx: cache entry pointer
2723 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2724 __ pop_ptr(rax); // restore object pointer
2725 __ bind(L1);
2726 }
2728 // access constant pool cache
2729 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2730 // replace index with field offset from cache entry
2731 __ movptr(rbx, Address(rcx,
2732 rbx,
2733 Address::times_ptr,
2734 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2737   // rax: object
2738 __ verify_oop(rax);
2739 __ null_check(rax);
2740 // field addresses
2741 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2742 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2744 // access field
2745 switch (bytecode()) {
2746 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2747 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2748 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2749 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2750 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2751 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2752 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2753 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2754 default:
2755 ShouldNotReachHere();
2756 }
2758   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO)
2759 // volatile_barrier( );
2760 }
2762 void TemplateTable::fast_xaccess(TosState state) {
2763 transition(vtos, state);
2764 // get receiver
2765 __ movptr(rax, aaddress(0));
2766 // access constant pool cache
2767 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2768 __ movptr(rbx, Address(rcx,
2769 rdx,
2770 Address::times_ptr,
2771 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2772 // make sure exception is reported in correct bcp range (getfield is next instruction)
2773 __ increment(rsi);
2774 __ null_check(rax);
2775 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2776 if (state == itos) {
2777 __ movl(rax, lo);
2778 } else if (state == atos) {
2779 __ movptr(rax, lo);
2780 __ verify_oop(rax);
2781 } else if (state == ftos) {
2782 __ fld_s(lo);
2783 } else {
2784 ShouldNotReachHere();
2785 }
2786 __ decrement(rsi);
2787 }
2791 //----------------------------------------------------------------------------------------------------
2792 // Calls
2794 void TemplateTable::count_calls(Register method, Register temp) {
2795 // implemented elsewhere
2796 ShouldNotReachHere();
2797 }
2800 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2801 // determine flags
2802 Bytecodes::Code code = bytecode();
2803 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2804 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2805 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2806 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2807 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2808 const bool receiver_null_check = is_invokespecial;
2809 const bool save_flags = is_invokeinterface || is_invokevirtual;
2810 // setup registers & access constant pool cache
2811 const Register recv = rcx;
2812 const Register flags = rdx;
2813 assert_different_registers(method, index, recv, flags);
2815 // save 'interpreter return address'
2816 __ save_bcp();
2818 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
2820 // load receiver if needed (note: no return address pushed yet)
2821 if (load_receiver) {
2822 __ movl(recv, flags);
2823 __ andl(recv, 0xFF);
2824 // recv count is 0 based?
2825 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2826 __ movptr(recv, recv_addr);
2827 __ verify_oop(recv);
2828 }
2830 // do null check if needed
2831 if (receiver_null_check) {
2832 __ null_check(recv);
2833 }
2835 if (save_flags) {
2836 __ mov(rsi, flags);
2837 }
2839 // compute return type
2840 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2841 // Make sure we don't need to mask flags for tosBits after the above shift
2842 ConstantPoolCacheEntry::verify_tosBits();
2843 // load return address
2844 {
2845 address table_addr;
2846 if (is_invokeinterface || is_invokedynamic)
2847 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2848 else
2849 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2850 ExternalAddress table(table_addr);
2851 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2852 }
2854 // push return address
2855 __ push(flags);
2857 // Restore flag value from the constant pool cache, and restore rsi
2858 // for later null checks. rsi is the bytecode pointer
2859 if (save_flags) {
2860 __ mov(flags, rsi);
2861 __ restore_bcp();
2862 }
2863 }
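// The return-address selection above is, in effect (illustrative only):
//
//   tos   = flags >> ConstantPoolCacheEntry::tosBits;  // no mask needed, verified
//   table = (is_invokeinterface || is_invokedynamic)
//             ? return_5_addrs_by_index_table()
//             : return_3_addrs_by_index_table();
//   push(table[tos]);                                  // interpreter return entry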
2866 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2867 Register flags) {
2869 // Uses temporary registers rax, rdx
2870 assert_different_registers(index, recv, rax, rdx);
2872 // Test for an invoke of a final method
2873 Label notFinal;
2874 __ movl(rax, flags);
2875 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2876 __ jcc(Assembler::zero, notFinal);
2878     Register method = index;  // method must be rbx
2879 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2881 // do the call - the index is actually the method to call
2882 __ verify_oop(method);
2884 // It's final, need a null check here!
2885 __ null_check(recv);
2887 // profile this call
2888 __ profile_final_call(rax);
2890 __ jump_from_interpreted(method, rax);
2892 __ bind(notFinal);
2894 // get receiver klass
2895 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2896   // Keep recv in rcx because the callee expects it there
2897 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2898 __ verify_oop(rax);
2900 // profile this call
2901 __ profile_virtual_call(rax, rdi, rdx);
2903 // get target methodOop & entry point
2904 const int base = instanceKlass::vtable_start_offset() * wordSize;
2905 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2906 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2907 __ jump_from_interpreted(method, rdx);
2908 }
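// The vtable dispatch above is, roughly (illustrative only):
//
//   klass  = receiver->klass();
//   method = klass->vtable()[index];  // vtable starts at vtable_start_offset
//   jump_from_interpreted(method);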
2911 void TemplateTable::invokevirtual(int byte_no) {
2912 transition(vtos, vtos);
2913 prepare_invoke(rbx, noreg, byte_no);
2915   // rbx: index
2916 // rcx: receiver
2917 // rdx: flags
2919 invokevirtual_helper(rbx, rcx, rdx);
2920 }
2923 void TemplateTable::invokespecial(int byte_no) {
2924 transition(vtos, vtos);
2925 prepare_invoke(rbx, noreg, byte_no);
2926 // do the call
2927 __ verify_oop(rbx);
2928 __ profile_call(rax);
2929 __ jump_from_interpreted(rbx, rax);
2930 }
2933 void TemplateTable::invokestatic(int byte_no) {
2934 transition(vtos, vtos);
2935 prepare_invoke(rbx, noreg, byte_no);
2936 // do the call
2937 __ verify_oop(rbx);
2938 __ profile_call(rax);
2939 __ jump_from_interpreted(rbx, rax);
2940 }
2943 void TemplateTable::fast_invokevfinal(int byte_no) {
2944 transition(vtos, vtos);
2945 __ stop("fast_invokevfinal not used on x86");
2946 }
2949 void TemplateTable::invokeinterface(int byte_no) {
2950 transition(vtos, vtos);
2951 prepare_invoke(rax, rbx, byte_no);
2953   // rax: Interface
2954   // rbx: index
2955 // rcx: receiver
2956 // rdx: flags
2958 // Special case of invokeinterface called for virtual method of
2959 // java.lang.Object. See cpCacheOop.cpp for details.
2960 // This code isn't produced by javac, but could be produced by
2961   // another compliant Java compiler.
2962 Label notMethod;
2963 __ movl(rdi, rdx);
2964 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
2965 __ jcc(Assembler::zero, notMethod);
2967 invokevirtual_helper(rbx, rcx, rdx);
2968 __ bind(notMethod);
2970 // Get receiver klass into rdx - also a null check
2971 __ restore_locals(); // restore rdi
2972 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
2973 __ verify_oop(rdx);
2975 // profile this call
2976 __ profile_virtual_call(rdx, rsi, rdi);
2978 Label no_such_interface, no_such_method;
2980 __ lookup_interface_method(// inputs: rec. class, interface, itable index
2981 rdx, rax, rbx,
2982 // outputs: method, scan temp. reg
2983 rbx, rsi,
2984 no_such_interface);
2986   // rbx: methodOop to call
2987 // rcx: receiver
2988 // Check for abstract method error
2989 // Note: This should be done more efficiently via a throw_abstract_method_error
2990 // interpreter entry point and a conditional jump to it in case of a null
2991 // method.
2992 __ testptr(rbx, rbx);
2993 __ jcc(Assembler::zero, no_such_method);
2995 // do the call
2996 // rcx: receiver
2997   // rbx: methodOop
2998 __ jump_from_interpreted(rbx, rdx);
2999 __ should_not_reach_here();
3001 // exception handling code follows...
3002 // note: must restore interpreter registers to canonical
3003 // state for exception handling to work correctly!
3005 __ bind(no_such_method);
3006 // throw exception
3007 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3008 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3009 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3010 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3011 // the call_VM checks for exception, so we should never return here.
3012 __ should_not_reach_here();
3014 __ bind(no_such_interface);
3015 // throw exception
3016 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3017 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3018 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3019 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3020 InterpreterRuntime::throw_IncompatibleClassChangeError));
3021 // the call_VM checks for exception, so we should never return here.
3022 __ should_not_reach_here();
3023 }
3025 void TemplateTable::invokedynamic(int byte_no) {
3026 transition(vtos, vtos);
3028 if (!EnableInvokeDynamic) {
3029 // We should not encounter this bytecode if !EnableInvokeDynamic.
3030 // The verifier will stop it. However, if we get past the verifier,
3031 // this will stop the thread in a reasonable way, without crashing the JVM.
3032 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3033 InterpreterRuntime::throw_IncompatibleClassChangeError));
3034 // the call_VM checks for exception, so we should never return here.
3035 __ should_not_reach_here();
3036 return;
3037 }
3039 prepare_invoke(rax, rbx, byte_no);
3041 // rax: CallSite object (f1)
3042 // rbx: unused (f2)
3043 // rcx: receiver address
3044 // rdx: flags (unused)
3046 if (ProfileInterpreter) {
3047 Label L;
3048 // %%% should make a type profile for any invokedynamic that takes a ref argument
3049 // profile this call
3050 __ profile_call(rsi);
3051 }
3053 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3054 __ null_check(rcx);
3055 __ prepare_to_jump_from_interpreted();
3056 __ jump_to_method_handle_entry(rcx, rdx);
3057 }
3059 //----------------------------------------------------------------------------------------------------
3060 // Allocation
3062 void TemplateTable::_new() {
3063 transition(vtos, atos);
3064 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3065 Label slow_case;
3066 Label done;
3067 Label initialize_header;
3068 Label initialize_object; // including clearing the fields
3069 Label allocate_shared;
3071 __ get_cpool_and_tags(rcx, rax);
3072 // get instanceKlass
3073 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3074   __ push(rcx);  // save the contents of klass for initializing the header
3076 // make sure the class we're about to instantiate has been resolved.
3077 // Note: slow_case does a pop of stack, which is why we loaded class/pushed above
3078 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3079 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3080 __ jcc(Assembler::notEqual, slow_case);
3082 // make sure klass is initialized & doesn't have finalizer
3083 // make sure klass is fully initialized
3084 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3085 __ jcc(Assembler::notEqual, slow_case);
3087 // get instance_size in instanceKlass (scaled to a count of bytes)
3088 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3089 // test to see if it has a finalizer or is malformed in some way
3090 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3091 __ jcc(Assembler::notZero, slow_case);
3093 //
3094 // Allocate the instance
3095 // 1) Try to allocate in the TLAB
3096 // 2) if fail and the object is large allocate in the shared Eden
3097 // 3) if the above fails (or is not applicable), go to a slow case
3098 // (creates a new TLAB, etc.)
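  // Roughly (an illustrative sketch of the fast paths, not generated code):
  //
  //   new_top = tlab_top + size;
  //   if (new_top <= tlab_end)      { obj = tlab_top; tlab_top = new_top; }
  //   else if (allow_shared_alloc)  { obj = CAS-bump of the shared heap top; }
  //   else                          { obj = InterpreterRuntime::_new(...); }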
3100 const bool allow_shared_alloc =
3101 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3103 if (UseTLAB) {
3104 const Register thread = rcx;
3106 __ get_thread(thread);
3107 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3108 __ lea(rbx, Address(rax, rdx, Address::times_1));
3109 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3110 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3111 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3112 if (ZeroTLAB) {
3113 // the fields have been already cleared
3114 __ jmp(initialize_header);
3115 } else {
3116 // initialize both the header and fields
3117 __ jmp(initialize_object);
3118 }
3119 }
3121 // Allocation in the shared Eden, if allowed.
3122 //
3123 // rdx: instance size in bytes
3124 if (allow_shared_alloc) {
3125 __ bind(allocate_shared);
3127 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3129 Label retry;
3130 __ bind(retry);
3131 __ movptr(rax, heap_top);
3132 __ lea(rbx, Address(rax, rdx, Address::times_1));
3133 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3134 __ jcc(Assembler::above, slow_case);
3136     // Compare rax with the top addr, and if still equal, store the new
3137     // top addr (in rbx) at the address of the top addr pointer. Sets ZF if it
3138     // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3139 //
3140     // rax: object begin
3141     // rbx: object end
3142 // rdx: instance size in bytes
3143 __ locked_cmpxchgptr(rbx, heap_top);
3145 // if someone beat us on the allocation, try again, otherwise continue
3146 __ jcc(Assembler::notEqual, retry);
3147 }
3149 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3150 // The object is initialized before the header. If the object size is
3151 // zero, go directly to the header initialization.
3152 __ bind(initialize_object);
3153 __ decrement(rdx, sizeof(oopDesc));
3154 __ jcc(Assembler::zero, initialize_header);
3156 // Initialize topmost object field, divide rdx by 8, check if odd and
3157 // test if zero.
3158 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3159 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3161     // rdx must have been a multiple of 8
3162 #ifdef ASSERT
3163     // make sure rdx was a multiple of 8
3164 Label L;
3165 // Ignore partial flag stall after shrl() since it is debug VM
3166 __ jccb(Assembler::carryClear, L);
3167 __ stop("object size is not multiple of 2 - adjust this code");
3168 __ bind(L);
3169 // rdx must be > 0, no extra check needed here
3170 #endif
3172 // initialize remaining object fields: rdx was a multiple of 8
3173 { Label loop;
3174 __ bind(loop);
3175 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3176 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3177 __ decrement(rdx);
3178 __ jcc(Assembler::notZero, loop);
3179 }
3181 // initialize object header only.
3182 __ bind(initialize_header);
3183 if (UseBiasedLocking) {
3184 __ pop(rcx); // get saved klass back in the register.
3185 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3186 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3187 } else {
3188 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3189 (int32_t)markOopDesc::prototype()); // header
3190 __ pop(rcx); // get saved klass back in the register.
3191 }
3192 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3194 {
3195 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3196 // Trigger dtrace event for fastpath
3197 __ push(atos);
3198 __ call_VM_leaf(
3199 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3200 __ pop(atos);
3201 }
3203 __ jmp(done);
3204 }
3206 // slow case
3207 __ bind(slow_case);
3208 __ pop(rcx); // restore stack pointer to what it was when we came in.
3209 __ get_constant_pool(rax);
3210 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3211 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3213 // continue
3214 __ bind(done);
3215 }
3218 void TemplateTable::newarray() {
3219 transition(itos, atos);
3220 __ push_i(rax); // make sure everything is on the stack
3221 __ load_unsigned_byte(rdx, at_bcp(1));
3222 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3223 __ pop_i(rdx); // discard size
3224 }
3227 void TemplateTable::anewarray() {
3228 transition(itos, atos);
3229 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3230 __ get_constant_pool(rcx);
3231 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3232 }
3235 void TemplateTable::arraylength() {
3236 transition(atos, itos);
3237 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3238 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3239 }
3242 void TemplateTable::checkcast() {
3243 transition(atos, atos);
3244 Label done, is_null, ok_is_subtype, quicked, resolved;
3245 __ testptr(rax, rax); // Object is in EAX
3246 __ jcc(Assembler::zero, is_null);
3248 // Get cpool & tags index
3249 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3250 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3251 // See if bytecode has already been quicked
3252 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3253 __ jcc(Assembler::equal, quicked);
3255 __ push(atos);
3256 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3257 __ pop_ptr(rdx);
3258 __ jmpb(resolved);
3260 // Get superklass in EAX and subklass in EBX
3261 __ bind(quicked);
3262 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3263 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3265 __ bind(resolved);
3266 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3268 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3269 // Superklass in EAX. Subklass in EBX.
3270 __ gen_subtype_check( rbx, ok_is_subtype );
3272 // Come here on failure
3273 __ push(rdx);
3274 // object is at TOS
3275 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3277 // Come here on success
3278 __ bind(ok_is_subtype);
3279   __ mov(rax, rdx);  // Restore object from EDX
3281 // Collect counts on whether this check-cast sees NULLs a lot or not.
3282 if (ProfileInterpreter) {
3283 __ jmp(done);
3284 __ bind(is_null);
3285 __ profile_null_seen(rcx);
3286 } else {
3287 __ bind(is_null); // same as 'done'
3288 }
3289 __ bind(done);
3290 }
3293 void TemplateTable::instanceof() {
3294 transition(atos, itos);
3295 Label done, is_null, ok_is_subtype, quicked, resolved;
3296 __ testptr(rax, rax);
3297 __ jcc(Assembler::zero, is_null);
3299 // Get cpool & tags index
3300 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3301 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3302 // See if bytecode has already been quicked
3303 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3304 __ jcc(Assembler::equal, quicked);
3306 __ push(atos);
3307 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3308 __ pop_ptr(rdx);
3309 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3310 __ jmp(resolved);
3312 // Get superklass in EAX and subklass in EDX
3313 __ bind(quicked);
3314 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3315 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3317 __ bind(resolved);
3319 // Generate subtype check. Blows ECX. Resets EDI.
3320 // Superklass in EAX. Subklass in EDX.
3321 __ gen_subtype_check( rdx, ok_is_subtype );
3323 // Come here on failure
3324 __ xorl(rax,rax);
3325 __ jmpb(done);
3326 // Come here on success
3327 __ bind(ok_is_subtype);
3328 __ movl(rax, 1);
3330 // Collect counts on whether this test sees NULLs a lot or not.
3331 if (ProfileInterpreter) {
3332 __ jmp(done);
3333 __ bind(is_null);
3334 __ profile_null_seen(rcx);
3335 } else {
3336 __ bind(is_null); // same as 'done'
3337 }
3338 __ bind(done);
3339   // rax = 0: obj == NULL or obj is not an instance of the specified klass
3340   // rax = 1: obj != NULL and obj is an instance of the specified klass
3341 }
3344 //----------------------------------------------------------------------------------------------------
3345 // Breakpoints
3346 void TemplateTable::_breakpoint() {
3348   // Note: We get here even if we are single stepping.
3349   // jbug insists on setting breakpoints at every bytecode
3350   // even if we are in single-step mode.
3352 transition(vtos, vtos);
3354 // get the unpatched byte code
3355 __ get_method(rcx);
3356 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3357 __ mov(rbx, rax);
3359 // post the breakpoint event
3360 __ get_method(rcx);
3361 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3363 // complete the execution of original bytecode
3364 __ dispatch_only_normal(vtos);
3365 }
3368 //----------------------------------------------------------------------------------------------------
3369 // Exceptions
3371 void TemplateTable::athrow() {
3372 transition(atos, vtos);
3373 __ null_check(rax);
3374 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3375 }
3378 //----------------------------------------------------------------------------------------------------
3379 // Synchronization
3380 //
3381 // Note: monitorenter & exit are symmetric routines, which is reflected
3382 // in the assembly code structure as well
3383 //
3384 // Stack layout:
3385 //
3386 // [expressions ] <--- rsp = expression stack top
3387 // ..
3388 // [expressions ]
3389 // [monitor entry] <--- monitor block top = expression stack bot
3390 // ..
3391 // [monitor entry]
3392 // [frame data ] <--- monitor block bot
3393 // ...
3394 //      [saved rbp     ] <--- rbp
3397 void TemplateTable::monitorenter() {
3398 transition(atos, vtos);
3400 // check for NULL object
3401 __ null_check(rax);
3403 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3404 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3405 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3406 Label allocated;
3408 // initialize entry pointer
3409 __ xorl(rdx, rdx); // points to free slot or NULL
3411 // find a free slot in the monitor block (result in rdx)
3412 { Label entry, loop, exit;
3413 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3414 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3415 __ jmpb(entry);
3417 __ bind(loop);
3418 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3420 // TODO - need new func here - kbt
3421 if (VM_Version::supports_cmov()) {
3422 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3423 } else {
3424 Label L;
3425 __ jccb(Assembler::notEqual, L);
3426 __ mov(rdx, rcx); // if not used then remember entry in rdx
3427 __ bind(L);
3428 }
3429 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3430 __ jccb(Assembler::equal, exit); // if same object then stop searching
3431 __ addptr(rcx, entry_size); // otherwise advance to next entry
3432 __ bind(entry);
3433 __ cmpptr(rcx, rbx); // check if bottom reached
3434 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3435 __ bind(exit);
3436 }
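  // The search above is, in C-like pseudocode (illustrative only):
  //
  //   free = NULL;
  //   for (e = monitor_block_top; e != monitor_block_bot; e = next(e)) {
  //     if (e->obj == NULL) free = e;  // remember a free slot
  //     if (e->obj == obj)  break;     // stop at an existing entry for this object
  //   }
  //   // if free is still NULL, grow the monitor block (code below)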
3438 __ testptr(rdx, rdx); // check if a slot has been found
3439 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3441 // allocate one if there's no free slot
3442 { Label entry, loop;
3443 // 1. compute new pointers // rsp: old expression stack top
3444 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3445 __ subptr(rsp, entry_size); // move expression stack top
3446 __ subptr(rdx, entry_size); // move expression stack bottom
3447 __ mov(rcx, rsp); // set start value for copy loop
3448 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3449 __ jmp(entry);
3450 // 2. move expression stack contents
3451 __ bind(loop);
3452 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3453 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3454 __ addptr(rcx, wordSize); // advance to next word
3455 __ bind(entry);
3456 __ cmpptr(rcx, rdx); // check if bottom reached
3457 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3458 }
3460 // call run-time routine
3461 // rdx: points to monitor entry
3462 __ bind(allocated);
3464   // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
3465   // The object has already been popped from the stack, so the expression stack looks correct.
3466 __ increment(rsi);
3468 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3469 __ lock_object(rdx);
3471 // check to make sure this monitor doesn't cause stack overflow after locking
3472 __ save_bcp(); // in case of exception
3473 __ generate_stack_overflow_check(0);
3475 // The bcp has already been incremented. Just need to dispatch to next instruction.
3476 __ dispatch_next(vtos);
3477 }
3480 void TemplateTable::monitorexit() {
3481 transition(atos, vtos);
3483 // check for NULL object
3484 __ null_check(rax);
3486 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3487 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3488 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3489 Label found;
3491 // find matching slot
3492 { Label entry, loop;
3493 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3494 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3495 __ jmpb(entry);
3497 __ bind(loop);
3498 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3499 __ jcc(Assembler::equal, found); // if same object then stop searching
3500 __ addptr(rdx, entry_size); // otherwise advance to next entry
3501 __ bind(entry);
3502 __ cmpptr(rdx, rbx); // check if bottom reached
3503 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3504 }
3506   // Error handling: unlocking was not block-structured.
3507 Label end;
3508 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3509 __ should_not_reach_here();
3511 // call run-time routine
3512   // rdx: points to monitor entry
3513 __ bind(found);
3514 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3515 __ unlock_object(rdx);
3516 __ pop_ptr(rax); // discard object
3517 __ bind(end);
3518 }
3521 //----------------------------------------------------------------------------------------------------
3522 // Wide instructions
3524 void TemplateTable::wide() {
3525 transition(vtos, vtos);
3526 __ load_unsigned_byte(rbx, at_bcp(1));
3527 ExternalAddress wtable((address)Interpreter::_wentry_point);
3528 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3529 // Note: the rsi increment step is part of the individual wide bytecode implementations
3530 }
3533 //----------------------------------------------------------------------------------------------------
3534 // Multi arrays
3536 void TemplateTable::multianewarray() {
3537 transition(vtos, atos);
3538 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3539 // last dim is on top of stack; we want address of first one:
3540   //   first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
3541   // the final wordSize is subtracted to point to the beginning of the array.
3542 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3543   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax);  // pass in rax
3544 __ load_unsigned_byte(rbx, at_bcp(3));
3545 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3546 }
3548 #endif /* !CC_INTERP */