Tue, 29 Jun 2010 10:34:00 -0700
6964774: Adjust optimization flags setting
Summary: Adjust performance flags settings.
Reviewed-by: never, phh
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_32.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}
// At the top of the Java expression stack, which may differ from rsp().
// (For category 1 values the two coincide.)
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
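
// Illustrative example (not original code): branch templates jump to the
// not-taken path on the *negated* condition so the taken path can fall
// through into the branch logic.  For if_icmplt the emitted shape is:
//
//   __ cmpl(rdx, rax);                              // compare x with y
//   __ jcc(j_not(TemplateTable::less), not_taken);  // jumps on greaterEqual
//   branch(false, false);                           // taken: adjust bcp, dispatch
//   __ bind(not_taken);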
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
        }
        __ restore_bcp();
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {

  if (!RewriteBytecodes) return;
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
  __ jccb(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
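
// Illustrative note (not original code): after patching, later executions
// dispatch straight to the quick template.  For example, once
//
//   patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
//
// has rewritten the stream, the byte at bcp is _fast_aload_0 (or a pair
// bytecode) and the slow _aload_0 template is not entered again for this
// bci.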
//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
         if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
         if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
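
// Worked example (illustrative): the two operand bytes are big-endian in
// the bytecode stream, but load_unsigned_short reads them little-endian.
// For sipush -2 the stream holds 0xFF 0xFE, so:
//
//   load_unsigned_short  ->  rax = 0x0000FEFF   (bytes swapped)
//   bswapl               ->  rax = 0xFFFE0000   (operand in high half)
//   sarl(rax, 16)        ->  rax = 0xFFFFFFFE   (= -2, sign-extended)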
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from the first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  { Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  Label isOop;
  // atos and itos
  // String is the only oop type we will see here
  __ cmpl(rdx, JVM_CONSTANT_String);
  __ jccb(Assembler::equal, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);
  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(atos);

  if (VerifyOops) {
    __ verify_oop(rax);
  }
  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableMethodHandles) {
    // We should not encounter this bytecode if !EnableMethodHandles.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
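
// Illustrative note: locals live at non-positive offsets from rdi, which
// points at local 0, so negating the index lets iaddress(Register) scale
// it directly.  For iload with operand 2 (assuming a 4-byte stack element):
//
//   load_unsigned_byte  ->  rbx = 2
//   negptr              ->  rbx = -2
//   iaddress(rbx)       ->  Address(rdi, rbx, times_4) = rdi - 8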
void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  Matching _fast_iload means that the
    // next bytecode is neither an iload nor a caload, and that this iload
    // completes an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ fld_d(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ fld_d(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for the exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
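
// Illustrative note: the single jump_cc(aboveEqual) covers both bounds.
// The comparison is unsigned, so a negative index (e.g. -1 = 0xFFFFFFFF)
// is "above" any valid array length and takes the exception path exactly
// as index >= length does -- one compare, one branch, both checks.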
void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload is a frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ fld_d(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes need little code, so they are the most profitable to
  // rewrite; see the illustrative example below.
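  //
  // For illustration only (not original code), a getter such as
  //
  //   int getX() { return x; }   // bytecode: aload_0; getfield #x; ireturn
  //
  // is first quickened to aload_0; _fast_igetfield, and on the next
  // execution of aload_0 the pair is fused into _fast_iaccess_0, saving
  // one dispatch.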
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with the rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ fstp_d(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
  wide_istore();
}

void TemplateTable::wide_dstore() {
  wide_lstore();
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}
void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}
void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
  // Move superklass into EAX
  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array+index*wordSize+12 into a single register.  Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows ECX.  Resets EDI to locals.
  // Superklass in EAX.  Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL (noreg means NULL to do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ fstp_d(daddress(n));
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}
void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
}
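
// Worked example (illustrative): the 64-bit add uses addl/adcl so the
// carry from the low words propagates.  Adding 1 to 0x00000000_FFFFFFFF
// (rdx:rax = high:low):
//
//   addl(rax, rbx)  ->  rax = 0x00000000, CF = 1
//   adcl(rdx, rcx)  ->  rdx = 0x00000001        (0 + 0 + CF)
//
// giving 0x00000001_00000000 as expected.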
void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}
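
// Illustrative note: the correction matters because idiv raises #DE on
// overflow, and the one overflowing case the JLS defines a result for is
// Integer.MIN_VALUE / -1, which must yield MIN_VALUE (remainder 0):
//
//   0x80000000 / 0xFFFFFFFF  ->  quotient 0x80000000, not a trap
//
// corrected_idivl() special-cases this operand pair instead of letting
// the hardware divide fault.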
void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ fadd_s (at_rsp()); break;
    case sub: __ fsubr_s(at_rsp()); break;
    case mul: __ fmul_s (at_rsp()); break;
    case div: __ fdivr_s(at_rsp()); break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop the float operand off the stack
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);           // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}
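
// A sketch of the single-load variant suggested above (illustrative only,
// not the original code): one 32-bit load can fetch index and constant
// together, since wide iinc stores them as consecutive big-endian 16-bit
// operands:
//
//   __ movl(rdx, at_bcp(2));  // [idx_hi][idx_lo][con_hi][con_lo]
//   __ bswapl(rdx);           // index now in bits 31..16, constant in 15..0
//   __ movswl(rcx, rdx);      // sign-extend the constant
//   __ shrl(rdx, 16);         // zero-extend the index (still to be negated)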
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  { TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);         // add one slot for d2ieee()
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);     // truncate upper 24 bits
      __ sarl(rax, 24);     // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF); // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);     // truncate upper 16 bits
      __ sarl(rax, 16);     // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
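    // Worked example (illustrative): i2b on rax = 0x00000180 (= 384):
    //   shll(rax, 24)  ->  0x80000000   (only the low byte survives)
    //   sarl(rax, 24)  ->  0xFFFFFF80   (= -128, the byte sign-extended)
    // i2c and i2s narrow the same way, zero- resp. sign-extending a short.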
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);         // reserve space for f2ieee()
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    default:
      ShouldNotReachHere();
  }
}
void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ fld_s(at_rsp());
  } else {
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                 // ECX holds method
  __ profile_taken_branch(rax, rbx);  // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up EDX with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // increment counter
    __ movl(rax, Address(rcx, be_offset));                   // load backedge counter
    __ incrementl(rax, InvocationCounter::count_increment);  // increment counter
    __ movl(Address(rcx, be_offset), rax);                   // store counter

    __ movl(rax, Address(rcx, inv_offset));             // load invocation counter
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
    __ addl(rax, Address(rcx, be_offset));              // add both counters

    if (ProfileInterpreter) {
      // Test to see if we should create a method data oop
      __ cmp32(rax,
               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, dispatch);

      // if no method data exists, go to profile method
      __ test_method_data_pointer(rax, profile_method);

      if (UseOnStackReplacement) {
        // check for overflow against rbx, which is the MDO taken count
        __ cmp32(rbx,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::below, dispatch);

        // When ProfileInterpreter is on, the backedge_count comes from the
        // methodDataOop, whose value does not get reset on the call to
        // frequency_counter_overflow().  To avoid excessive calls to the overflow
        // routine while the method is being compiled, add a second test to make
        // sure the overflow function is called only once every overflow_frequency.
        const int overflow_frequency = 1024;
        __ andptr(rbx, overflow_frequency - 1);
        __ jcc(Assembler::zero, backedge_counter_overflow);
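        // Illustrative note: masking with overflow_frequency-1 (= 0x3FF)
        // keeps only the low 10 bits of the taken count, so the zero test
        // fires on counts 1024, 2048, 3072, ... -- i.e. the overflow
        // routine runs once per 1024 backedges rather than on every one.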
      }
    } else {
      if (UseOnStackReplacement) {
        // check for overflow against rax, which is the sum of the counters
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {

      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method.  First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      const Register thread = rcx;
      __ get_thread(thread);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax is the OSR buffer; move it to the expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));  // get sender sp
      __ leave();        // remove frame anchor
      __ pop(rdi);       // get return address
      __ mov(rsp, rdx);  // set sp to sender sp

      Label skip;
      Label chkint;

      // The interpreter frame we have removed may be returning to
      // either the callstub or the interpreter.  Since we will
      // now be returning from a compiled (OSR) nmethod we must
      // adjust the return address to one where compiled results
      // can be handled and the fpu stack cleaned.  This is very
      // similar to what an i2c adapter must do.

      // Are we returning to the call stub?

      __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
      __ jcc(Assembler::notEqual, chkint);

      // yes, adjust to the specialized call stub return.
      assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
      __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
      __ jmp(skip);

      __ bind(chkint);

      // Are we returning to the interpreter? Look for the sentinel

      __ cmpl(Address(rdi, -2 * wordSize), Interpreter::return_sentinel);
      __ jcc(Assembler::notEqual, skip);

      // Adjust to the compiled return entry that reenters the interpreter

      __ movptr(rdi, Address(rdi, -wordSize));
      __ bind(skip);

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
1796 Label default_case, continue_execution;
1797 transition(itos, vtos);
1798 // align rsi
1799 __ lea(rbx, at_bcp(wordSize));
1800 __ andptr(rbx, -wordSize);
1801 // load lo & hi
1802 __ movl(rcx, Address(rbx, 1 * wordSize));
1803 __ movl(rdx, Address(rbx, 2 * wordSize));
1804 __ bswapl(rcx);
1805 __ bswapl(rdx);
1806 // check against lo & hi
1807 __ cmpl(rax, rcx);
1808 __ jccb(Assembler::less, default_case);
1809 __ cmpl(rax, rdx);
1810 __ jccb(Assembler::greater, default_case);
1811 // lookup dispatch offset
1812 __ subl(rax, rcx);
1813 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1814 __ profile_switch_case(rax, rbx, rcx);
1815 // continue execution
1816 __ bind(continue_execution);
1817 __ bswapl(rdx);
1818 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1819 __ addptr(rsi, rdx);
1820 __ dispatch_only(vtos);
1821 // handle default
1822 __ bind(default_case);
1823 __ profile_switch_default(rax);
1824 __ movl(rdx, Address(rbx, 0));
1825 __ jmp(continue_execution);
1826 }
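
// Illustrative note: after aligning, the tableswitch data at rbx is laid
// out as [default][lo][hi][offset(lo)]...[offset(hi)], all big-endian.
// For lo=2, hi=4, a key of 3 selects the entry at
// rbx + 3*BytesPerInt + (3-2)*4, i.e. the second jump offset.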
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswapl rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align rsi
  __ lea(rbx, at_bcp(wordSize));  // btw: should be able to get rid of this instruction (change offsets below)
  __ andptr(rbx, -wordSize);
  // set counter
  __ movl(rcx, Address(rbx, wordSize));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
  __ jccb(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmpb(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
}
1871 void TemplateTable::fast_binaryswitch() {
1872 transition(itos, vtos);
1873 // Implementation using the following core algorithm:
1874 //
1875 // int binary_search(int key, LookupswitchPair* array, int n) {
1876 // // Binary search according to "Methodik des Programmierens" by
1877 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1878 // int i = 0;
1879 // int j = n;
1880 // while (i+1 < j) {
1881 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1882 // // with Q: for all i: 0 <= i < n: key < a[i]
1883   //     //   where a stands for the array and assuming that the (nonexistent)
1884 // // element a[n] is infinitely big.
1885 // int h = (i + j) >> 1;
1886 // // i < h < j
1887 // if (key < array[h].fast_match()) {
1888 // j = h;
1889 // } else {
1890 // i = h;
1891 // }
1892 // }
1893 // // R: a[i] <= key < a[i+1] or Q
1894 // // (i.e., if key is within array, i is the correct index)
1895 // return i;
1896 // }
1898 // register allocation
1899 const Register key = rax; // already set (tosca)
1900 const Register array = rbx;
1901 const Register i = rcx;
1902 const Register j = rdx;
1903 const Register h = rdi; // needs to be restored
1904 const Register temp = rsi;
1905 // setup array
1906 __ save_bcp();
1908 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1909 __ andptr(array, -wordSize);
1910 // initialize i & j
1911 __ xorl(i, i); // i = 0;
1912 __ movl(j, Address(array, -wordSize)); // j = length(array);
1913   // Convert j into native byte ordering
1914 __ bswapl(j);
1915 // and start
1916 Label entry;
1917 __ jmp(entry);
1919 // binary search loop
1920 { Label loop;
1921 __ bind(loop);
1922 // int h = (i + j) >> 1;
1923 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1924 __ sarl(h, 1); // h = (i + j) >> 1;
1925 // if (key < array[h].fast_match()) {
1926 // j = h;
1927 // } else {
1928 // i = h;
1929 // }
1930 // Convert array[h].match to native byte-ordering before compare
1931 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1932 __ bswapl(temp);
1933 __ cmpl(key, temp);
1934 if (VM_Version::supports_cmov()) {
1935 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
1936 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
1937 } else {
1938 Label set_i, end_of_if;
1939 __ jccb(Assembler::greaterEqual, set_i); // {
1940 __ mov(j, h); // j = h;
1941 __ jmp(end_of_if); // }
1942 __ bind(set_i); // else {
1943 __ mov(i, h); // i = h;
1944 __ bind(end_of_if); // }
1945 }
1946 // while (i+1 < j)
1947 __ bind(entry);
1948 __ leal(h, Address(i, 1)); // i+1
1949 __ cmpl(h, j); // i+1 < j
1950 __ jcc(Assembler::less, loop);
1951 }
1953 // end of binary search, result index is i (must check again!)
1954 Label default_case;
1955 // Convert array[i].match to native byte-ordering before compare
1956 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1957 __ bswapl(temp);
1958 __ cmpl(key, temp);
1959 __ jcc(Assembler::notEqual, default_case);
1961 // entry found -> j = offset
1962 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1963 __ profile_switch_case(i, key, array);
1964 __ bswapl(j);
1965 LP64_ONLY(__ movslq(j, j));
1966 __ restore_bcp();
1967 __ restore_locals(); // restore rdi
1968 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1970 __ addptr(rsi, j);
1971 __ dispatch_only(vtos);
1973 // default case -> j = default offset
1974 __ bind(default_case);
1975 __ profile_switch_default(i);
1976 __ movl(j, Address(array, -2*wordSize));
1977 __ bswapl(j);
1978 LP64_ONLY(__ movslq(j, j));
1979 __ restore_bcp();
1980 __ restore_locals(); // restore rdi
1981 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1982 __ addptr(rsi, j);
1983 __ dispatch_only(vtos);
1984 }
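// For _return_register_finalizer the code below first registers the
// receiver for finalization when its class requires it; in outline (a
// sketch, with the receiver in local 0):
//
//   oop receiver = locals[0];
//   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
//     InterpreterRuntime::register_finalizer(thread, receiver);
//   remove_activation();  // then jump to the saved return address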
1987 void TemplateTable::_return(TosState state) {
1988 transition(state, state);
1989 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
1991 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1992 assert(state == vtos, "only valid state");
1993 __ movptr(rax, aaddress(0));
1994 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
1995 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1996 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
1997 Label skip_register_finalizer;
1998 __ jcc(Assembler::zero, skip_register_finalizer);
2000 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2002 __ bind(skip_register_finalizer);
2003 }
2005 __ remove_activation(state, rsi);
2006 __ jmp(rsi);
2007 }
2010 // ----------------------------------------------------------------------------
2011 // Volatile variables demand their effects be made known to all CPUs in
2012 // order. Store buffers on most chips allow reads & writes to reorder; the
2013 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2014 // memory barrier (i.e., it's not sufficient that the interpreter does not
2015 // reorder volatile references, the hardware also must not reorder them).
2016 //
2017 // According to the new Java Memory Model (JMM):
2018 // (1) All volatiles are serialized with respect to each other.
2019 // Reads & writes also act as acquire & release, so:
2020 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2021 // the read float up to before the read. It's OK for non-volatile memory refs
2022 // that happen before the volatile read to float down below it.
2023 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2024 // that happen BEFORE the write float down to after the write. It's OK for
2025 // non-volatile memory refs that happen after the volatile write to float up
2026 // before it.
2027 //
2028 // We only put in barriers around volatile refs (they are expensive), not
2029 // _between_ memory refs (that would require us to track the flavor of the
2030 // previous memory refs). Requirements (2) and (3) require some barriers
2031 // before volatile stores and after volatile loads. These nearly cover
2032 // requirement (1) but miss the volatile-store-volatile-load case. This final
2033 // case is placed after volatile-stores although it could just as well go
2034 // before volatile-loads.
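//
// In this file that policy reduces to a single explicit fence after
// volatile stores (see putfield_or_static and fast_storefield below);
// volatile loads get no barrier, since x86 loads already have acquire
// semantics. As a sketch (membar flavors as named below):
//
//   store field;                                     // volatile store
//   if (os::is_MP()) membar(StoreLoad | StoreStore);
//   load field;                                      // volatile load:
//                                                    // no fence emitted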
2035 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2036   // Helper function to insert an is-volatile test and a memory barrier
2037 if( !os::is_MP() ) return; // Not needed on single CPU
2038 __ membar(order_constraint);
2039 }
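// resolve_cache_and_index, in outline (a sketch; the accessor names are
// illustrative, not the real ConstantPoolCacheEntry API):
//
//   get_cache_and_index_at_bcp(cache, index);
//   bool resolved = (byte_no == f1_oop)
//                 ? entry->f1() != NULL                  // 1-1 entry kind
//                 : entry->bytecode_at(byte_no) == bytecode();
//   if (!resolved) {
//     call InterpreterRuntime::resolve_<kind>(bytecode());
//     get_cache_and_index_at_bcp(cache, index);          // regs clobbered
//   }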
2041 void TemplateTable::resolve_cache_and_index(int byte_no,
2042 Register result,
2043 Register Rcache,
2044 Register index,
2045 size_t index_size) {
2046 Register temp = rbx;
2048 assert_different_registers(result, Rcache, index, temp);
2050 Label resolved;
2051 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2052 if (byte_no == f1_oop) {
2053 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2054 // This kind of CP cache entry does not need to match the flags byte, because
2055 // there is a 1-1 relation between bytecode type and CP entry type.
2056 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2057 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2058 __ testptr(result, result);
2059 __ jcc(Assembler::notEqual, resolved);
2060 } else {
2061 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2062 assert(result == noreg, ""); //else change code for setting result
2063 const int shift_count = (1 + byte_no)*BitsPerByte;
2064 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2065 __ shrl(temp, shift_count);
2066 // have we resolved this bytecode?
2067 __ andl(temp, 0xFF);
2068 __ cmpl(temp, (int)bytecode());
2069 __ jcc(Assembler::equal, resolved);
2070 }
2072 // resolve first time through
2073 address entry;
2074 switch (bytecode()) {
2075 case Bytecodes::_getstatic : // fall through
2076 case Bytecodes::_putstatic : // fall through
2077 case Bytecodes::_getfield : // fall through
2078 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2079 case Bytecodes::_invokevirtual : // fall through
2080 case Bytecodes::_invokespecial : // fall through
2081 case Bytecodes::_invokestatic : // fall through
2082 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2083 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2084 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2085 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2086 default : ShouldNotReachHere(); break;
2087 }
2088 __ movl(temp, (int)bytecode());
2089 __ call_VM(noreg, entry, temp);
2090 // Update registers with resolved info
2091 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2092 if (result != noreg)
2093 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2094 __ bind(resolved);
2095 }
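// For reference, a resolved field entry in the constant pool cache carries
// everything the field templates need; roughly (a sketch of the fields
// the loads below pick up):
//   f2    - the field offset (within the object, or the holder for statics)
//   f1    - the holder klass, consumed only when is_static
//   flags - the tos type in the top bits (tosBits) plus the volatile bit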
2098 // The cache and index registers must be set before the call
2099 void TemplateTable::load_field_cp_cache_entry(Register obj,
2100 Register cache,
2101 Register index,
2102 Register off,
2103 Register flags,
2104 bool is_static = false) {
2105 assert_different_registers(cache, index, flags, off);
2107 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2108 // Field offset
2109 __ movptr(off, Address(cache, index, Address::times_ptr,
2110 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2111 // Flags
2112 __ movl(flags, Address(cache, index, Address::times_ptr,
2113 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2115 // klass overwrite register
2116 if (is_static) {
2117 __ movptr(obj, Address(cache, index, Address::times_ptr,
2118 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2119 }
2120 }
2122 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2123 Register method,
2124 Register itable_index,
2125 Register flags,
2126 bool is_invokevirtual,
2127 bool is_invokevfinal /*unused*/,
2128 bool is_invokedynamic) {
2129 // setup registers
2130 const Register cache = rcx;
2131 const Register index = rdx;
2132 assert_different_registers(method, flags);
2133 assert_different_registers(method, cache, index);
2134 assert_different_registers(itable_index, flags);
2135 assert_different_registers(itable_index, cache, index);
2136 // determine constant pool cache field offsets
2137 const int method_offset = in_bytes(
2138 constantPoolCacheOopDesc::base_offset() +
2139 (is_invokevirtual
2140 ? ConstantPoolCacheEntry::f2_offset()
2141 : ConstantPoolCacheEntry::f1_offset()
2142 )
2143 );
2144 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2145 ConstantPoolCacheEntry::flags_offset());
2146 // access constant pool cache fields
2147 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2148 ConstantPoolCacheEntry::f2_offset());
2150 if (byte_no == f1_oop) {
2151 // Resolved f1_oop goes directly into 'method' register.
2152 assert(is_invokedynamic, "");
2153 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2154 } else {
2155 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2156 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2157 }
2158 if (itable_index != noreg) {
2159 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2160 }
2161 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2162 }
2165 // The cache and index registers are expected to be set before the call.
2166 // The correct values of the cache and index registers are preserved.
2167 void TemplateTable::jvmti_post_field_access(Register cache,
2168 Register index,
2169 bool is_static,
2170 bool has_tos) {
2171 if (JvmtiExport::can_post_field_access()) {
2172 // Check to see if a field access watch has been set before we take
2173 // the time to call into the VM.
2174 Label L1;
2175 assert_different_registers(cache, index, rax);
2176 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2177 __ testl(rax,rax);
2178 __ jcc(Assembler::zero, L1);
2180 // cache entry pointer
2181 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2182 __ shll(index, LogBytesPerWord);
2183 __ addptr(cache, index);
2184 if (is_static) {
2185 __ xorptr(rax, rax); // NULL object reference
2186 } else {
2187 __ pop(atos); // Get the object
2188 __ verify_oop(rax);
2189 __ push(atos); // Restore stack state
2190 }
2191 // rax,: object pointer or NULL
2192 // cache: cache entry pointer
2193 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2194 rax, cache);
2195 __ get_cache_and_index_at_bcp(cache, index, 1);
2196 __ bind(L1);
2197 }
2198 }
2200 void TemplateTable::pop_and_check_object(Register r) {
2201 __ pop_ptr(r);
2202 __ null_check(r); // for field access must check obj.
2203 __ verify_oop(r);
2204 }
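// getfield/getstatic below is one long type dispatch; in outline (a sketch):
//
//   resolve entry; obj = is_static ? holder : pop_and_check_object();
//   switch (flags >> tosBits) {
//     case btos: push((jbyte) field); rewrite to _fast_bgetfield; break;
//     case itos: push((jint)  field); rewrite to _fast_igetfield; break;
//     ...        // atos, ctos, stos, ftos, dtos follow the same pattern
//     case ltos: push via fild_d/fistp_d;  // atomic 64-bit load, no rewrite
//   }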
2206 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2207 transition(vtos, vtos);
2209 const Register cache = rcx;
2210 const Register index = rdx;
2211 const Register obj = rcx;
2212 const Register off = rbx;
2213 const Register flags = rax;
2215 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2216 jvmti_post_field_access(cache, index, is_static, false);
2217 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2219 if (!is_static) pop_and_check_object(obj);
2221 const Address lo(obj, off, Address::times_1, 0*wordSize);
2222 const Address hi(obj, off, Address::times_1, 1*wordSize);
2224 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2226 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2227 assert(btos == 0, "change code, btos != 0");
2228 // btos
2229 __ andptr(flags, 0x0f);
2230 __ jcc(Assembler::notZero, notByte);
2232 __ load_signed_byte(rax, lo );
2233 __ push(btos);
2234 // Rewrite bytecode to be faster
2235 if (!is_static) {
2236 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2237 }
2238 __ jmp(Done);
2240 __ bind(notByte);
2241 // itos
2242 __ cmpl(flags, itos );
2243 __ jcc(Assembler::notEqual, notInt);
2245 __ movl(rax, lo );
2246 __ push(itos);
2247 // Rewrite bytecode to be faster
2248 if (!is_static) {
2249 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2250 }
2251 __ jmp(Done);
2253 __ bind(notInt);
2254 // atos
2255 __ cmpl(flags, atos );
2256 __ jcc(Assembler::notEqual, notObj);
2258 __ movl(rax, lo );
2259 __ push(atos);
2260 if (!is_static) {
2261 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2262 }
2263 __ jmp(Done);
2265 __ bind(notObj);
2266 // ctos
2267 __ cmpl(flags, ctos );
2268 __ jcc(Assembler::notEqual, notChar);
2270 __ load_unsigned_short(rax, lo );
2271 __ push(ctos);
2272 if (!is_static) {
2273 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2274 }
2275 __ jmp(Done);
2277 __ bind(notChar);
2278 // stos
2279 __ cmpl(flags, stos );
2280 __ jcc(Assembler::notEqual, notShort);
2282 __ load_signed_short(rax, lo );
2283 __ push(stos);
2284 if (!is_static) {
2285 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2286 }
2287 __ jmp(Done);
2289 __ bind(notShort);
2290 // ltos
2291 __ cmpl(flags, ltos );
2292 __ jcc(Assembler::notEqual, notLong);
2294   // Generate code as if the field were volatile. There just aren't enough
2295   // registers to save that information, and this code is faster than the test.
2296 __ fild_d(lo); // Must load atomically
2297 __ subptr(rsp,2*wordSize); // Make space for store
2298 __ fistp_d(Address(rsp,0));
2299 __ pop(rax);
2300 __ pop(rdx);
2302 __ push(ltos);
2303 // Don't rewrite to _fast_lgetfield for potential volatile case.
2304 __ jmp(Done);
2306 __ bind(notLong);
2307 // ftos
2308 __ cmpl(flags, ftos );
2309 __ jcc(Assembler::notEqual, notFloat);
2311 __ fld_s(lo);
2312 __ push(ftos);
2313 if (!is_static) {
2314 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2315 }
2316 __ jmp(Done);
2318 __ bind(notFloat);
2319 // dtos
2320 __ cmpl(flags, dtos );
2321 __ jcc(Assembler::notEqual, notDouble);
2323 __ fld_d(lo);
2324 __ push(dtos);
2325 if (!is_static) {
2326 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2327 }
2328 __ jmpb(Done);
2330 __ bind(notDouble);
2332 __ stop("Bad state");
2334 __ bind(Done);
2335 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2336 // volatile_barrier( );
2337 }
2340 void TemplateTable::getfield(int byte_no) {
2341 getfield_or_static(byte_no, false);
2342 }
2345 void TemplateTable::getstatic(int byte_no) {
2346 getfield_or_static(byte_no, true);
2347 }
2349 // The cache and index registers are expected to be set before the call.
2350 // The function may destroy various registers, just not the cache and index registers.
2351 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2353 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2355 if (JvmtiExport::can_post_field_modification()) {
2356 // Check to see if a field modification watch has been set before we take
2357 // the time to call into the VM.
2358 Label L1;
2359 assert_different_registers(cache, index, rax);
2360 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2361 __ testl(rax, rax);
2362 __ jcc(Assembler::zero, L1);
2364     // The cache and index registers have already been set.
2365     // This would allow eliminating this call, but the cache and index
2366     // registers would then have to be used consistently after this line.
2367 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2369 if (is_static) {
2370 // Life is simple. Null out the object pointer.
2371 __ xorptr(rbx, rbx);
2372 } else {
2373 // Life is harder. The stack holds the value on top, followed by the object.
2374 // We don't know the size of the value, though; it could be one or two words
2375 // depending on its type. As a result, we must find the type to determine where
2376 // the object is.
2377 Label two_word, valsize_known;
2378 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2379 ConstantPoolCacheEntry::flags_offset())));
2380 __ mov(rbx, rsp);
2381 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2382 // Make sure we don't need to mask rcx for tosBits after the above shift
2383 ConstantPoolCacheEntry::verify_tosBits();
2384 __ cmpl(rcx, ltos);
2385 __ jccb(Assembler::equal, two_word);
2386 __ cmpl(rcx, dtos);
2387 __ jccb(Assembler::equal, two_word);
2388 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2389 __ jmpb(valsize_known);
2391 __ bind(two_word);
2392 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2394 __ bind(valsize_known);
2395 // setup object pointer
2396 __ movptr(rbx, Address(rbx, 0));
2397 }
2398 // cache entry pointer
2399 __ addptr(rax, in_bytes(cp_base_offset));
2400 __ shll(rdx, LogBytesPerWord);
2401 __ addptr(rax, rdx);
2402 // object (tos)
2403 __ mov(rcx, rsp);
2404 // rbx,: object pointer set up above (NULL if static)
2405 // rax,: cache entry pointer
2406 // rcx: jvalue object on the stack
2407 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2408 rbx, rax, rcx);
2409 __ get_cache_and_index_at_bcp(cache, index, 1);
2410 __ bind(L1);
2411 }
2412 }
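// putfield/putstatic mirrors getfield with a volatile twist; in outline
// (a sketch):
//
//   bool is_volatile = (flags >> volatileField) & 1;
//   pop value; if (!is_static) obj = pop_and_check_object();
//   store value by type;   // ltos uses fild_d/fistp_d for an atomic store
//   if (is_volatile) membar(StoreLoad | StoreStore);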
2415 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2416 transition(vtos, vtos);
2418 const Register cache = rcx;
2419 const Register index = rdx;
2420 const Register obj = rcx;
2421 const Register off = rbx;
2422 const Register flags = rax;
2424 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2425 jvmti_post_field_mod(cache, index, is_static);
2426 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2428 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2429 // volatile_barrier( );
2431 Label notVolatile, Done;
2432 __ movl(rdx, flags);
2433 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2434 __ andl(rdx, 0x1);
2436 // field addresses
2437 const Address lo(obj, off, Address::times_1, 0*wordSize);
2438 const Address hi(obj, off, Address::times_1, 1*wordSize);
2440 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2442 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2443 assert(btos == 0, "change code, btos != 0");
2444 // btos
2445 __ andl(flags, 0x0f);
2446 __ jcc(Assembler::notZero, notByte);
2448 __ pop(btos);
2449 if (!is_static) pop_and_check_object(obj);
2450 __ movb(lo, rax );
2451 if (!is_static) {
2452 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2453 }
2454 __ jmp(Done);
2456 __ bind(notByte);
2457 // itos
2458 __ cmpl(flags, itos );
2459 __ jcc(Assembler::notEqual, notInt);
2461 __ pop(itos);
2462 if (!is_static) pop_and_check_object(obj);
2464 __ movl(lo, rax );
2465 if (!is_static) {
2466 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2467 }
2468 __ jmp(Done);
2470 __ bind(notInt);
2471 // atos
2472 __ cmpl(flags, atos );
2473 __ jcc(Assembler::notEqual, notObj);
2475 __ pop(atos);
2476 if (!is_static) pop_and_check_object(obj);
2478 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2480 if (!is_static) {
2481 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2482 }
2484 __ jmp(Done);
2486 __ bind(notObj);
2487 // ctos
2488 __ cmpl(flags, ctos );
2489 __ jcc(Assembler::notEqual, notChar);
2491 __ pop(ctos);
2492 if (!is_static) pop_and_check_object(obj);
2493 __ movw(lo, rax );
2494 if (!is_static) {
2495 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2496 }
2497 __ jmp(Done);
2499 __ bind(notChar);
2500 // stos
2501 __ cmpl(flags, stos );
2502 __ jcc(Assembler::notEqual, notShort);
2504 __ pop(stos);
2505 if (!is_static) pop_and_check_object(obj);
2506 __ movw(lo, rax );
2507 if (!is_static) {
2508 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2509 }
2510 __ jmp(Done);
2512 __ bind(notShort);
2513 // ltos
2514 __ cmpl(flags, ltos );
2515 __ jcc(Assembler::notEqual, notLong);
2517 Label notVolatileLong;
2518 __ testl(rdx, rdx);
2519 __ jcc(Assembler::zero, notVolatileLong);
2521 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2522 if (!is_static) pop_and_check_object(obj);
2524 // Replace with real volatile test
2525 __ push(rdx);
2526 __ push(rax); // Must update atomically with FIST
2527 __ fild_d(Address(rsp,0)); // So load into FPU register
2528 __ fistp_d(lo); // and put into memory atomically
2529 __ addptr(rsp, 2*wordSize);
2530 // volatile_barrier();
2531 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2532 Assembler::StoreStore));
2533 // Don't rewrite volatile version
2534 __ jmp(notVolatile);
2536 __ bind(notVolatileLong);
2538 __ pop(ltos); // overwrites rdx
2539 if (!is_static) pop_and_check_object(obj);
2540 NOT_LP64(__ movptr(hi, rdx));
2541 __ movptr(lo, rax);
2542 if (!is_static) {
2543 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2544 }
2545 __ jmp(notVolatile);
2547 __ bind(notLong);
2548 // ftos
2549 __ cmpl(flags, ftos );
2550 __ jcc(Assembler::notEqual, notFloat);
2552 __ pop(ftos);
2553 if (!is_static) pop_and_check_object(obj);
2554 __ fstp_s(lo);
2555 if (!is_static) {
2556 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2557 }
2558 __ jmp(Done);
2560 __ bind(notFloat);
2561 // dtos
2562 __ cmpl(flags, dtos );
2563 __ jcc(Assembler::notEqual, notDouble);
2565 __ pop(dtos);
2566 if (!is_static) pop_and_check_object(obj);
2567 __ fstp_d(lo);
2568 if (!is_static) {
2569 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2570 }
2571 __ jmp(Done);
2573 __ bind(notDouble);
2575 __ stop("Bad state");
2577 __ bind(Done);
2579 // Check for volatile store
2580 __ testl(rdx, rdx);
2581 __ jcc(Assembler::zero, notVolatile);
2582 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2583 Assembler::StoreStore));
2584 __ bind(notVolatile);
2585 }
2588 void TemplateTable::putfield(int byte_no) {
2589 putfield_or_static(byte_no, false);
2590 }
2593 void TemplateTable::putstatic(int byte_no) {
2594 putfield_or_static(byte_no, true);
2595 }
2597 void TemplateTable::jvmti_post_fast_field_mod() {
2598 if (JvmtiExport::can_post_field_modification()) {
2599 // Check to see if a field modification watch has been set before we take
2600 // the time to call into the VM.
2601 Label L2;
2602 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2603 __ testl(rcx,rcx);
2604 __ jcc(Assembler::zero, L2);
2605 __ pop_ptr(rbx); // copy the object pointer from tos
2606 __ verify_oop(rbx);
2607 __ push_ptr(rbx); // put the object pointer back on tos
2608 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2609 __ mov(rcx, rsp);
2610 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2611 __ xorptr(rbx, rbx);
2612 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2613 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2614 switch (bytecode()) { // load values into the jvalue object
2615 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2616 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2617 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2618 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2619 case Bytecodes::_fast_lputfield:
2620 NOT_LP64(__ movptr(hi_value, rdx));
2621 __ movptr(lo_value, rax);
2622 break;
2624 // need to call fld_s() after fstp_s() to restore the value for below
2625 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2627 // need to call fld_d() after fstp_d() to restore the value for below
2628 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2630 // since rcx is not an object we don't call store_check() here
2631 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2633 default: ShouldNotReachHere();
2634 }
2635 __ pop_ptr(rbx); // restore copy of object pointer
2637 // Save rax, and sometimes rdx because call_VM() will clobber them,
2638 // then use them for JVM/DI purposes
2639 __ push(rax);
2640 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2641 // access constant pool cache entry
2642 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2643 __ verify_oop(rbx);
2644 // rbx,: object pointer copied above
2645 // rax,: cache entry pointer
2646 // rcx: jvalue object on the stack
2647 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2648 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2649 __ pop(rax); // restore lower value
2650 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2651 __ bind(L2);
2652 }
2653 }
2655 void TemplateTable::fast_storefield(TosState state) {
2656 transition(state, vtos);
2658 ByteSize base = constantPoolCacheOopDesc::base_offset();
2660 jvmti_post_fast_field_mod();
2662 // access constant pool cache
2663 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2665   // Test for volatile with rdx, but rdx is the tos register for lputfield.
2666 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2667 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2668 ConstantPoolCacheEntry::flags_offset())));
2670 // replace index with field offset from cache entry
2671 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2673 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2674 // volatile_barrier( );
2676 Label notVolatile, Done;
2677 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2678 __ andl(rdx, 0x1);
2679 // Check for volatile store
2680 __ testl(rdx, rdx);
2681 __ jcc(Assembler::zero, notVolatile);
2683 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2685 // Get object from stack
2686 pop_and_check_object(rcx);
2688 // field addresses
2689 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2690 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2692 // access field
2693 switch (bytecode()) {
2694 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2695 case Bytecodes::_fast_sputfield: // fall through
2696 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2697 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2698 case Bytecodes::_fast_lputfield:
2699 NOT_LP64(__ movptr(hi, rdx));
2700 __ movptr(lo, rax);
2701 break;
2702 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2703 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2704 case Bytecodes::_fast_aputfield: {
2705 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2706 break;
2707 }
2708 default:
2709 ShouldNotReachHere();
2710 }
2712 Label done;
2713 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2714 Assembler::StoreStore));
2715   // Barriers are so large that a short branch doesn't reach!
2716 __ jmp(done);
2718   // Same code as above, but we don't need rdx to test for volatile.
2719 __ bind(notVolatile);
2721 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2723 // Get object from stack
2724 pop_and_check_object(rcx);
2726 // access field
2727 switch (bytecode()) {
2728 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2729 case Bytecodes::_fast_sputfield: // fall through
2730 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2731 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2732 case Bytecodes::_fast_lputfield:
2733 NOT_LP64(__ movptr(hi, rdx));
2734 __ movptr(lo, rax);
2735 break;
2736 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2737 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2738 case Bytecodes::_fast_aputfield: {
2739 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2740 break;
2741 }
2742 default:
2743 ShouldNotReachHere();
2744 }
2745 __ bind(done);
2746 }
2749 void TemplateTable::fast_accessfield(TosState state) {
2750 transition(atos, state);
2752 // do the JVMTI work here to avoid disturbing the register state below
2753 if (JvmtiExport::can_post_field_access()) {
2754 // Check to see if a field access watch has been set before we take
2755 // the time to call into the VM.
2756 Label L1;
2757 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2758 __ testl(rcx,rcx);
2759 __ jcc(Assembler::zero, L1);
2760 // access constant pool cache entry
2761 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2762 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2763 __ verify_oop(rax);
2764 // rax,: object pointer copied above
2765 // rcx: cache entry pointer
2766 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2767 __ pop_ptr(rax); // restore object pointer
2768 __ bind(L1);
2769 }
2771 // access constant pool cache
2772 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2773 // replace index with field offset from cache entry
2774 __ movptr(rbx, Address(rcx,
2775 rbx,
2776 Address::times_ptr,
2777 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2780 // rax,: object
2781 __ verify_oop(rax);
2782 __ null_check(rax);
2783 // field addresses
2784 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2785 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2787 // access field
2788 switch (bytecode()) {
2789 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2790 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2791 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2792 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2793 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2794 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2795 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2796 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2797 default:
2798 ShouldNotReachHere();
2799 }
2801 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2802 // volatile_barrier( );
2803 }
2805 void TemplateTable::fast_xaccess(TosState state) {
2806 transition(vtos, state);
2807 // get receiver
2808 __ movptr(rax, aaddress(0));
2809 // access constant pool cache
2810 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2811 __ movptr(rbx, Address(rcx,
2812 rdx,
2813 Address::times_ptr,
2814 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2815 // make sure exception is reported in correct bcp range (getfield is next instruction)
2816 __ increment(rsi);
2817 __ null_check(rax);
2818 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2819 if (state == itos) {
2820 __ movl(rax, lo);
2821 } else if (state == atos) {
2822 __ movptr(rax, lo);
2823 __ verify_oop(rax);
2824 } else if (state == ftos) {
2825 __ fld_s(lo);
2826 } else {
2827 ShouldNotReachHere();
2828 }
2829 __ decrement(rsi);
2830 }
2834 //----------------------------------------------------------------------------------------------------
2835 // Calls
2837 void TemplateTable::count_calls(Register method, Register temp) {
2838 // implemented elsewhere
2839 ShouldNotReachHere();
2840 }
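// prepare_invoke, in outline (a sketch with illustrative helper names):
//
//   resolve entry; load method (or CallSite) and flags;
//   if (call has a receiver)                   // not static, not dynamic
//     recv = stack_word((flags & 0xFF) - 1);   // low flag byte: arg size
//   if (invokespecial) null_check(recv);
//   push(return_entry_for(flags >> tosBits));  // fake 'return address'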
2843 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2844 // determine flags
2845 Bytecodes::Code code = bytecode();
2846 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2847 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2848 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2849 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2850 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2851 const bool receiver_null_check = is_invokespecial;
2852 const bool save_flags = is_invokeinterface || is_invokevirtual;
2853 // setup registers & access constant pool cache
2854 const Register recv = rcx;
2855 const Register flags = rdx;
2856 assert_different_registers(method, index, recv, flags);
2858 // save 'interpreter return address'
2859 __ save_bcp();
2861 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2863 // load receiver if needed (note: no return address pushed yet)
2864 if (load_receiver) {
2865 assert(!is_invokedynamic, "");
2866 __ movl(recv, flags);
2867 __ andl(recv, 0xFF);
2868 // recv count is 0 based?
2869 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2870 __ movptr(recv, recv_addr);
2871 __ verify_oop(recv);
2872 }
2874 // do null check if needed
2875 if (receiver_null_check) {
2876 __ null_check(recv);
2877 }
2879 if (save_flags) {
2880 __ mov(rsi, flags);
2881 }
2883 // compute return type
2884 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2885 // Make sure we don't need to mask flags for tosBits after the above shift
2886 ConstantPoolCacheEntry::verify_tosBits();
2887 // load return address
2888 {
2889 address table_addr;
2890 if (is_invokeinterface || is_invokedynamic)
2891 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2892 else
2893 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2894 ExternalAddress table(table_addr);
2895 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2896 }
2898 // push return address
2899 __ push(flags);
2901 // Restore flag value from the constant pool cache, and restore rsi
2902 // for later null checks. rsi is the bytecode pointer
2903 if (save_flags) {
2904 __ mov(flags, rsi);
2905 __ restore_bcp();
2906 }
2907 }
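// invokevirtual_helper dispatches either directly (final methods) or
// through the receiver's vtable; in outline (a sketch):
//
//   if (flags & (1 << vfinalMethod)) {
//     method = f2;                            // f2 is the methodOop itself
//   } else {
//     method = recv->klass()->vtable()[f2];   // f2 is the vtable index
//   }
//   jump_from_interpreted(method);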
2910 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2911 Register flags) {
2913 // Uses temporary registers rax, rdx
2914 assert_different_registers(index, recv, rax, rdx);
2916 // Test for an invoke of a final method
2917 Label notFinal;
2918 __ movl(rax, flags);
2919 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2920 __ jcc(Assembler::zero, notFinal);
2922 Register method = index; // method must be rbx,
2923 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2925 // do the call - the index is actually the method to call
2926 __ verify_oop(method);
2928 // It's final, need a null check here!
2929 __ null_check(recv);
2931 // profile this call
2932 __ profile_final_call(rax);
2934 __ jump_from_interpreted(method, rax);
2936 __ bind(notFinal);
2938 // get receiver klass
2939 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2940   // Keep recv in rcx; the callee expects it there
2941 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2942 __ verify_oop(rax);
2944 // profile this call
2945 __ profile_virtual_call(rax, rdi, rdx);
2947 // get target methodOop & entry point
2948 const int base = instanceKlass::vtable_start_offset() * wordSize;
2949 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2950 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2951 __ jump_from_interpreted(method, rdx);
2952 }
2955 void TemplateTable::invokevirtual(int byte_no) {
2956 transition(vtos, vtos);
2957 assert(byte_no == f2_byte, "use this argument");
2958 prepare_invoke(rbx, noreg, byte_no);
2960 // rbx,: index
2961 // rcx: receiver
2962 // rdx: flags
2964 invokevirtual_helper(rbx, rcx, rdx);
2965 }
2968 void TemplateTable::invokespecial(int byte_no) {
2969 transition(vtos, vtos);
2970 assert(byte_no == f1_byte, "use this argument");
2971 prepare_invoke(rbx, noreg, byte_no);
2972 // do the call
2973 __ verify_oop(rbx);
2974 __ profile_call(rax);
2975 __ jump_from_interpreted(rbx, rax);
2976 }
2979 void TemplateTable::invokestatic(int byte_no) {
2980 transition(vtos, vtos);
2981 assert(byte_no == f1_byte, "use this argument");
2982 prepare_invoke(rbx, noreg, byte_no);
2983 // do the call
2984 __ verify_oop(rbx);
2985 __ profile_call(rax);
2986 __ jump_from_interpreted(rbx, rax);
2987 }
2990 void TemplateTable::fast_invokevfinal(int byte_no) {
2991 transition(vtos, vtos);
2992 assert(byte_no == f2_byte, "use this argument");
2993 __ stop("fast_invokevfinal not used on x86");
2994 }
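// invokeinterface, in outline (a sketch): after the java.lang.Object
// special case, the receiver's itable is searched:
//
//   if (flags & (1 << methodInterface))   // Object method via interface
//     take the invokevirtual path;
//   method = itable lookup of (interface, itable_index) in recv->klass();
//   if (interface not implemented) throw IncompatibleClassChangeError;
//   if (method == NULL)            throw AbstractMethodError;
//   jump_from_interpreted(method);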
2997 void TemplateTable::invokeinterface(int byte_no) {
2998 transition(vtos, vtos);
2999 assert(byte_no == f1_byte, "use this argument");
3000 prepare_invoke(rax, rbx, byte_no);
3002 // rax,: Interface
3003 // rbx,: index
3004 // rcx: receiver
3005 // rdx: flags
3007 // Special case of invokeinterface called for virtual method of
3008 // java.lang.Object. See cpCacheOop.cpp for details.
3009 // This code isn't produced by javac, but could be produced by
3010   // another compliant Java compiler.
3011 Label notMethod;
3012 __ movl(rdi, rdx);
3013 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3014 __ jcc(Assembler::zero, notMethod);
3016 invokevirtual_helper(rbx, rcx, rdx);
3017 __ bind(notMethod);
3019 // Get receiver klass into rdx - also a null check
3020 __ restore_locals(); // restore rdi
3021 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
3022 __ verify_oop(rdx);
3024 // profile this call
3025 __ profile_virtual_call(rdx, rsi, rdi);
3027 Label no_such_interface, no_such_method;
3029 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3030 rdx, rax, rbx,
3031 // outputs: method, scan temp. reg
3032 rbx, rsi,
3033 no_such_interface);
3035 // rbx,: methodOop to call
3036 // rcx: receiver
3037 // Check for abstract method error
3038 // Note: This should be done more efficiently via a throw_abstract_method_error
3039 // interpreter entry point and a conditional jump to it in case of a null
3040 // method.
3041 __ testptr(rbx, rbx);
3042 __ jcc(Assembler::zero, no_such_method);
3044 // do the call
3045 // rcx: receiver
3046 // rbx,: methodOop
3047 __ jump_from_interpreted(rbx, rdx);
3048 __ should_not_reach_here();
3050 // exception handling code follows...
3051 // note: must restore interpreter registers to canonical
3052 // state for exception handling to work correctly!
3054 __ bind(no_such_method);
3055 // throw exception
3056 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3057 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3058 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3059 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3060 // the call_VM checks for exception, so we should never return here.
3061 __ should_not_reach_here();
3063 __ bind(no_such_interface);
3064 // throw exception
3065 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3066 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3067 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3068 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3069 InterpreterRuntime::throw_IncompatibleClassChangeError));
3070 // the call_VM checks for exception, so we should never return here.
3071 __ should_not_reach_here();
3072 }
3074 void TemplateTable::invokedynamic(int byte_no) {
3075 transition(vtos, vtos);
3077 if (!EnableInvokeDynamic) {
3078 // We should not encounter this bytecode if !EnableInvokeDynamic.
3079 // The verifier will stop it. However, if we get past the verifier,
3080 // this will stop the thread in a reasonable way, without crashing the JVM.
3081 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3082 InterpreterRuntime::throw_IncompatibleClassChangeError));
3083 // the call_VM checks for exception, so we should never return here.
3084 __ should_not_reach_here();
3085 return;
3086 }
3088 assert(byte_no == f1_oop, "use this argument");
3089 prepare_invoke(rax, rbx, byte_no);
3091 // rax: CallSite object (f1)
3092 // rbx: unused (f2)
3093 // rdx: flags (unused)
3095 if (ProfileInterpreter) {
3096 Label L;
3097 // %%% should make a type profile for any invokedynamic that takes a ref argument
3098 // profile this call
3099 __ profile_call(rsi);
3100 }
3102 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3103 __ null_check(rcx);
3104 __ prepare_to_jump_from_interpreted();
3105 __ jump_to_method_handle_entry(rcx, rdx);
3106 }
3108 //----------------------------------------------------------------------------------------------------
3109 // Allocation
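// The fast path of _new below is a bump-the-pointer allocation, first in
// the TLAB, then (if allowed) in the shared Eden; in outline (a sketch):
//
//   top = tlab.top(); end = top + size_in_bytes;
//   if (end > tlab.end()) goto eden_or_slow_case;
//   tlab.set_top(end);
//   zero the instance fields; install mark word and klass; done;
//   // the shared Eden path does the same bump with lock cmpxchg + retry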
3111 void TemplateTable::_new() {
3112 transition(vtos, atos);
3113 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3114 Label slow_case;
3115 Label done;
3116 Label initialize_header;
3117 Label initialize_object; // including clearing the fields
3118 Label allocate_shared;
3120 __ get_cpool_and_tags(rcx, rax);
3121 // get instanceKlass
3122 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3123   __ push(rcx);  // save the klass so it is available later for initializing the header
3125 // make sure the class we're about to instantiate has been resolved.
3126   // Note: slow_case pops the stack, which is why we loaded the class and pushed it above
3127 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3128 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3129 __ jcc(Assembler::notEqual, slow_case);
3131 // make sure klass is initialized & doesn't have finalizer
3132 // make sure klass is fully initialized
3133 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3134 __ jcc(Assembler::notEqual, slow_case);
3136 // get instance_size in instanceKlass (scaled to a count of bytes)
3137 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3138 // test to see if it has a finalizer or is malformed in some way
3139 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3140 __ jcc(Assembler::notZero, slow_case);
3142 //
3143 // Allocate the instance
3144 // 1) Try to allocate in the TLAB
3145 // 2) if fail and the object is large allocate in the shared Eden
3146 // 3) if the above fails (or is not applicable), go to a slow case
3147 // (creates a new TLAB, etc.)
3149 const bool allow_shared_alloc =
3150 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3152 if (UseTLAB) {
3153 const Register thread = rcx;
3155 __ get_thread(thread);
3156 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3157 __ lea(rbx, Address(rax, rdx, Address::times_1));
3158 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3159 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3160 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3161 if (ZeroTLAB) {
3162 // the fields have been already cleared
3163 __ jmp(initialize_header);
3164 } else {
3165 // initialize both the header and fields
3166 __ jmp(initialize_object);
3167 }
3168 }
3170 // Allocation in the shared Eden, if allowed.
3171 //
3172 // rdx: instance size in bytes
3173 if (allow_shared_alloc) {
3174 __ bind(allocate_shared);
3176 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3178 Label retry;
3179 __ bind(retry);
3180 __ movptr(rax, heap_top);
3181 __ lea(rbx, Address(rax, rdx, Address::times_1));
3182 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3183 __ jcc(Assembler::above, slow_case);
3185     // Compare rax, with the top addr, and if still equal, store the new
3186     // top addr (in rbx,) at the address of the top addr pointer. Sets ZF if it
3187     // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3188 //
3189 // rax,: object begin
3190 // rbx,: object end
3191 // rdx: instance size in bytes
3192 __ locked_cmpxchgptr(rbx, heap_top);
3194 // if someone beat us on the allocation, try again, otherwise continue
3195 __ jcc(Assembler::notEqual, retry);
3196 }
3198 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3199 // The object is initialized before the header. If the object size is
3200 // zero, go directly to the header initialization.
3201 __ bind(initialize_object);
3202 __ decrement(rdx, sizeof(oopDesc));
3203 __ jcc(Assembler::zero, initialize_header);
3205 // Initialize topmost object field, divide rdx by 8, check if odd and
3206 // test if zero.
3207 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3208 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3210     // rdx must have been a multiple of 8
3211 #ifdef ASSERT
3212     // make sure rdx was a multiple of 8
3213 Label L;
3214 // Ignore partial flag stall after shrl() since it is debug VM
3215 __ jccb(Assembler::carryClear, L);
3216 __ stop("object size is not multiple of 2 - adjust this code");
3217 __ bind(L);
3218 // rdx must be > 0, no extra check needed here
3219 #endif
3221 // initialize remaining object fields: rdx was a multiple of 8
3222 { Label loop;
3223 __ bind(loop);
3224 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3225 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3226 __ decrement(rdx);
3227 __ jcc(Assembler::notZero, loop);
3228 }
3230 // initialize object header only.
3231 __ bind(initialize_header);
3232 if (UseBiasedLocking) {
3233 __ pop(rcx); // get saved klass back in the register.
3234 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3235 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3236 } else {
3237 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3238 (int32_t)markOopDesc::prototype()); // header
3239 __ pop(rcx); // get saved klass back in the register.
3240 }
3241 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3243 {
3244 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3245 // Trigger dtrace event for fastpath
3246 __ push(atos);
3247 __ call_VM_leaf(
3248 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3249 __ pop(atos);
3250 }
3252 __ jmp(done);
3253 }
3255 // slow case
3256 __ bind(slow_case);
3257 __ pop(rcx); // restore stack pointer to what it was when we came in.
3258 __ get_constant_pool(rax);
3259 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3260 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3262 // continue
3263 __ bind(done);
3264 }
3267 void TemplateTable::newarray() {
3268 transition(itos, atos);
3269 __ push_i(rax); // make sure everything is on the stack
3270 __ load_unsigned_byte(rdx, at_bcp(1));
3271 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3272 __ pop_i(rdx); // discard size
3273 }
3276 void TemplateTable::anewarray() {
3277 transition(itos, atos);
3278 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3279 __ get_constant_pool(rcx);
3280 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3281 }
3284 void TemplateTable::arraylength() {
3285 transition(atos, itos);
3286 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3287 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3288 }
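// checkcast and instanceof share their shape; in outline (a sketch):
//
//   if (obj == NULL) { maybe profile_null_seen; done; }
//   klass = CP entry if already quickened, else resolve via quicken_io_cc;
//   if (obj->klass() is a subtype of klass)  succeed;
//   else checkcast throws ClassCastException, instanceof pushes 0;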
3291 void TemplateTable::checkcast() {
3292 transition(atos, atos);
3293 Label done, is_null, ok_is_subtype, quicked, resolved;
3294 __ testptr(rax, rax); // Object is in EAX
3295 __ jcc(Assembler::zero, is_null);
3297 // Get cpool & tags index
3298 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3299 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3300 // See if bytecode has already been quicked
3301 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3302 __ jcc(Assembler::equal, quicked);
3304 __ push(atos);
3305 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3306 __ pop_ptr(rdx);
3307 __ jmpb(resolved);
3309 // Get superklass in EAX and subklass in EBX
3310 __ bind(quicked);
3311 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3312 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3314 __ bind(resolved);
3315 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3317 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3318 // Superklass in EAX. Subklass in EBX.
3319 __ gen_subtype_check( rbx, ok_is_subtype );
3321 // Come here on failure
3322 __ push(rdx);
3323 // object is at TOS
3324 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3326 // Come here on success
3327 __ bind(ok_is_subtype);
3328 __ mov(rax,rdx); // Restore object in EDX
3330 // Collect counts on whether this check-cast sees NULLs a lot or not.
3331 if (ProfileInterpreter) {
3332 __ jmp(done);
3333 __ bind(is_null);
3334 __ profile_null_seen(rcx);
3335 } else {
3336 __ bind(is_null); // same as 'done'
3337 }
3338 __ bind(done);
3339 }
3342 void TemplateTable::instanceof() {
3343 transition(atos, itos);
3344 Label done, is_null, ok_is_subtype, quicked, resolved;
3345 __ testptr(rax, rax);
3346 __ jcc(Assembler::zero, is_null);
3348 // Get cpool & tags index
3349 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3350 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3351 // See if bytecode has already been quicked
3352 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3353 __ jcc(Assembler::equal, quicked);
3355 __ push(atos);
3356 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3357 __ pop_ptr(rdx);
3358 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3359 __ jmp(resolved);
3361 // Get superklass in EAX and subklass in EDX
3362 __ bind(quicked);
3363 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3364 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3366 __ bind(resolved);
3368 // Generate subtype check. Blows ECX. Resets EDI.
3369 // Superklass in EAX. Subklass in EDX.
3370 __ gen_subtype_check( rdx, ok_is_subtype );
3372 // Come here on failure
3373 __ xorl(rax,rax);
3374 __ jmpb(done);
3375 // Come here on success
3376 __ bind(ok_is_subtype);
3377 __ movl(rax, 1);
3379 // Collect counts on whether this test sees NULLs a lot or not.
3380 if (ProfileInterpreter) {
3381 __ jmp(done);
3382 __ bind(is_null);
3383 __ profile_null_seen(rcx);
3384 } else {
3385 __ bind(is_null); // same as 'done'
3386 }
3387 __ bind(done);
3388 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3389 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3390 }
3393 //----------------------------------------------------------------------------------------------------
3394 // Breakpoints
3395 void TemplateTable::_breakpoint() {
3397   // Note: We get here even if we are single stepping;
3398   // jbug insists on setting breakpoints at every bytecode
3399   // even if we are in single step mode.
3401 transition(vtos, vtos);
3403 // get the unpatched byte code
3404 __ get_method(rcx);
3405 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3406 __ mov(rbx, rax);
3408 // post the breakpoint event
3409 __ get_method(rcx);
3410 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3412 // complete the execution of original bytecode
3413 __ dispatch_only_normal(vtos);
3414 }
3417 //----------------------------------------------------------------------------------------------------
3418 // Exceptions
3420 void TemplateTable::athrow() {
3421 transition(atos, vtos);
3422 __ null_check(rax);
3423 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3424 }
3427 //----------------------------------------------------------------------------------------------------
3428 // Synchronization
3429 //
3430 // Note: monitorenter & exit are symmetric routines, which is reflected
3431 // in the assembly code structure as well.
3432 //
3433 // Stack layout:
3434 //
3435 // [expressions ] <--- rsp = expression stack top
3436 // ..
3437 // [expressions ]
3438 // [monitor entry] <--- monitor block top = expression stack bot
3439 // ..
3440 // [monitor entry]
3441 // [frame data ] <--- monitor block bot
3442 // ...
3443 // [saved rbp, ] <--- rbp,
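//
// monitorenter scans the block for a slot and grows the block when none is
// free; in outline (a sketch):
//
//   BasicObjectLock* slot = NULL;
//   for (e = monitor top; e != monitor bot; e++) {
//     if (e->obj() == NULL)    slot = e;    // remember a free slot
//     if (e->obj() == lockee)  break;       // same object: stop searching
//   }
//   if (slot == NULL) slot = grow block;    // shifts expression stack down
//   slot->set_obj(lockee); lock_object(slot);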
3446 void TemplateTable::monitorenter() {
3447 transition(atos, vtos);
3449 // check for NULL object
3450 __ null_check(rax);
3452 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3453 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3454 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3455 Label allocated;
3457 // initialize entry pointer
3458 __ xorl(rdx, rdx); // points to free slot or NULL
3460 // find a free slot in the monitor block (result in rdx)
3461 { Label entry, loop, exit;
3462 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3463 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3464 __ jmpb(entry);
3466 __ bind(loop);
3467 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3469 // TODO - need new func here - kbt
3470 if (VM_Version::supports_cmov()) {
3471 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3472 } else {
3473 Label L;
3474 __ jccb(Assembler::notEqual, L);
3475 __ mov(rdx, rcx); // if not used then remember entry in rdx
3476 __ bind(L);
3477 }
3478 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3479 __ jccb(Assembler::equal, exit); // if same object then stop searching
3480 __ addptr(rcx, entry_size); // otherwise advance to next entry
3481 __ bind(entry);
3482 __ cmpptr(rcx, rbx); // check if bottom reached
3483 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3484 __ bind(exit);
3485 }
3487 __ testptr(rdx, rdx); // check if a slot has been found
3488 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3490 // allocate one if there's no free slot
3491 { Label entry, loop;
3492 // 1. compute new pointers // rsp: old expression stack top
3493 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3494 __ subptr(rsp, entry_size); // move expression stack top
3495 __ subptr(rdx, entry_size); // move expression stack bottom
3496 __ mov(rcx, rsp); // set start value for copy loop
3497 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3498 __ jmp(entry);
3499 // 2. move expression stack contents
3500 __ bind(loop);
3501 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3502 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3503 __ addptr(rcx, wordSize); // advance to next word
3504 __ bind(entry);
3505 __ cmpptr(rcx, rdx); // check if bottom reached
3506 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3507 }
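// In C++ the allocation amounts to (sketch only; entry_words ==
// entry_size / wordSize, rcx plays p, rdx plays new_bot):
//
//   intptr_t* new_sp  = old_sp  - entry_words;
//   intptr_t* new_bot = old_bot - entry_words;
//   for (intptr_t* p = new_sp; p != new_bot; p++) {
//     p[0] = p[entry_words];  // slide each stack word down one entry;
//   }                         // safe upward copy: dest is below source
//   // the vacated entry sits where the old expression stack bottom was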
3509 // call run-time routine
3510 // rdx: points to monitor entry
3511 __ bind(allocated);
3513 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3514 // The object has already been popped from the stack, so the expression stack looks correct.
3515 __ increment(rsi);
3517 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3518 __ lock_object(rdx);
3520 // check to make sure this monitor doesn't cause stack overflow after locking
3521 __ save_bcp(); // in case of exception
3522 __ generate_stack_overflow_check(0);
3524 // The bcp has already been incremented. Just need to dispatch to next instruction.
3525 __ dispatch_next(vtos);
3526 }
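// For reference, javac compiles
//
//   synchronized (obj) { body(); }
//
// into bytecode of the shape (abbreviated javap output):
//
//   aload obj; dup; astore tmp
//   monitorenter                              // the template above
//   ... body ...
//   aload tmp; monitorexit; goto done         // normal exit
//   handler: aload tmp; monitorexit; athrow   // exceptional exit
//
// which is why monitorenter and monitorexit (below) are generated as
// symmetric templates.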
3529 void TemplateTable::monitorexit() {
3530 transition(atos, vtos);
3532 // check for NULL object
3533 __ null_check(rax);
3535 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3536 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3537 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3538 Label found;
3540 // find matching slot
3541 { Label entry, loop;
3542 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3543 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3544 __ jmpb(entry);
3546 __ bind(loop);
3547 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3548 __ jcc(Assembler::equal, found); // if same object then stop searching
3549 __ addptr(rdx, entry_size); // otherwise advance to next entry
3550 __ bind(entry);
3551 __ cmpptr(rdx, rbx); // check if bottom reached
3552 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3553 }
3555 // Error handling: falling through the search loop means no matching entry was found, i.e. unlocking was not block-structured
3556 Label end;
3557 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3558 __ should_not_reach_here();
3560 // call run-time routine
3561 // rdx: points to monitor entry
3562 __ bind(found);
3563 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3564 __ unlock_object(rdx);
3565 __ pop_ptr(rax); // discard object
3566 __ bind(end);
3567 }
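// The exit-side search as C++ (sketch only, reusing the illustrative
// MonitorEntrySketch): the first matching entry from the top is the one
// to unlock; running off the bottom means this frame never locked obj.
static MonitorEntrySketch* find_owned_entry(MonitorEntrySketch* top,
                                            MonitorEntrySketch* bot,
                                            void* obj) {
  for (MonitorEntrySketch* e = top; e != bot; e++) {
    if (e->obj == obj) return e;  // the 'found' case above
  }
  return NULL;                    // -> throw_illegal_monitor_state_exception
}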
3570 //----------------------------------------------------------------------------------------------------
3571 // Wide instructions
3573 void TemplateTable::wide() {
3574 transition(vtos, vtos);
3575 __ load_unsigned_byte(rbx, at_bcp(1));
3576 ExternalAddress wtable((address)Interpreter::_wentry_point);
3577 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3578 // Note: the rsi increment step is part of the individual wide bytecode implementations
3579 }
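// The wide prefix re-dispatches through a second entry-point table
// indexed by the bytecode that follows it. A C++ model (sketch only;
// the 256 mirrors DispatchTable::length):
typedef void (*WideEntrySketch)();
static void dispatch_wide_sketch(WideEntrySketch wtable[256], unsigned char* bcp) {
  unsigned char opcode = bcp[1];  // load_unsigned_byte(rbx, at_bcp(1))
  wtable[opcode]();               // indexed jump; the wide handler itself
                                  // steps rsi past the widened operands
}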
3582 //----------------------------------------------------------------------------------------------------
3583 // Multi arrays
3585 void TemplateTable::multianewarray() {
3586 transition(vtos, atos);
3587 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3588 // last dim is on top of stack; we want address of first one:
3589 // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
3590 // (the -wordSize lands the pointer on the first dimension count).
3591 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3592 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax,
3593 __ load_unsigned_byte(rbx, at_bcp(3));
3594 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3595 }
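// The address computation as C++ (sketch only; dimension counts were
// pushed first-to-last, so with the last dim at sp[0] the first sits
// ndims - 1 slots higher):
static jint* first_dim_addr(jint* sp, int ndims) {
  return sp + (ndims - 1);  // == sp + ndims*stackElementSize - wordSize
                            //    when stackElementSize == wordSize
}
// After the runtime call returns, the final lea pops all ndims counts
// by adding ndims stack slots back to rsp.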
3597 #endif /* !CC_INTERP */