/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_32.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}
//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }
static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) {
  assert(!TaggedStackInterpreter, "This doesn't work");
  return laddress(r);
}
static inline Address aaddress(Register r) { return iaddress(r); }
// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp().
// (It isn't for category 1 values.)
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}
// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
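
// j_not() returns the negated condition so callers can jump *around* the
// taken path; e.g. if_icmp(less) below tests with jcc(greaterEqual, not_taken).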
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
        }
        __ restore_bcp();
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}
void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {

  if (!RewriteBytecodes) return;
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
    __ bind(fast_patch);
  }
#else
    __ jmp(patch_done);
    __ bind(fast_patch);
  }
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
  __ jccb(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
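  // Note: both preprocessor arms above close the can_post_breakpoint block,
  // so the braces balance in product and debug builds alike; the debug arm
  // additionally verifies that we are patching the bytecode we think we are.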
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}
void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
  } else                 { ShouldNotReachHere();
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}
void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
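  // The two operand bytes are big-endian in the bytecode stream; after the
  // little-endian 16-bit load, bswap moves them to the top of the register
  // and the arithmetic shift right sign-extends the 16-bit immediate.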
  __ bswapl(rax);
  __ sarl(rax, 16);
}
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from the first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  Label isOop;
  // atos and itos
  // String is the only oop type we will see here
  __ cmpl(rdx, JVM_CONSTANT_String);
  __ jccb(Assembler::equal, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);
  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(atos);

  if (VerifyOops) {
    __ verify_oop(rax);
  }
  __ bind(Done);
}
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}
void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
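
// The index is negated because locals live at decreasing addresses below the
// locals pointer rdi; the iaddress(Register)/laddress(Register) helpers then
// scale the negated index by the stack element size.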
void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  if (TaggedStackInterpreter) {
    // Get double out of locals array, onto temp stack and load with
    // float instruction into ST0
    __ movl(rax, laddress(rbx));
    __ movl(rdx, haddress(rbx));
    __ push(rdx);  // push hi first
    __ push(rax);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2 * wordSize);
    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
  } else {
    __ fld_d(daddress(rbx));
  }
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
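
// As in sipush, the two index bytes at bcp+2 are big-endian: the 32-bit load
// plus bswap puts them in the high half of the register, and the logical
// shift right leaves the zero-extended 16-bit index, negated as in
// locals_index.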
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  if (TaggedStackInterpreter) {
    // Get double out of locals array, onto temp stack and load with
    // float instruction into ST0
    __ movl(rax, laddress(rbx));
    __ movl(rdx, haddress(rbx));
    __ push(rdx);  // push hi first
    __ push(rax);
    __ fld_d(Address(rsp, 0));
    __ addl(rsp, 2 * wordSize);
    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
  } else {
    __ fld_d(daddress(rbx));
  }
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
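
// Note: the unsigned aboveEqual compare catches both out-of-range cases at
// once: a negative index reads as a huge unsigned value, so index < 0 and
// index >= length both take the exception branch.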
void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
  debug_only(__ verify_local_tag(frame::TagValue, n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
  debug_only(__ verify_local_tag(frame::TagCategory2, n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
  debug_only(__ verify_local_tag(frame::TagValue, n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  if (TaggedStackInterpreter) {
    // Get double out of locals array, onto temp stack and load with
    // float instruction into ST0
    __ movl(rax, laddress(n));
    __ movl(rdx, haddress(n));
    __ push(rdx);  // push hi first
    __ push(rax);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2 * wordSize);  // reset rsp
    debug_only(__ verify_local_tag(frame::TagCategory2, n));
  } else {
    __ fld_d(daddress(n));
  }
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
  debug_only(__ verify_local_tag(frame::TagReference, n));
}
void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are the most profitable to rewrite with a small amount of code.
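  //
  // For example, once a following getfield has been rewritten to
  // _fast_igetfield, the preceding aload_0 is rewritten to _fast_iaccess_0,
  // collapsing the pair into a single dispatch.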
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with the rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  if (TaggedStackInterpreter) {
    // Store double on stack and reload into locals nonadjacently
    __ subptr(rsp, 2 * wordSize);
    __ fstp_d(Address(rsp, 0));
    __ pop(rax);
    __ pop(rdx);
    __ movptr(laddress(rbx), rax);
    __ movptr(haddress(rbx), rdx);
    __ tag_local(frame::TagCategory2, rbx);
  } else {
    __ fstp_d(daddress(rbx));
  }
}
void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);   // will need to pop the tag too
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
  __ tag_local(rdx, rbx); // store the same tag in the local; it may be a returnAddress
}
void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::wide_fstore() {
  wide_istore();
}

void TemplateTable::wide_dstore() {
  wide_lstore();
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
  __ tag_local(rdx, rbx);
}
void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}
void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
  // Move superklass into EAX
  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array+index*wordSize+12 into a single register. Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows ECX. Resets EDI to locals.
  // Superklass in EAX. Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL (noreg means NULL to do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize());
}
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}

void TemplateTable::sastore() {
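  // A short store is the same 16-bit store as a char store, so just reuse it.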
  castore();
}
void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
  __ tag_local(frame::TagValue, n);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
  __ tag_local(frame::TagCategory2, n);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
  __ tag_local(frame::TagValue, n);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  if (TaggedStackInterpreter) {
    __ subptr(rsp, 2 * wordSize);
    __ fstp_d(Address(rsp, 0));
    __ pop(rax);
    __ pop(rdx);
    __ movl(laddress(n), rax);
    __ movl(haddress(n), rdx);
    __ tag_local(frame::TagCategory2, n);
  } else {
    __ fstp_d(daddress(n));
  }
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);
  __ movptr(aaddress(n), rax);
  __ tag_local(rdx, n);
}
void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize());
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize());
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr_and_tag(0, rax, rdx);
  __ push_ptr(rax, rdx);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(0, rax, rdx);   // load b
  __ load_ptr_and_tag(1, rcx, rbx);   // load a
  __ store_ptr_and_tag(1, rax, rdx);  // store b
  __ store_ptr_and_tag(0, rcx, rbx);  // store a
  __ push_ptr(rax, rdx);              // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(0, rax, rdx);   // load c
  __ load_ptr_and_tag(2, rcx, rbx);   // load a
  __ store_ptr_and_tag(2, rax, rdx);  // store c in a
  __ push_ptr(rax, rdx);              // push c
  // stack: ..., c, b, c, c
  __ load_ptr_and_tag(2, rax, rdx);   // load b
  __ store_ptr_and_tag(2, rcx, rbx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr_and_tag(1, rax, rdx);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, rax, rdx);  // load a
  __ push_ptr(rax, rdx);             // push a
  __ load_ptr_and_tag(1, rax, rdx);  // load b
  __ push_ptr(rax, rdx);             // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(0, rcx, rbx);   // load c
  __ load_ptr_and_tag(1, rax, rdx);   // load b
  __ push_ptr(rax, rdx);              // push b
  __ push_ptr(rcx, rbx);              // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr_and_tag(3, rcx, rbx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr_and_tag(4, rcx, rbx);   // load a
  __ store_ptr_and_tag(2, rcx, rbx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr_and_tag(4, rax, rdx);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr_and_tag(0, rcx, rbx);   // load d
  __ load_ptr_and_tag(1, rax, rdx);   // load c
  __ push_ptr(rax, rdx);              // push c
  __ push_ptr(rcx, rbx);              // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr_and_tag(4, rax, rdx);   // load b
  __ store_ptr_and_tag(2, rax, rdx);  // store b in d
  __ store_ptr_and_tag(4, rcx, rbx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr_and_tag(5, rcx, rbx);   // load a
  __ load_ptr_and_tag(3, rax, rdx);   // load c
  __ store_ptr_and_tag(3, rcx, rbx);  // store a in c
  __ store_ptr_and_tag(5, rax, rdx);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, rcx, rbx);   // load a
  __ load_ptr_and_tag(0, rax, rdx);   // load b
  __ store_ptr_and_tag(0, rcx, rbx);  // store a in b
  __ store_ptr_and_tag(1, rax, rdx);  // store b in a
  // stack: ..., b, a
}
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and: __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor: __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll(rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl(rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    case ushr: __ mov(rcx, rax); __ pop_i(rax); __ shrl(rax); break;  // implicit masking of lower 5 bits by Intel shift instr.
    default  : ShouldNotReachHere();
  }
}
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  switch (op) {
    case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
               __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default  : ShouldNotReachHere();
  }
}
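
// On 32-bit x86 a 64-bit add/sub is composed from two 32-bit halves: addl/subl
// on the low words, then adcl/sbbl on the high words to propagate the carry
// or borrow.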
void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}
void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}
void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}
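
// The extra boolean passed to lshr() above selects sign extension: lshr needs
// the arithmetic (sign-propagating) right shift, while lushr uses the default
// logical shift.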
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  __ pop_ftos_to_rsp();  // pop ftos into rsp
  switch (op) {
    case add: __ fadd_s (at_rsp()); break;
    case sub: __ fsubr_s(at_rsp()); break;
    case mul: __ fmul_s (at_rsp()); break;
    case div: __ fdivr_s(at_rsp()); break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop the float operand off the stack
}
void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  __ pop_dtos_to_rsp();  // pop dtos into rsp

  switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d(at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d(at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
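    // (The bias1/bias2 scalings above narrow the x87 extended exponent range
    // so that a strictfp multiply rounds and over/underflows as if it had
    // been computed in pure IEEE double precision.)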
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d(at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d(at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}
void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);  // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);          // store int on tos
      __ fild_s(at_rsp());   // load int to ST0
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);          // add one slot for d2ieee()
      __ push(rax);          // store int on tos
      __ fild_s(at_rsp());   // load int to ST0
      __ d2ieee();           // truncate to double size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);      // truncate upper 24 bits
      __ sarl(rax, 24);      // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF);  // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);      // truncate upper 16 bits
      __ sarl(rax, 16);      // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);          // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());   // load long to ST0
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);          // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());   // load long to ST0
      __ d2ieee();           // truncate to double size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);          // reserve space for argument
      __ fstp_s(at_rsp());   // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);          // reserve space for argument
      __ fstp_s(at_rsp());   // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);          // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());   // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);          // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());   // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);          // reserve space for f2ieee()
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      break;
    default:
      ShouldNotReachHere();
  }
}
void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ pop_ftos_to_rsp();
    __ fld_s(at_rsp());
  } else {
    __ pop_dtos_to_rsp();
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}
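
// unordered_result < 0 selects the fcmpl/dcmpl semantics (an unordered, i.e.
// NaN, comparison produces -1); otherwise fcmpg/dcmpg semantics produce +1,
// as the JVM spec requires.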
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                 // ECX holds method
  __ profile_taken_branch(rax, rbx);  // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up EDX with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }
  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                     // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch);  // count only if backward branch

    // increment counter
    __ movl(rax, Address(rcx, be_offset));                   // load backedge counter
    __ incrementl(rax, InvocationCounter::count_increment);  // increment counter
    __ movl(Address(rcx, be_offset), rax);                   // store counter

    __ movl(rax, Address(rcx, inv_offset));                  // load invocation counter
    __ andl(rax, InvocationCounter::count_mask_value);       // and the status bits
    __ addl(rax, Address(rcx, be_offset));                   // add both counters

    if (ProfileInterpreter) {
      // Test to see if we should create a method data oop
      __ cmp32(rax,
               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, dispatch);

      // if no method data exists, go to profile method
      __ test_method_data_pointer(rax, profile_method);

      if (UseOnStackReplacement) {
        // check for overflow against rbx, which is the MDO taken count
        __ cmp32(rbx,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::below, dispatch);

        // When ProfileInterpreter is on, the backedge_count comes from the
        // methodDataOop, whose value does not get reset on the call to
        // frequency_counter_overflow(). To avoid excessive calls to the overflow
        // routine while the method is being compiled, add a second test to make
        // sure the overflow function is called only once every overflow_frequency.
        const int overflow_frequency = 1024;
        __ andptr(rbx, overflow_frequency - 1);
        __ jcc(Assembler::zero, backedge_counter_overflow);
      }
    } else {
      if (UseOnStackReplacement) {
        // check for overflow against rax, which is the sum of the counters
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);
  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {

      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      const Register thread = rcx;
      __ get_thread(thread);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax is the OSR buffer, move it to the expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));  // get sender sp
      __ leave();        // remove frame anchor
      __ pop(rdi);       // get return address
      __ mov(rsp, rdx);  // set sp to sender sp
      Label skip;
      Label chkint;

      // The interpreter frame we have removed may be returning to
      // either the call stub or the interpreter. Since we will
      // now be returning from a compiled (OSR) nmethod we must
      // adjust the return address to one where it can handle compiled
      // results and clean the fpu stack. This is very similar to
      // what an i2c adapter must do.

      // Are we returning to the call stub?

      __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
      __ jcc(Assembler::notEqual, chkint);

      // yes, adjust to the specialized call stub return.
      assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
      __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
      __ jmp(skip);

      __ bind(chkint);

      // Are we returning to the interpreter? Look for sentinel

      __ cmpl(Address(rdi, -2 * wordSize), Interpreter::return_sentinel);
      __ jcc(Assembler::notEqual, skip);

      // Adjust to compiled return back to interpreter

      __ movptr(rdi, Address(rdi, -wordSize));
      __ bind(skip);

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}
1835 void TemplateTable::ret() {
1836 transition(vtos, vtos);
1837 locals_index(rbx);
1838 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1839 __ profile_ret(rbx, rcx);
1840 __ get_method(rax);
1841 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1842 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1843 constMethodOopDesc::codes_offset()));
1844 __ dispatch_next(vtos);
1845 }
1848 void TemplateTable::wide_ret() {
1849 transition(vtos, vtos);
1850 locals_index_wide(rbx);
1851 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1852 __ profile_ret(rbx, rcx);
1853 __ get_method(rax);
1854 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1855 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1856 __ dispatch_next(vtos);
1857 }
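// Note on the tableswitch operand layout (per the JVM spec): after the opcode
// the operands are padded to a 4-byte boundary (hence the alignment of rbx
// below), followed by a 32-bit default offset, lo, hi, and (hi - lo + 1)
// jump offsets, all stored big-endian (hence the bswaps). On x86_32
// wordSize == BytesPerInt, so the wordSize-scaled addresses below pick out
// these 32-bit slots.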
1860 void TemplateTable::tableswitch() {
1861 Label default_case, continue_execution;
1862 transition(itos, vtos);
1863 // align rsi
1864 __ lea(rbx, at_bcp(wordSize));
1865 __ andptr(rbx, -wordSize);
1866 // load lo & hi
1867 __ movl(rcx, Address(rbx, 1 * wordSize));
1868 __ movl(rdx, Address(rbx, 2 * wordSize));
1869 __ bswapl(rcx);
1870 __ bswapl(rdx);
1871 // check against lo & hi
1872 __ cmpl(rax, rcx);
1873 __ jccb(Assembler::less, default_case);
1874 __ cmpl(rax, rdx);
1875 __ jccb(Assembler::greater, default_case);
1876 // lookup dispatch offset
1877 __ subl(rax, rcx);
1878 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1879 __ profile_switch_case(rax, rbx, rcx);
1880 // continue execution
1881 __ bind(continue_execution);
1882 __ bswapl(rdx);
1883 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1884 __ addptr(rsi, rdx);
1885 __ dispatch_only(vtos);
1886 // handle default
1887 __ bind(default_case);
1888 __ profile_switch_default(rax);
1889 __ movl(rdx, Address(rbx, 0));
1890 __ jmp(continue_execution);
1891 }
1894 void TemplateTable::lookupswitch() {
1895 transition(itos, itos);
1896 __ stop("lookupswitch bytecode should have been rewritten");
1897 }
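// fast_linearswitch handles a rewritten lookupswitch whose operands (after
// alignment) are a 32-bit default offset, the pair count npairs, and npairs
// big-endian (match, offset) pairs. The key in rax is byte-swapped once up
// front so that the table entries don't each need swapping; the loop then
// scans the pairs from the last one down to the first.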
1900 void TemplateTable::fast_linearswitch() {
1901 transition(itos, vtos);
1902 Label loop_entry, loop, found, continue_execution;
1903 // bswapl rax, so we can avoid bswapping the table entries
1904 __ bswapl(rax);
1905 // align rsi
1906 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1907 __ andptr(rbx, -wordSize);
1908 // set counter
1909 __ movl(rcx, Address(rbx, wordSize));
1910 __ bswapl(rcx);
1911 __ jmpb(loop_entry);
1912 // table search
1913 __ bind(loop);
1914 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1915 __ jccb(Assembler::equal, found);
1916 __ bind(loop_entry);
1917 __ decrementl(rcx);
1918 __ jcc(Assembler::greaterEqual, loop);
1919 // default case
1920 __ profile_switch_default(rax);
1921 __ movl(rdx, Address(rbx, 0));
1922 __ jmpb(continue_execution);
1923 // entry found -> get offset
1924 __ bind(found);
1925 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1926 __ profile_switch_case(rcx, rax, rbx);
1927 // continue execution
1928 __ bind(continue_execution);
1929 __ bswapl(rdx);
1930 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1931 __ addptr(rsi, rdx);
1932 __ dispatch_only(vtos);
1933 }
1936 void TemplateTable::fast_binaryswitch() {
1937 transition(itos, vtos);
1938 // Implementation using the following core algorithm:
1939 //
1940 // int binary_search(int key, LookupswitchPair* array, int n) {
1941 // // Binary search according to "Methodik des Programmierens" by
1942 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1943 // int i = 0;
1944 // int j = n;
1945 // while (i+1 < j) {
1946 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1947 // // with Q: for all i: 0 <= i < n: key < a[i]
1948 // // where a stands for the array and assuming that the (nonexistent)
1949 // // element a[n] is infinitely big.
1950 // int h = (i + j) >> 1;
1951 // // i < h < j
1952 // if (key < array[h].fast_match()) {
1953 // j = h;
1954 // } else {
1955 // i = h;
1956 // }
1957 // }
1958 // // R: a[i] <= key < a[i+1] or Q
1959 // // (i.e., if key is within array, i is the correct index)
1960 // return i;
1961 // }
1963 // register allocation
1964 const Register key = rax; // already set (tosca)
1965 const Register array = rbx;
1966 const Register i = rcx;
1967 const Register j = rdx;
1968 const Register h = rdi; // needs to be restored
1969 const Register temp = rsi;
1970 // setup array
1971 __ save_bcp();
1973 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1974 __ andptr(array, -wordSize);
1975 // initialize i & j
1976 __ xorl(i, i); // i = 0;
1977 __ movl(j, Address(array, -wordSize)); // j = length(array);
1978 // Convert j into native byte ordering
1979 __ bswapl(j);
1980 // and start
1981 Label entry;
1982 __ jmp(entry);
1984 // binary search loop
1985 { Label loop;
1986 __ bind(loop);
1987 // int h = (i + j) >> 1;
1988 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1989 __ sarl(h, 1); // h = (i + j) >> 1;
1990 // if (key < array[h].fast_match()) {
1991 // j = h;
1992 // } else {
1993 // i = h;
1994 // }
1995 // Convert array[h].match to native byte-ordering before compare
1996 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1997 __ bswapl(temp);
1998 __ cmpl(key, temp);
1999 if (VM_Version::supports_cmov()) {
2000 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
2001 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
2002 } else {
2003 Label set_i, end_of_if;
2004 __ jccb(Assembler::greaterEqual, set_i); // {
2005 __ mov(j, h); // j = h;
2006 __ jmp(end_of_if); // }
2007 __ bind(set_i); // else {
2008 __ mov(i, h); // i = h;
2009 __ bind(end_of_if); // }
2010 }
2011 // while (i+1 < j)
2012 __ bind(entry);
2013 __ leal(h, Address(i, 1)); // i+1
2014 __ cmpl(h, j); // i+1 < j
2015 __ jcc(Assembler::less, loop);
2016 }
2018 // end of binary search, result index is i (must check again!)
2019 Label default_case;
2020 // Convert array[i].match to native byte-ordering before compare
2021 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
2022 __ bswapl(temp);
2023 __ cmpl(key, temp);
2024 __ jcc(Assembler::notEqual, default_case);
2026 // entry found -> j = offset
2027 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
2028 __ profile_switch_case(i, key, array);
2029 __ bswapl(j);
2030 LP64_ONLY(__ movslq(j, j));
2031 __ restore_bcp();
2032 __ restore_locals(); // restore rdi
2033 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2035 __ addptr(rsi, j);
2036 __ dispatch_only(vtos);
2038 // default case -> j = default offset
2039 __ bind(default_case);
2040 __ profile_switch_default(i);
2041 __ movl(j, Address(array, -2*wordSize));
2042 __ bswapl(j);
2043 LP64_ONLY(__ movslq(j, j));
2044 __ restore_bcp();
2045 __ restore_locals(); // restore rdi
2046 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2047 __ addptr(rsi, j);
2048 __ dispatch_only(vtos);
2049 }
2052 void TemplateTable::_return(TosState state) {
2053 transition(state, state);
2054 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2056 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2057 assert(state == vtos, "only valid state");
2058 __ movptr(rax, aaddress(0));
2059 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
2060 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2061 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2062 Label skip_register_finalizer;
2063 __ jcc(Assembler::zero, skip_register_finalizer);
2065 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2067 __ bind(skip_register_finalizer);
2068 }
2070 __ remove_activation(state, rsi);
2071 __ jmp(rsi);
2072 }
2075 // ----------------------------------------------------------------------------
2076 // Volatile variables demand their effects be made known to all CPUs in
2077 // order. Store buffers on most chips allow reads & writes to reorder; the
2078 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2079 // memory barrier (i.e., it's not sufficient that the interpreter does not
2080 // reorder volatile references, the hardware also must not reorder them).
2081 //
2082 // According to the new Java Memory Model (JMM):
2083 // (1) All volatiles are serialized with respect to each other.
2084 // ALSO reads & writes act as acquire & release, so:
2085 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2086 // the read float up to before the read. It's OK for non-volatile memory refs
2087 // that happen before the volatile read to float down below it.
2088 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2089 // that happen BEFORE the write float down to after the write. It's OK for
2090 // non-volatile memory refs that happen after the volatile write to float up
2091 // before it.
2092 //
2093 // We only put in barriers around volatile refs (they are expensive), not
2094 // _between_ memory refs (that would require us to track the flavor of the
2095 // previous memory refs). Requirements (2) and (3) require some barriers
2096 // before volatile stores and after volatile loads. These nearly cover
2097 // requirement (1) but miss the volatile-store-volatile-load case. This final
2098 // case is placed after volatile-stores although it could just as well go
2099 // before volatile-loads.
2100 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2101 // Helper function to insert an is-volatile test and memory barrier
2102 if( !os::is_MP() ) return; // Not needed on single CPU
2103 __ membar(order_constraint);
2104 }
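// For example, the volatile-store paths in putfield_or_static and
// fast_storefield below follow the store with
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
// which also covers the volatile-store-volatile-load case described above.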
2106 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
2107 assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
2109 Register temp = rbx;
2111 assert_different_registers(Rcache, index, temp);
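// The indices field of a cp cache entry packs the original constant pool
// index in the low 16 bits with one rewritten bytecode per byte above it,
// so shifting right by (1 + byte_no)*BitsPerByte and masking with 0xFF
// extracts the byte recording whether this bytecode has been resolved.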
2113 const int shift_count = (1 + byte_no)*BitsPerByte;
2114 Label resolved;
2115 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2116 __ movl(temp, Address(Rcache,
2117 index,
2118 Address::times_ptr,
2119 constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2120 __ shrl(temp, shift_count);
2121 // have we resolved this bytecode?
2122 __ andptr(temp, 0xFF);
2123 __ cmpl(temp, (int)bytecode());
2124 __ jcc(Assembler::equal, resolved);
2126 // resolve first time through
2127 address entry;
2128 switch (bytecode()) {
2129 case Bytecodes::_getstatic : // fall through
2130 case Bytecodes::_putstatic : // fall through
2131 case Bytecodes::_getfield : // fall through
2132 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2133 case Bytecodes::_invokevirtual : // fall through
2134 case Bytecodes::_invokespecial : // fall through
2135 case Bytecodes::_invokestatic : // fall through
2136 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2137 default : ShouldNotReachHere(); break;
2138 }
2139 __ movl(temp, (int)bytecode());
2140 __ call_VM(noreg, entry, temp);
2141 // Update registers with resolved info
2142 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2143 __ bind(resolved);
2144 }
2147 // The cache and index registers must be set before this call
2148 void TemplateTable::load_field_cp_cache_entry(Register obj,
2149 Register cache,
2150 Register index,
2151 Register off,
2152 Register flags,
2153 bool is_static = false) {
2154 assert_different_registers(cache, index, flags, off);
2156 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2157 // Field offset
2158 __ movptr(off, Address(cache, index, Address::times_ptr,
2159 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2160 // Flags
2161 __ movl(flags, Address(cache, index, Address::times_ptr,
2162 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2164 // For static fields the obj register is overwritten with the klass (from f1)
2165 if (is_static) {
2166 __ movptr(obj, Address(cache, index, Address::times_ptr,
2167 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2168 }
2169 }
2171 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2172 Register method,
2173 Register itable_index,
2174 Register flags,
2175 bool is_invokevirtual,
2176 bool is_invokevfinal /*unused*/) {
2177 // setup registers
2178 const Register cache = rcx;
2179 const Register index = rdx;
2180 assert_different_registers(method, flags);
2181 assert_different_registers(method, cache, index);
2182 assert_different_registers(itable_index, flags);
2183 assert_different_registers(itable_index, cache, index);
2184 // determine constant pool cache field offsets
2185 const int method_offset = in_bytes(
2186 constantPoolCacheOopDesc::base_offset() +
2187 (is_invokevirtual
2188 ? ConstantPoolCacheEntry::f2_offset()
2189 : ConstantPoolCacheEntry::f1_offset()
2190 )
2191 );
2192 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2193 ConstantPoolCacheEntry::flags_offset());
2194 // access constant pool cache fields
2195 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2196 ConstantPoolCacheEntry::f2_offset());
2198 resolve_cache_and_index(byte_no, cache, index);
2200 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2201 if (itable_index != noreg) {
2202 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2203 }
2204 __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
2205 }
2208 // The cache and index registers are expected to be set before this call.
2209 // The correct values of the cache and index registers are preserved.
2210 void TemplateTable::jvmti_post_field_access(Register cache,
2211 Register index,
2212 bool is_static,
2213 bool has_tos) {
2214 if (JvmtiExport::can_post_field_access()) {
2215 // Check to see if a field access watch has been set before we take
2216 // the time to call into the VM.
2217 Label L1;
2218 assert_different_registers(cache, index, rax);
2219 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2220 __ testl(rax,rax);
2221 __ jcc(Assembler::zero, L1);
2223 // cache entry pointer
2224 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2225 __ shll(index, LogBytesPerWord);
2226 __ addptr(cache, index);
2227 if (is_static) {
2228 __ xorptr(rax, rax); // NULL object reference
2229 } else {
2230 __ pop(atos); // Get the object
2231 __ verify_oop(rax);
2232 __ push(atos); // Restore stack state
2233 }
2234 // rax: object pointer or NULL
2235 // cache: cache entry pointer
2236 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2237 rax, cache);
2238 __ get_cache_and_index_at_bcp(cache, index, 1);
2239 __ bind(L1);
2240 }
2241 }
2243 void TemplateTable::pop_and_check_object(Register r) {
2244 __ pop_ptr(r);
2245 __ null_check(r); // for field access must check obj.
2246 __ verify_oop(r);
2247 }
2249 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2250 transition(vtos, vtos);
2252 const Register cache = rcx;
2253 const Register index = rdx;
2254 const Register obj = rcx;
2255 const Register off = rbx;
2256 const Register flags = rax;
2258 resolve_cache_and_index(byte_no, cache, index);
2259 jvmti_post_field_access(cache, index, is_static, false);
2260 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2262 if (!is_static) pop_and_check_object(obj);
2264 const Address lo(obj, off, Address::times_1, 0*wordSize);
2265 const Address hi(obj, off, Address::times_1, 1*wordSize);
2267 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
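// Dispatch sketch (C-like, not the generated code): the tos state sits in
// the upper bits of flags, so after the shift this amounts to
//   switch (flags >> tosBits) { case btos: ... case itos: ... /* etc. */ }
// with btos tested first via a plain mask-and-test since it is encoded as 0.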
2269 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2270 assert(btos == 0, "change code, btos != 0");
2271 // btos
2272 __ andptr(flags, 0x0f);
2273 __ jcc(Assembler::notZero, notByte);
2275 __ load_signed_byte(rax, lo );
2276 __ push(btos);
2277 // Rewrite bytecode to be faster
2278 if (!is_static) {
2279 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2280 }
2281 __ jmp(Done);
2283 __ bind(notByte);
2284 // itos
2285 __ cmpl(flags, itos );
2286 __ jcc(Assembler::notEqual, notInt);
2288 __ movl(rax, lo );
2289 __ push(itos);
2290 // Rewrite bytecode to be faster
2291 if (!is_static) {
2292 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2293 }
2294 __ jmp(Done);
2296 __ bind(notInt);
2297 // atos
2298 __ cmpl(flags, atos );
2299 __ jcc(Assembler::notEqual, notObj);
2301 __ movl(rax, lo );
2302 __ push(atos);
2303 if (!is_static) {
2304 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2305 }
2306 __ jmp(Done);
2308 __ bind(notObj);
2309 // ctos
2310 __ cmpl(flags, ctos );
2311 __ jcc(Assembler::notEqual, notChar);
2313 __ load_unsigned_short(rax, lo );
2314 __ push(ctos);
2315 if (!is_static) {
2316 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2317 }
2318 __ jmp(Done);
2320 __ bind(notChar);
2321 // stos
2322 __ cmpl(flags, stos );
2323 __ jcc(Assembler::notEqual, notShort);
2325 __ load_signed_short(rax, lo );
2326 __ push(stos);
2327 if (!is_static) {
2328 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2329 }
2330 __ jmp(Done);
2332 __ bind(notShort);
2333 // ltos
2334 __ cmpl(flags, ltos );
2335 __ jcc(Assembler::notEqual, notLong);
2337 // Generate code as if volatile. There just aren't enough registers to
2338 // save that information and this code is faster than the test.
2339 __ fild_d(lo); // Must load atomically
2340 __ subptr(rsp,2*wordSize); // Make space for store
2341 __ fistp_d(Address(rsp,0));
2342 __ pop(rax);
2343 __ pop(rdx);
2345 __ push(ltos);
2346 // Don't rewrite to _fast_lgetfield for potential volatile case.
2347 __ jmp(Done);
2349 __ bind(notLong);
2350 // ftos
2351 __ cmpl(flags, ftos );
2352 __ jcc(Assembler::notEqual, notFloat);
2354 __ fld_s(lo);
2355 __ push(ftos);
2356 if (!is_static) {
2357 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2358 }
2359 __ jmp(Done);
2361 __ bind(notFloat);
2362 // dtos
2363 __ cmpl(flags, dtos );
2364 __ jcc(Assembler::notEqual, notDouble);
2366 __ fld_d(lo);
2367 __ push(dtos);
2368 if (!is_static) {
2369 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2370 }
2371 __ jmpb(Done);
2373 __ bind(notDouble);
2375 __ stop("Bad state");
2377 __ bind(Done);
2378 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2379 // volatile_barrier( );
2380 }
2383 void TemplateTable::getfield(int byte_no) {
2384 getfield_or_static(byte_no, false);
2385 }
2388 void TemplateTable::getstatic(int byte_no) {
2389 getfield_or_static(byte_no, true);
2390 }
2392 // The cache and index registers are expected to be set before this call.
2393 // The function may destroy various registers, just not the cache and index registers.
2394 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2396 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2398 if (JvmtiExport::can_post_field_modification()) {
2399 // Check to see if a field modification watch has been set before we take
2400 // the time to call into the VM.
2401 Label L1;
2402 assert_different_registers(cache, index, rax);
2403 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2404 __ testl(rax, rax);
2405 __ jcc(Assembler::zero, L1);
2407 // The cache and index registers have already been set. This call could
2408 // therefore be eliminated, but then the cache and index registers would
2409 // have to be used consistently in the code below instead of rax and rdx.
2410 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2412 if (is_static) {
2413 // Life is simple. Null out the object pointer.
2414 __ xorptr(rbx, rbx);
2415 } else {
2416 // Life is harder. The stack holds the value on top, followed by the object.
2417 // We don't know the size of the value, though; it could be one or two words
2418 // depending on its type. As a result, we must find the type to determine where
2419 // the object is.
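// In outline: obj = *(intptr_t*)(rsp + Interpreter::expr_offset_in_bytes(n)),
// where n is 2 for the two-word types (ltos, dtos) and 1 otherwise.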
2420 Label two_word, valsize_known;
2421 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2422 ConstantPoolCacheEntry::flags_offset())));
2423 __ mov(rbx, rsp);
2424 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2425 // Make sure we don't need to mask rcx for tosBits after the above shift
2426 ConstantPoolCacheEntry::verify_tosBits();
2427 __ cmpl(rcx, ltos);
2428 __ jccb(Assembler::equal, two_word);
2429 __ cmpl(rcx, dtos);
2430 __ jccb(Assembler::equal, two_word);
2431 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2432 __ jmpb(valsize_known);
2434 __ bind(two_word);
2435 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2437 __ bind(valsize_known);
2438 // setup object pointer
2439 __ movptr(rbx, Address(rbx, 0));
2440 }
2441 // cache entry pointer
2442 __ addptr(rax, in_bytes(cp_base_offset));
2443 __ shll(rdx, LogBytesPerWord);
2444 __ addptr(rax, rdx);
2445 // object (tos)
2446 __ mov(rcx, rsp);
2447 // rbx: object pointer set up above (NULL if static)
2448 // rax: cache entry pointer
2449 // rcx: jvalue object on the stack
2450 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2451 rbx, rax, rcx);
2452 __ get_cache_and_index_at_bcp(cache, index, 1);
2453 __ bind(L1);
2454 }
2455 }
2458 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2459 transition(vtos, vtos);
2461 const Register cache = rcx;
2462 const Register index = rdx;
2463 const Register obj = rcx;
2464 const Register off = rbx;
2465 const Register flags = rax;
2467 resolve_cache_and_index(byte_no, cache, index);
2468 jvmti_post_field_mod(cache, index, is_static);
2469 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2471 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2472 // volatile_barrier( );
2474 Label notVolatile, Done;
2475 __ movl(rdx, flags);
2476 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2477 __ andl(rdx, 0x1);
2479 // field addresses
2480 const Address lo(obj, off, Address::times_1, 0*wordSize);
2481 const Address hi(obj, off, Address::times_1, 1*wordSize);
2483 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2485 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2486 assert(btos == 0, "change code, btos != 0");
2487 // btos
2488 __ andl(flags, 0x0f);
2489 __ jcc(Assembler::notZero, notByte);
2491 __ pop(btos);
2492 if (!is_static) pop_and_check_object(obj);
2493 __ movb(lo, rax );
2494 if (!is_static) {
2495 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2496 }
2497 __ jmp(Done);
2499 __ bind(notByte);
2500 // itos
2501 __ cmpl(flags, itos );
2502 __ jcc(Assembler::notEqual, notInt);
2504 __ pop(itos);
2505 if (!is_static) pop_and_check_object(obj);
2507 __ movl(lo, rax );
2508 if (!is_static) {
2509 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2510 }
2511 __ jmp(Done);
2513 __ bind(notInt);
2514 // atos
2515 __ cmpl(flags, atos );
2516 __ jcc(Assembler::notEqual, notObj);
2518 __ pop(atos);
2519 if (!is_static) pop_and_check_object(obj);
2521 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2523 if (!is_static) {
2524 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2525 }
2527 __ jmp(Done);
2529 __ bind(notObj);
2530 // ctos
2531 __ cmpl(flags, ctos );
2532 __ jcc(Assembler::notEqual, notChar);
2534 __ pop(ctos);
2535 if (!is_static) pop_and_check_object(obj);
2536 __ movw(lo, rax );
2537 if (!is_static) {
2538 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2539 }
2540 __ jmp(Done);
2542 __ bind(notChar);
2543 // stos
2544 __ cmpl(flags, stos );
2545 __ jcc(Assembler::notEqual, notShort);
2547 __ pop(stos);
2548 if (!is_static) pop_and_check_object(obj);
2549 __ movw(lo, rax );
2550 if (!is_static) {
2551 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2552 }
2553 __ jmp(Done);
2555 __ bind(notShort);
2556 // ltos
2557 __ cmpl(flags, ltos );
2558 __ jcc(Assembler::notEqual, notLong);
2560 Label notVolatileLong;
2561 __ testl(rdx, rdx);
2562 __ jcc(Assembler::zero, notVolatileLong);
2564 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2565 if (!is_static) pop_and_check_object(obj);
2567 // Replace with real volatile test
2568 __ push(rdx);
2569 __ push(rax); // Must update atomically with FIST
2570 __ fild_d(Address(rsp,0)); // So load into FPU register
2571 __ fistp_d(lo); // and put into memory atomically
2572 __ addptr(rsp, 2*wordSize);
2573 // volatile_barrier();
2574 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2575 Assembler::StoreStore));
2576 // Don't rewrite volatile version
2577 __ jmp(notVolatile);
2579 __ bind(notVolatileLong);
2581 __ pop(ltos); // overwrites rdx
2582 if (!is_static) pop_and_check_object(obj);
2583 NOT_LP64(__ movptr(hi, rdx));
2584 __ movptr(lo, rax);
2585 if (!is_static) {
2586 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2587 }
2588 __ jmp(notVolatile);
2590 __ bind(notLong);
2591 // ftos
2592 __ cmpl(flags, ftos );
2593 __ jcc(Assembler::notEqual, notFloat);
2595 __ pop(ftos);
2596 if (!is_static) pop_and_check_object(obj);
2597 __ fstp_s(lo);
2598 if (!is_static) {
2599 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2600 }
2601 __ jmp(Done);
2603 __ bind(notFloat);
2604 // dtos
2605 __ cmpl(flags, dtos );
2606 __ jcc(Assembler::notEqual, notDouble);
2608 __ pop(dtos);
2609 if (!is_static) pop_and_check_object(obj);
2610 __ fstp_d(lo);
2611 if (!is_static) {
2612 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2613 }
2614 __ jmp(Done);
2616 __ bind(notDouble);
2618 __ stop("Bad state");
2620 __ bind(Done);
2622 // Check for volatile store
2623 __ testl(rdx, rdx);
2624 __ jcc(Assembler::zero, notVolatile);
2625 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2626 Assembler::StoreStore));
2627 __ bind(notVolatile);
2628 }
2631 void TemplateTable::putfield(int byte_no) {
2632 putfield_or_static(byte_no, false);
2633 }
2636 void TemplateTable::putstatic(int byte_no) {
2637 putfield_or_static(byte_no, true);
2638 }
2640 void TemplateTable::jvmti_post_fast_field_mod() {
2641 if (JvmtiExport::can_post_field_modification()) {
2642 // Check to see if a field modification watch has been set before we take
2643 // the time to call into the VM.
2644 Label L2;
2645 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2646 __ testl(rcx,rcx);
2647 __ jcc(Assembler::zero, L2);
2648 __ pop_ptr(rbx); // copy the object pointer from tos
2649 __ verify_oop(rbx);
2650 __ push_ptr(rbx); // put the object pointer back on tos
2651 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2652 __ mov(rcx, rsp);
2653 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2654 __ xorptr(rbx, rbx);
2655 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2656 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2657 switch (bytecode()) { // load values into the jvalue object
2658 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2659 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2660 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2661 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2662 case Bytecodes::_fast_lputfield:
2663 NOT_LP64(__ movptr(hi_value, rdx));
2664 __ movptr(lo_value, rax);
2665 break;
2667 // need to call fld_s() after fstp_s() to restore the value for below
2668 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2670 // need to call fld_d() after fstp_d() to restore the value for below
2671 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2673 // since rcx is not an object we don't call store_check() here
2674 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2676 default: ShouldNotReachHere();
2677 }
2678 __ pop_ptr(rbx); // restore copy of object pointer
2680 // Save rax, and sometimes rdx because call_VM() will clobber them,
2681 // then use them for JVM/DI purposes
2682 __ push(rax);
2683 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2684 // access constant pool cache entry
2685 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2686 __ verify_oop(rbx);
2687 // rbx: object pointer copied above
2688 // rax: cache entry pointer
2689 // rcx: jvalue object on the stack
2690 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2691 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2692 __ pop(rax); // restore lower value
2693 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2694 __ bind(L2);
2695 }
2696 }
2698 void TemplateTable::fast_storefield(TosState state) {
2699 transition(state, vtos);
2701 ByteSize base = constantPoolCacheOopDesc::base_offset();
2703 jvmti_post_fast_field_mod();
2705 // access constant pool cache
2706 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2708 // Test for volatile with rdx, but rdx is the tos register for lputfield.
2709 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2710 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2711 ConstantPoolCacheEntry::flags_offset())));
2713 // replace index with field offset from cache entry
2714 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2716 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2717 // volatile_barrier( );
2719 Label notVolatile, Done;
2720 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2721 __ andl(rdx, 0x1);
2722 // Check for volatile store
2723 __ testl(rdx, rdx);
2724 __ jcc(Assembler::zero, notVolatile);
2726 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2728 // Get object from stack
2729 pop_and_check_object(rcx);
2731 // field addresses
2732 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2733 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2735 // access field
2736 switch (bytecode()) {
2737 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2738 case Bytecodes::_fast_sputfield: // fall through
2739 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2740 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2741 case Bytecodes::_fast_lputfield:
2742 NOT_LP64(__ movptr(hi, rdx));
2743 __ movptr(lo, rax);
2744 break;
2745 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2746 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2747 case Bytecodes::_fast_aputfield: {
2748 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2749 break;
2750 }
2751 default:
2752 ShouldNotReachHere();
2753 }
2755 Label done;
2756 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2757 Assembler::StoreStore));
2758 // Barriers are so large that short branch doesn't reach!
2759 __ jmp(done);
2761 // Same code as above, but don't need rdx to test for volatile.
2762 __ bind(notVolatile);
2764 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2766 // Get object from stack
2767 pop_and_check_object(rcx);
2769 // access field
2770 switch (bytecode()) {
2771 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2772 case Bytecodes::_fast_sputfield: // fall through
2773 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2774 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2775 case Bytecodes::_fast_lputfield:
2776 NOT_LP64(__ movptr(hi, rdx));
2777 __ movptr(lo, rax);
2778 break;
2779 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2780 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2781 case Bytecodes::_fast_aputfield: {
2782 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2783 break;
2784 }
2785 default:
2786 ShouldNotReachHere();
2787 }
2788 __ bind(done);
2789 }
2792 void TemplateTable::fast_accessfield(TosState state) {
2793 transition(atos, state);
2795 // do the JVMTI work here to avoid disturbing the register state below
2796 if (JvmtiExport::can_post_field_access()) {
2797 // Check to see if a field access watch has been set before we take
2798 // the time to call into the VM.
2799 Label L1;
2800 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2801 __ testl(rcx,rcx);
2802 __ jcc(Assembler::zero, L1);
2803 // access constant pool cache entry
2804 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2805 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2806 __ verify_oop(rax);
2807 // rax: object pointer copied above
2808 // rcx: cache entry pointer
2809 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2810 __ pop_ptr(rax); // restore object pointer
2811 __ bind(L1);
2812 }
2814 // access constant pool cache
2815 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2816 // replace index with field offset from cache entry
2817 __ movptr(rbx, Address(rcx,
2818 rbx,
2819 Address::times_ptr,
2820 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2823 // rax: object
2824 __ verify_oop(rax);
2825 __ null_check(rax);
2826 // field addresses
2827 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2828 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2830 // access field
2831 switch (bytecode()) {
2832 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2833 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2834 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2835 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2836 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2837 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2838 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2839 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2840 default:
2841 ShouldNotReachHere();
2842 }
2844 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2845 // volatile_barrier( );
2846 }
2848 void TemplateTable::fast_xaccess(TosState state) {
2849 transition(vtos, state);
2850 // get receiver
2851 __ movptr(rax, aaddress(0));
2852 debug_only(__ verify_local_tag(frame::TagReference, 0));
2853 // access constant pool cache
2854 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2855 __ movptr(rbx, Address(rcx,
2856 rdx,
2857 Address::times_ptr,
2858 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2859 // make sure exception is reported in correct bcp range (getfield is next instruction)
2860 __ increment(rsi);
2861 __ null_check(rax);
2862 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2863 if (state == itos) {
2864 __ movl(rax, lo);
2865 } else if (state == atos) {
2866 __ movptr(rax, lo);
2867 __ verify_oop(rax);
2868 } else if (state == ftos) {
2869 __ fld_s(lo);
2870 } else {
2871 ShouldNotReachHere();
2872 }
2873 __ decrement(rsi);
2874 }
2878 //----------------------------------------------------------------------------------------------------
2879 // Calls
2881 void TemplateTable::count_calls(Register method, Register temp) {
2882 // implemented elsewhere
2883 ShouldNotReachHere();
2884 }
2887 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no, Bytecodes::Code code) {
2888 // determine flags
2889 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2890 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2891 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2892 const bool load_receiver = code != Bytecodes::_invokestatic;
2893 const bool receiver_null_check = is_invokespecial;
2894 const bool save_flags = is_invokeinterface || is_invokevirtual;
2895 // setup registers & access constant pool cache
2896 const Register recv = rcx;
2897 const Register flags = rdx;
2898 assert_different_registers(method, index, recv, flags);
2900 // save 'interpreter return address'
2901 __ save_bcp();
2903 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
2905 // load receiver if needed (note: no return address pushed yet)
2906 if (load_receiver) {
2907 __ movl(recv, flags);
2908 __ andl(recv, 0xFF);
2909 // recv count is 0 based?
2910 __ movptr(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)));
2911 __ verify_oop(recv);
2912 }
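// (In C-like terms the receiver load above is roughly
//    recv = *(rsp + param_slots * stackElementSize - expr_offset(1));
//  i.e. the bottommost argument slot, where param_slots is the low byte
//  of the flags word.)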
2914 // do null check if needed
2915 if (receiver_null_check) {
2916 __ null_check(recv);
2917 }
2919 if (save_flags) {
2920 __ mov(rsi, flags);
2921 }
2923 // compute return type
2924 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2925 // Make sure we don't need to mask flags for tosBits after the above shift
2926 ConstantPoolCacheEntry::verify_tosBits();
2927 // load return address
2928 {
2929 ExternalAddress table(is_invokeinterface ? (address)Interpreter::return_5_addrs_by_index_table() :
2930 (address)Interpreter::return_3_addrs_by_index_table());
2931 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2932 }
2934 // push return address
2935 __ push(flags);
2937 // Restore flag value from the constant pool cache, and restore rsi
2938 // for later null checks. rsi is the bytecode pointer
2939 if (save_flags) {
2940 __ mov(flags, rsi);
2941 __ restore_bcp();
2942 }
2943 }
2946 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2947 Register flags) {
2949 // Uses temporary registers rax, rdx
2950 assert_different_registers(index, recv, rax, rdx);
2952 // Test for an invoke of a final method
2953 Label notFinal;
2954 __ movl(rax, flags);
2955 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2956 __ jcc(Assembler::zero, notFinal);
2958 Register method = index; // method must be rbx
2959 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2961 // do the call - the index is actually the method to call
2962 __ verify_oop(method);
2964 // It's final, need a null check here!
2965 __ null_check(recv);
2967 // profile this call
2968 __ profile_final_call(rax);
2970 __ jump_from_interpreted(method, rax);
2972 __ bind(notFinal);
2974 // get receiver klass
2975 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2976 // Keep recv in rcx; the callee expects it there
2977 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2978 __ verify_oop(rax);
2980 // profile this call
2981 __ profile_virtual_call(rax, rdi, rdx);
2983 // get target methodOop & entry point
2984 const int base = instanceKlass::vtable_start_offset() * wordSize;
2985 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2986 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2987 __ jump_from_interpreted(method, rdx);
2988 }
2991 void TemplateTable::invokevirtual(int byte_no) {
2992 transition(vtos, vtos);
2993 prepare_invoke(rbx, noreg, byte_no, bytecode());
2995 // rbx: index
2996 // rcx: receiver
2997 // rdx: flags
2999 invokevirtual_helper(rbx, rcx, rdx);
3000 }
3003 void TemplateTable::invokespecial(int byte_no) {
3004 transition(vtos, vtos);
3005 prepare_invoke(rbx, noreg, byte_no, bytecode());
3006 // do the call
3007 __ verify_oop(rbx);
3008 __ profile_call(rax);
3009 __ jump_from_interpreted(rbx, rax);
3010 }
3013 void TemplateTable::invokestatic(int byte_no) {
3014 transition(vtos, vtos);
3015 prepare_invoke(rbx, noreg, byte_no, bytecode());
3016 // do the call
3017 __ verify_oop(rbx);
3018 __ profile_call(rax);
3019 __ jump_from_interpreted(rbx, rax);
3020 }
3023 void TemplateTable::fast_invokevfinal(int byte_no) {
3024 transition(vtos, vtos);
3025 __ stop("fast_invokevfinal not used on x86");
3026 }
3029 void TemplateTable::invokeinterface(int byte_no) {
3030 transition(vtos, vtos);
3031 prepare_invoke(rax, rbx, byte_no, bytecode());
3033 // rax: Interface
3034 // rbx: index
3035 // rcx: receiver
3036 // rdx: flags
3038 // Special case of invokeinterface called for virtual method of
3039 // java.lang.Object. See cpCacheOop.cpp for details.
3040 // This code isn't produced by javac, but could be produced by
3041 // another compliant java compiler.
3042 Label notMethod;
3043 __ movl(rdi, rdx);
3044 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3045 __ jcc(Assembler::zero, notMethod);
3047 invokevirtual_helper(rbx, rcx, rdx);
3048 __ bind(notMethod);
3050 // Get receiver klass into rdx - also a null check
3051 __ restore_locals(); // restore rdi
3052 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
3053 __ verify_oop(rdx);
3055 // profile this call
3056 __ profile_virtual_call(rdx, rsi, rdi);
3058 Label no_such_interface, no_such_method;
3060 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3061 rdx, rax, rbx,
3062 // outputs: method, scan temp. reg
3063 rbx, rsi,
3064 no_such_interface);
3066 // rbx: methodOop to call
3067 // rcx: receiver
3068 // Check for abstract method error
3069 // Note: This should be done more efficiently via a throw_abstract_method_error
3070 // interpreter entry point and a conditional jump to it in case of a null
3071 // method.
3072 __ testptr(rbx, rbx);
3073 __ jcc(Assembler::zero, no_such_method);
3075 // do the call
3076 // rcx: receiver
3077 // rbx: methodOop
3078 __ jump_from_interpreted(rbx, rdx);
3079 __ should_not_reach_here();
3081 // exception handling code follows...
3082 // note: must restore interpreter registers to canonical
3083 // state for exception handling to work correctly!
3085 __ bind(no_such_method);
3086 // throw exception
3087 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3088 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3089 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3090 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3091 // the call_VM checks for exception, so we should never return here.
3092 __ should_not_reach_here();
3094 __ bind(no_such_interface);
3095 // throw exception
3096 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3097 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3098 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3099 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3100 InterpreterRuntime::throw_IncompatibleClassChangeError));
3101 // the call_VM checks for exception, so we should never return here.
3102 __ should_not_reach_here();
3103 }
3105 //----------------------------------------------------------------------------------------------------
3106 // Allocation
3108 void TemplateTable::_new() {
3109 transition(vtos, atos);
3110 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3111 Label slow_case;
3112 Label done;
3113 Label initialize_header;
3114 Label initialize_object; // including clearing the fields
3115 Label allocate_shared;
3117 __ get_cpool_and_tags(rcx, rax);
3118 // get instanceKlass
3119 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3120 __ push(rcx); // save the klass for initializing the header (popped below)
3122 // make sure the class we're about to instantiate has been resolved.
3123 // Note: slow_case does a pop of the stack, which is why we loaded the class and pushed it above
3124 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3125 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3126 __ jcc(Assembler::notEqual, slow_case);
3128 // make sure klass is initialized & doesn't have a finalizer:
3129 // first, check that klass is fully initialized
3130 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3131 __ jcc(Assembler::notEqual, slow_case);
3133 // get instance_size in instanceKlass (scaled to a count of bytes)
3134 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3135 // test to see if it has a finalizer or is malformed in some way
3136 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3137 __ jcc(Assembler::notZero, slow_case);
3139 //
3140 // Allocate the instance
3141 // 1) Try to allocate in the TLAB
3142 // 2) if fail and the object is large allocate in the shared Eden
3143 // 3) if the above fails (or is not applicable), go to a slow case
3144 // (creates a new TLAB, etc.)
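//
// In outline (a C-like sketch of the fast paths below, assuming UseTLAB):
//   obj = thread->tlab_top;  end = obj + instance_size;
//   if (end <= thread->tlab_end) { thread->tlab_top = end; goto initialize; }
//   else if (allow_shared_alloc) {
//     do { obj = *heap_top;  end = obj + instance_size;
//          if (end > *heap_end) goto slow_case;
//     } while (!CAS(heap_top, obj, end));   // the locked cmpxchg below
//     goto initialize;
//   } else goto slow_case;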
3146 const bool allow_shared_alloc =
3147 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3149 if (UseTLAB) {
3150 const Register thread = rcx;
3152 __ get_thread(thread);
3153 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3154 __ lea(rbx, Address(rax, rdx, Address::times_1));
3155 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3156 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3157 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3158 if (ZeroTLAB) {
3159 // the fields have been already cleared
3160 __ jmp(initialize_header);
3161 } else {
3162 // initialize both the header and fields
3163 __ jmp(initialize_object);
3164 }
3165 }
3167 // Allocation in the shared Eden, if allowed.
3168 //
3169 // rdx: instance size in bytes
3170 if (allow_shared_alloc) {
3171 __ bind(allocate_shared);
3173 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3175 Label retry;
3176 __ bind(retry);
3177 __ movptr(rax, heap_top);
3178 __ lea(rbx, Address(rax, rdx, Address::times_1));
3179 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3180 __ jcc(Assembler::above, slow_case);
3182 // Compare rax with the top addr, and if still equal, store the new
3183 // top addr in rbx at the address of the top addr pointer. Sets ZF if it was
3184 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3185 //
3186 // rax: object begin
3187 // rbx: object end
3188 // rdx: instance size in bytes
3189 __ locked_cmpxchgptr(rbx, heap_top);
3191 // if someone beat us on the allocation, try again, otherwise continue
3192 __ jcc(Assembler::notEqual, retry);
3193 }
3195 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3196 // The object is initialized before the header. If the object size is
3197 // zero, go directly to the header initialization.
3198 __ bind(initialize_object);
3199 __ decrement(rdx, sizeof(oopDesc));
3200 __ jcc(Assembler::zero, initialize_header);
3202 // Initialize topmost object field, divide rdx by 8, check if odd and
3203 // test if zero.
3204 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3205 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3207 // rdx must have been multiple of 8
3208 #ifdef ASSERT
3209 // make sure rdx was multiple of 8
3210 Label L;
3211 // Ignore partial flag stall after shrl() since it is debug VM
3212 __ jccb(Assembler::carryClear, L);
3213 __ stop("object size is not multiple of 2 - adjust this code");
3214 __ bind(L);
3215 // rdx must be > 0, no extra check needed here
3216 #endif
3218 // initialize remaining object fields: rdx was a multiple of 8
3219 { Label loop;
3220 __ bind(loop);
3221 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3222 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3223 __ decrement(rdx);
3224 __ jcc(Assembler::notZero, loop);
3225 }
3227 // initialize object header only.
3228 __ bind(initialize_header);
3229 if (UseBiasedLocking) {
3230 __ pop(rcx); // get saved klass back in the register.
3231 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3232 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3233 } else {
3234 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3235 (int32_t)markOopDesc::prototype()); // header
3236 __ pop(rcx); // get saved klass back in the register.
3237 }
3238 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3240 {
3241 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3242 // Trigger dtrace event for fastpath
3243 __ push(atos);
3244 __ call_VM_leaf(
3245 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3246 __ pop(atos);
3247 }
3249 __ jmp(done);
3250 }
3252 // slow case
3253 __ bind(slow_case);
3254 __ pop(rcx); // restore stack pointer to what it was when we came in.
3255 __ get_constant_pool(rax);
3256 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3257 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3259 // continue
3260 __ bind(done);
3261 }
3264 void TemplateTable::newarray() {
3265 transition(itos, atos);
3266 __ push_i(rax); // make sure everything is on the stack
3267 __ load_unsigned_byte(rdx, at_bcp(1));
3268 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3269 __ pop_i(rdx); // discard size
3270 }
3273 void TemplateTable::anewarray() {
3274 transition(itos, atos);
3275 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3276 __ get_constant_pool(rcx);
3277 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3278 }
3281 void TemplateTable::arraylength() {
3282 transition(atos, itos);
3283 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3284 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3285 }
3288 void TemplateTable::checkcast() {
3289 transition(atos, atos);
3290 Label done, is_null, ok_is_subtype, quicked, resolved;
3291 __ testptr(rax, rax); // Object is in EAX
3292 __ jcc(Assembler::zero, is_null);
3294 // Get cpool & tags index
3295 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3296 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3297 // See if bytecode has already been quicked
3298 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3299 __ jcc(Assembler::equal, quicked);
3301 __ push(atos);
3302 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3303 __ pop_ptr(rdx);
3304 __ jmpb(resolved);
3306 // Get superklass in EAX and subklass in EBX
3307 __ bind(quicked);
3308 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3309 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3311 __ bind(resolved);
3312 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3314 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3315 // Superklass in EAX. Subklass in EBX.
3316 __ gen_subtype_check( rbx, ok_is_subtype );
3318 // Come here on failure
3319 __ push(rdx);
3320 // object is at TOS
3321 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3323 // Come here on success
3324 __ bind(ok_is_subtype);
3325 __ mov(rax,rdx); // Restore object in EDX
3327 // Collect counts on whether this check-cast sees NULLs a lot or not.
3328 if (ProfileInterpreter) {
3329 __ jmp(done);
3330 __ bind(is_null);
3331 __ profile_null_seen(rcx);
3332 } else {
3333 __ bind(is_null); // same as 'done'
3334 }
3335 __ bind(done);
3336 }
3339 void TemplateTable::instanceof() {
3340 transition(atos, itos);
3341 Label done, is_null, ok_is_subtype, quicked, resolved;
3342 __ testptr(rax, rax);
3343 __ jcc(Assembler::zero, is_null);
3345 // Get cpool & tags index
3346 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3347 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3348 // See if bytecode has already been quicked
3349 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3350 __ jcc(Assembler::equal, quicked);
3352 __ push(atos);
3353 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3354 __ pop_ptr(rdx);
3355 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3356 __ jmp(resolved);
3358 // Get superklass in EAX and subklass in EDX
3359 __ bind(quicked);
3360 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3361 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3363 __ bind(resolved);
3365 // Generate subtype check. Blows ECX. Resets EDI.
3366 // Superklass in EAX. Subklass in EDX.
3367 __ gen_subtype_check( rdx, ok_is_subtype );
3369 // Come here on failure
3370 __ xorl(rax,rax);
3371 __ jmpb(done);
3372 // Come here on success
3373 __ bind(ok_is_subtype);
3374 __ movl(rax, 1);
3376 // Collect counts on whether this test sees NULLs a lot or not.
3377 if (ProfileInterpreter) {
3378 __ jmp(done);
3379 __ bind(is_null);
3380 __ profile_null_seen(rcx);
3381 } else {
3382 __ bind(is_null); // same as 'done'
3383 }
3384 __ bind(done);
3385 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3386 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3387 }
3390 //----------------------------------------------------------------------------------------------------
3391 // Breakpoints
3392 void TemplateTable::_breakpoint() {
3394 // Note: We get here even if we are single stepping.
3395 // jbug insists on setting breakpoints at every bytecode
3396 // even if we are in single step mode.
3398 transition(vtos, vtos);
3400 // get the unpatched byte code
3401 __ get_method(rcx);
3402 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3403 __ mov(rbx, rax);
3405 // post the breakpoint event
3406 __ get_method(rcx);
3407 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3409 // complete the execution of original bytecode
3410 __ dispatch_only_normal(vtos);
3411 }
3414 //----------------------------------------------------------------------------------------------------
3415 // Exceptions
3417 void TemplateTable::athrow() {
3418 transition(atos, vtos);
3419 __ null_check(rax);
3420 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3421 }
3424 //----------------------------------------------------------------------------------------------------
3425 // Synchronization
3426 //
3427 // Note: monitorenter & exit are symmetric routines; which is reflected
3428 // in the assembly code structure as well
3429 //
3430 // Stack layout:
3431 //
3432 // [expressions ] <--- rsp = expression stack top
3433 // ..
3434 // [expressions ]
3435 // [monitor entry] <--- monitor block top = expression stack bot
3436 // ..
3437 // [monitor entry]
3438 // [frame data ] <--- monitor block bot
3439 // ...
3440 // [saved rbp   ] <--- rbp
3443 void TemplateTable::monitorenter() {
3444 transition(atos, vtos);
3446 // check for NULL object
3447 __ null_check(rax);
3449 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3450 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3451 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3452 Label allocated;
3454 // initialize entry pointer
3455 __ xorl(rdx, rdx); // points to free slot or NULL
3457 // find a free slot in the monitor block (result in rdx)
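// In outline:
//   for (e = monitor_block_top; e != monitor_block_bot; e += entry_size) {
//     if (e->obj == NULL)   rdx = e;   // remember free slot (last one seen wins)
//     if (e->obj == lockee) break;     // stop at an entry for the same object
//   }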
3458 { Label entry, loop, exit;
3459 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3460 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3461 __ jmpb(entry);
3463 __ bind(loop);
3464 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3466 // TODO - need new func here - kbt
3467 if (VM_Version::supports_cmov()) {
3468 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3469 } else {
3470 Label L;
3471 __ jccb(Assembler::notEqual, L);
3472 __ mov(rdx, rcx); // if not used then remember entry in rdx
3473 __ bind(L);
3474 }
3475 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3476 __ jccb(Assembler::equal, exit); // if same object then stop searching
3477 __ addptr(rcx, entry_size); // otherwise advance to next entry
3478 __ bind(entry);
3479 __ cmpptr(rcx, rbx); // check if bottom reached
3480 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3481 __ bind(exit);
3482 }
3484 __ testptr(rdx, rdx); // check if a slot has been found
3485 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3487 // allocate one if there's no free slot
3488 { Label entry, loop;
3489 // 1. compute new pointers // rsp: old expression stack top
3490 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3491 __ subptr(rsp, entry_size); // move expression stack top
3492 __ subptr(rdx, entry_size); // move expression stack bottom
3493 __ mov(rcx, rsp); // set start value for copy loop
3494 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3495 __ jmp(entry);
3496 // 2. move expression stack contents
3497 __ bind(loop);
3498 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3499 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3500 __ addptr(rcx, wordSize); // advance to next word
3501 __ bind(entry);
3502 __ cmpptr(rcx, rdx); // check if bottom reached
3503 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3504 }
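// A sketch of the allocation just emitted (assumed names): grow the
// monitor block by sliding the whole expression stack down one entry
// and handing the vacated words out as the new monitor entry:
//
//   top -= entry_words;                    // new expression stack top (rsp)
//   bot -= entry_words;                    // new expression stack bottom (rdx)
//   for (intptr_t* p = top; p != bot; p++)
//     *p = *(p + entry_words);             // copy each stack word down
//   // the entry_size bytes just above the new bottom are the fresh entry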
3506 // call run-time routine
3507 // rdx: points to monitor entry
3508 __ bind(allocated);
3510 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3511 // The object has already been popped from the stack, so the expression stack looks correct.
3512 __ increment(rsi);
3514 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3515 __ lock_object(rdx);
3517 // check to make sure this monitor doesn't cause stack overflow after locking
3518 __ save_bcp(); // in case of exception
3519 __ generate_stack_overflow_check(0);
3521 // The bcp has already been incremented. Just need to dispatch to next instruction.
3522 __ dispatch_next(vtos);
3523 }
3526 void TemplateTable::monitorexit() {
3527 transition(atos, vtos);
3529 // check for NULL object
3530 __ null_check(rax);
3532 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3533 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3534 const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
3535 Label found;
3537 // find matching slot
3538 { Label entry, loop;
3539 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3540 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3541 __ jmpb(entry);
3543 __ bind(loop);
3544 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3545 __ jcc(Assembler::equal, found); // if same object then stop searching
3546 __ addptr(rdx, entry_size); // otherwise advance to next entry
3547 __ bind(entry);
3548 __ cmpptr(rdx, rbx); // check if bottom reached
3549 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3550 }
3552 // Error handling: unlocking was not block-structured (no entry in this frame owns the object)
3553 Label end;
3554 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3555 __ should_not_reach_here();
3557 // call run-time routine
3558 // rdx: points to monitor entry
3559 __ bind(found);
3560 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3561 __ unlock_object(rdx);
3562 __ pop_ptr(rax); // discard object
3563 __ bind(end);
3564 }
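// A sketch in portable C++ of monitorexit's overall flow (assumed
// helper names; the real work is done by unlock_object and a VM call).
// The scan above falls through into the throw; only a match jumps past
// the error path to 'found':
static void sketch_unlock(SketchMonitorEntry* e);  // stand-in for unlock_object
static void sketch_throw_illegal_monitor_state();  // stand-in for the VM call
static void sketch_monitorexit(SketchMonitorEntry* top,
                               SketchMonitorEntry* bot, void* obj) {
  for (SketchMonitorEntry* e = top; e != bot; e++) {
    if (e->obj == obj) {                   // 'found'
      sketch_unlock(e);
      return;
    }
  }
  sketch_throw_illegal_monitor_state();    // unlocking was not block-structured
}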
3567 //----------------------------------------------------------------------------------------------------
3568 // Wide instructions
3570 void TemplateTable::wide() {
3571 transition(vtos, vtos);
3572 __ load_unsigned_byte(rbx, at_bcp(1));
3573 ExternalAddress wtable((address)Interpreter::_wentry_point);
3574 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3575 // Note: the rsi increment step is part of the individual wide bytecode implementations
3576 }
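// A sketch (assumed names) of the dispatch above: the wide-entry table
// is a 256-slot array of generated stubs indexed by the bytecode that
// follows the wide prefix; each stub advances the bcp (rsi) itself:
typedef void (*SketchWideEntry)();
extern SketchWideEntry sketch_wentry_point[256];  // stand-in for Interpreter::_wentry_point
static void sketch_wide_dispatch(const unsigned char* bcp) {
  sketch_wentry_point[bcp[1]]();  // bcp[0] is the wide prefix itself
}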
3579 //----------------------------------------------------------------------------------------------------
3580 // Multi arrays
3582 void TemplateTable::multianewarray() {
3583 transition(vtos, atos);
3584 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3585 // last dim is on top of stack; we want address of first one:
3586 // first_addr = last_addr + ndims*stackElementSize - 1*wordSize
3587 // (with an untagged stack this is last_addr + (ndims - 1)*wordSize)
3588 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3589 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax
3590 __ load_unsigned_byte(rbx, at_bcp(3));
3591 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3592 }
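// Worked example of the address computation above, assuming an untagged
// stack (stackElementSize == wordSize): for ndims == 3 the dimension
// counts sit on the expression stack as
//   rsp + 0*wordSize : dim3  (last, on top)
//   rsp + 1*wordSize : dim2
//   rsp + 2*wordSize : dim1  (first)
// and the lea yields rsp + 3*wordSize - wordSize == rsp + 2*wordSize,
// the address of the first dimension that the runtime call expects.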
3594 #endif /* !CC_INTERP */