Wed, 16 Feb 2011 13:30:31 -0800
7013964: openjdk LICENSE file needs rebranding
Reviewed-by: darcy, katleman, jjg
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodDataOop.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
38 #ifndef CC_INTERP
39 #define __ _masm->
41 //----------------------------------------------------------------------------------------------------
42 // Platform-dependent initialization
44 void TemplateTable::pd_initialize() {
  45   // No i486-specific initialization
46 }
48 //----------------------------------------------------------------------------------------------------
49 // Address computation
51 // local variables
52 static inline Address iaddress(int n) {
53 return Address(rdi, Interpreter::local_offset_in_bytes(n));
54 }
56 static inline Address laddress(int n) { return iaddress(n + 1); }
57 static inline Address haddress(int n) { return iaddress(n + 0); }
58 static inline Address faddress(int n) { return iaddress(n); }
59 static inline Address daddress(int n) { return laddress(n); }
60 static inline Address aaddress(int n) { return iaddress(n); }
62 static inline Address iaddress(Register r) {
63 return Address(rdi, r, Interpreter::stackElementScale());
64 }
65 static inline Address laddress(Register r) {
66 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
67 }
68 static inline Address haddress(Register r) {
69 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
70 }
72 static inline Address faddress(Register r) { return iaddress(r); }
73 static inline Address daddress(Register r) { return laddress(r); }
74 static inline Address aaddress(Register r) { return iaddress(r); }
76 // expression stack
77 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
  78 // data below rsp, which is potentially unsafe in an MT environment;
79 // an interrupt may overwrite that data.)
80 static inline Address at_rsp () {
81 return Address(rsp, 0);
82 }
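// (Illustrative sketch, not generated code: a hypothetical helper returning
//  Address(rsp, -wordSize) would place data below rsp; a signal handler
//  arriving between the store and a later load runs on this same stack and
//  may clobber it, which is why only at_rsp() and positive offsets exist.)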
  84 // At top of Java expression stack which may be different from rsp().  It
  85 // isn't for category 1 values.
86 static inline Address at_tos () {
87 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
88 return tos;
89 }
91 static inline Address at_tos_p1() {
92 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
93 }
95 static inline Address at_tos_p2() {
96 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
97 }
99 // Condition conversion
100 static Assembler::Condition j_not(TemplateTable::Condition cc) {
101 switch (cc) {
102 case TemplateTable::equal : return Assembler::notEqual;
103 case TemplateTable::not_equal : return Assembler::equal;
104 case TemplateTable::less : return Assembler::greaterEqual;
105 case TemplateTable::less_equal : return Assembler::greater;
106 case TemplateTable::greater : return Assembler::lessEqual;
107 case TemplateTable::greater_equal: return Assembler::less;
108 }
109 ShouldNotReachHere();
110 return Assembler::zero;
111 }
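// Example (as used by the conditional-branch templates below): emit the
// negated condition so the not-taken path falls through, e.g. in if_icmp:
//
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(cc), not_taken);  // cc == less  =>  jcc(greaterEqual, ...)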
114 //----------------------------------------------------------------------------------------------------
 115 // Miscellaneous helper routines
117 // Store an oop (or NULL) at the address described by obj.
118 // If val == noreg this means store a NULL
120 static void do_oop_store(InterpreterMacroAssembler* _masm,
121 Address obj,
122 Register val,
123 BarrierSet::Name barrier,
124 bool precise) {
125 assert(val == noreg || val == rax, "parameter is just for looks");
126 switch (barrier) {
127 #ifndef SERIALGC
128 case BarrierSet::G1SATBCT:
129 case BarrierSet::G1SATBCTLogging:
130 {
131 // flatten object address if needed
132 // We do it regardless of precise because we need the registers
133 if (obj.index() == noreg && obj.disp() == 0) {
134 if (obj.base() != rdx) {
135 __ movl(rdx, obj.base());
136 }
137 } else {
138 __ leal(rdx, obj);
139 }
140 __ get_thread(rcx);
141 __ save_bcp();
142 __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
144 // Do the actual store
145 // noreg means NULL
146 if (val == noreg) {
147 __ movptr(Address(rdx, 0), NULL_WORD);
148 // No post barrier for NULL
149 } else {
150 __ movl(Address(rdx, 0), val);
151 __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
152 }
153 __ restore_bcp();
155 }
156 break;
157 #endif // SERIALGC
158 case BarrierSet::CardTableModRef:
159 case BarrierSet::CardTableExtension:
160 {
161 if (val == noreg) {
162 __ movptr(obj, NULL_WORD);
163 } else {
164 __ movl(obj, val);
165 // flatten object address if needed
166 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
167 __ store_check(obj.base());
168 } else {
169 __ leal(rdx, obj);
170 __ store_check(rdx);
171 }
172 }
173 }
174 break;
175 case BarrierSet::ModRef:
176 case BarrierSet::Other:
177 if (val == noreg) {
178 __ movptr(obj, NULL_WORD);
179 } else {
180 __ movl(obj, val);
181 }
182 break;
183 default :
184 ShouldNotReachHere();
186 }
187 }
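// Typical call (see aastore below), storing a reference with the barrier
// appropriate for the active collector:
//
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
//
// Passing noreg instead of rax stores NULL and skips the post barrier.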
189 Address TemplateTable::at_bcp(int offset) {
190 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
191 return Address(rsi, offset);
192 }
195 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
196 Register scratch,
197 bool load_bc_into_scratch/*=true*/) {
199 if (!RewriteBytecodes) return;
200 // the pair bytecodes have already done the load.
201 if (load_bc_into_scratch) {
202 __ movl(bc, bytecode);
203 }
204 Label patch_done;
205 if (JvmtiExport::can_post_breakpoint()) {
206 Label fast_patch;
207 // if a breakpoint is present we can't rewrite the stream directly
208 __ movzbl(scratch, at_bcp(0));
209 __ cmpl(scratch, Bytecodes::_breakpoint);
210 __ jcc(Assembler::notEqual, fast_patch);
211 __ get_method(scratch);
212 // Let breakpoint table handling rewrite to quicker bytecode
213 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
214 #ifndef ASSERT
215 __ jmpb(patch_done);
216 #else
217 __ jmp(patch_done);
218 #endif
219 __ bind(fast_patch);
220 }
221 #ifdef ASSERT
222 Label okay;
223 __ load_unsigned_byte(scratch, at_bcp(0));
224 __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
225 __ jccb(Assembler::equal, okay);
226 __ cmpl(scratch, bc);
227 __ jcc(Assembler::equal, okay);
228 __ stop("patching the wrong bytecode");
229 __ bind(okay);
230 #endif
231 // patch bytecode
232 __ movb(at_bcp(0), bc);
233 __ bind(patch_done);
234 }
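// Typical call (see iload below): the fast variant has already been selected
// into a register, so the reload of bc is suppressed:
//
//   __ movl(rcx, Bytecodes::_fast_iload);
//   patch_bytecode(Bytecodes::_iload, rcx, rbx, false);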
236 //----------------------------------------------------------------------------------------------------
237 // Individual instructions
239 void TemplateTable::nop() {
240 transition(vtos, vtos);
241 // nothing to do
242 }
244 void TemplateTable::shouldnotreachhere() {
245 transition(vtos, vtos);
246 __ stop("shouldnotreachhere bytecode");
247 }
251 void TemplateTable::aconst_null() {
252 transition(vtos, atos);
253 __ xorptr(rax, rax);
254 }
257 void TemplateTable::iconst(int value) {
258 transition(vtos, itos);
259 if (value == 0) {
260 __ xorptr(rax, rax);
261 } else {
262 __ movptr(rax, value);
263 }
264 }
267 void TemplateTable::lconst(int value) {
268 transition(vtos, ltos);
269 if (value == 0) {
270 __ xorptr(rax, rax);
271 } else {
272 __ movptr(rax, value);
273 }
274 assert(value >= 0, "check this code");
275 __ xorptr(rdx, rdx);
276 }
279 void TemplateTable::fconst(int value) {
280 transition(vtos, ftos);
281 if (value == 0) { __ fldz();
282 } else if (value == 1) { __ fld1();
 283   } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // could use a better sequence here
284 } else { ShouldNotReachHere();
285 }
286 }
289 void TemplateTable::dconst(int value) {
290 transition(vtos, dtos);
291 if (value == 0) { __ fldz();
292 } else if (value == 1) { __ fld1();
293 } else { ShouldNotReachHere();
294 }
295 }
298 void TemplateTable::bipush() {
299 transition(vtos, itos);
300 __ load_signed_byte(rax, at_bcp(1));
301 }
304 void TemplateTable::sipush() {
305 transition(vtos, itos);
306 __ load_unsigned_short(rax, at_bcp(1));
307 __ bswapl(rax);
308 __ sarl(rax, 16);
309 }
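// Worked example: for "sipush 0xFF 0xFE" (operand -2, big-endian in the
// stream) the unsigned little-endian load yields rax = 0x0000FEFF, bswapl
// gives 0xFFFE0000, and sarl(16) sign-extends to 0xFFFFFFFE = -2.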
311 void TemplateTable::ldc(bool wide) {
312 transition(vtos, vtos);
313 Label call_ldc, notFloat, notClass, Done;
315 if (wide) {
316 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
317 } else {
318 __ load_unsigned_byte(rbx, at_bcp(1));
319 }
320 __ get_cpool_and_tags(rcx, rax);
321 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
322 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
324 // get type
325 __ xorptr(rdx, rdx);
326 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
328 // unresolved string - get the resolved string
329 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
330 __ jccb(Assembler::equal, call_ldc);
332 // unresolved class - get the resolved class
333 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
334 __ jccb(Assembler::equal, call_ldc);
336 // unresolved class in error (resolution failed) - call into runtime
337 // so that the same error from first resolution attempt is thrown.
338 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
339 __ jccb(Assembler::equal, call_ldc);
341 // resolved class - need to call vm to get java mirror of the class
342 __ cmpl(rdx, JVM_CONSTANT_Class);
343 __ jcc(Assembler::notEqual, notClass);
345 __ bind(call_ldc);
346 __ movl(rcx, wide);
347 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
348 __ push(atos);
349 __ jmp(Done);
351 __ bind(notClass);
352 __ cmpl(rdx, JVM_CONSTANT_Float);
353 __ jccb(Assembler::notEqual, notFloat);
354 // ftos
355 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
356 __ push(ftos);
357 __ jmp(Done);
359 __ bind(notFloat);
360 #ifdef ASSERT
361 { Label L;
362 __ cmpl(rdx, JVM_CONSTANT_Integer);
363 __ jcc(Assembler::equal, L);
364 __ cmpl(rdx, JVM_CONSTANT_String);
365 __ jcc(Assembler::equal, L);
366 __ stop("unexpected tag type in ldc");
367 __ bind(L);
368 }
369 #endif
370 Label isOop;
371 // atos and itos
 372   // String is the only oop type we will see here
373 __ cmpl(rdx, JVM_CONSTANT_String);
374 __ jccb(Assembler::equal, isOop);
375 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
376 __ push(itos);
377 __ jmp(Done);
378 __ bind(isOop);
379 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
380 __ push(atos);
382 if (VerifyOops) {
383 __ verify_oop(rax);
384 }
385 __ bind(Done);
386 }
388 // Fast path for caching oop constants.
389 // %%% We should use this to handle Class and String constants also.
390 // %%% It will simplify the ldc/primitive path considerably.
391 void TemplateTable::fast_aldc(bool wide) {
392 transition(vtos, atos);
394 if (!EnableMethodHandles) {
395 // We should not encounter this bytecode if !EnableMethodHandles.
396 // The verifier will stop it. However, if we get past the verifier,
397 // this will stop the thread in a reasonable way, without crashing the JVM.
398 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
399 InterpreterRuntime::throw_IncompatibleClassChangeError));
400 // the call_VM checks for exception, so we should never return here.
401 __ should_not_reach_here();
402 return;
403 }
405 const Register cache = rcx;
406 const Register index = rdx;
408 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
409 if (VerifyOops) {
410 __ verify_oop(rax);
411 }
413 Label L_done, L_throw_exception;
414 const Register con_klass_temp = rcx; // same as Rcache
415 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
416 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
417 __ jcc(Assembler::notEqual, L_done);
418 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
419 __ jcc(Assembler::notEqual, L_throw_exception);
420 __ xorptr(rax, rax);
421 __ jmp(L_done);
423 // Load the exception from the system-array which wraps it:
424 __ bind(L_throw_exception);
425 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
426 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
428 __ bind(L_done);
429 }
431 void TemplateTable::ldc2_w() {
432 transition(vtos, vtos);
433 Label Long, Done;
434 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
436 __ get_cpool_and_tags(rcx, rax);
437 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
438 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
440 // get type
441 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
442 __ jccb(Assembler::notEqual, Long);
443 // dtos
444 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
445 __ push(dtos);
446 __ jmpb(Done);
448 __ bind(Long);
449 // ltos
450 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
451 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
453 __ push(ltos);
455 __ bind(Done);
456 }
459 void TemplateTable::locals_index(Register reg, int offset) {
460 __ load_unsigned_byte(reg, at_bcp(offset));
461 __ negptr(reg);
462 }
465 void TemplateTable::iload() {
466 transition(vtos, itos);
467 if (RewriteFrequentPairs) {
468 Label rewrite, done;
470 // get next byte
471 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 472     // if the next bytecode is _iload, wait to rewrite until the pair is
 473     // complete; we only want to rewrite the last two iloads in a pair.
 474     // Seeing _fast_iload next means an earlier pass already rewrote it,
 475     // so this iload and the following load form an iload pair.
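    // (Sketch of the net effect, assuming the stream "iload a; iload b":
    //  on the first pass "iload a" sees _iload next and waits, while
    //  "iload b" is patched to _fast_iload; on the next pass "iload a"
    //  sees _fast_iload and is patched to _fast_iload2, which loads both
    //  locals in one template. "iload a; caload" becomes _fast_icaload.)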
476 __ cmpl(rbx, Bytecodes::_iload);
477 __ jcc(Assembler::equal, done);
479 __ cmpl(rbx, Bytecodes::_fast_iload);
480 __ movl(rcx, Bytecodes::_fast_iload2);
481 __ jccb(Assembler::equal, rewrite);
483 // if _caload, rewrite to fast_icaload
484 __ cmpl(rbx, Bytecodes::_caload);
485 __ movl(rcx, Bytecodes::_fast_icaload);
486 __ jccb(Assembler::equal, rewrite);
488 // rewrite so iload doesn't check again.
489 __ movl(rcx, Bytecodes::_fast_iload);
491 // rewrite
492 // rcx: fast bytecode
493 __ bind(rewrite);
494 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
495 __ bind(done);
496 }
498 // Get the local value into tos
499 locals_index(rbx);
500 __ movl(rax, iaddress(rbx));
501 }
504 void TemplateTable::fast_iload2() {
505 transition(vtos, itos);
506 locals_index(rbx);
507 __ movl(rax, iaddress(rbx));
508 __ push(itos);
509 locals_index(rbx, 3);
510 __ movl(rax, iaddress(rbx));
511 }
513 void TemplateTable::fast_iload() {
514 transition(vtos, itos);
515 locals_index(rbx);
516 __ movl(rax, iaddress(rbx));
517 }
520 void TemplateTable::lload() {
521 transition(vtos, ltos);
522 locals_index(rbx);
523 __ movptr(rax, laddress(rbx));
524 NOT_LP64(__ movl(rdx, haddress(rbx)));
525 }
528 void TemplateTable::fload() {
529 transition(vtos, ftos);
530 locals_index(rbx);
531 __ fld_s(faddress(rbx));
532 }
535 void TemplateTable::dload() {
536 transition(vtos, dtos);
537 locals_index(rbx);
538 __ fld_d(daddress(rbx));
539 }
542 void TemplateTable::aload() {
543 transition(vtos, atos);
544 locals_index(rbx);
545 __ movptr(rax, aaddress(rbx));
546 }
549 void TemplateTable::locals_index_wide(Register reg) {
550 __ movl(reg, at_bcp(2));
551 __ bswapl(reg);
552 __ shrl(reg, 16);
553 __ negptr(reg);
554 }
557 void TemplateTable::wide_iload() {
558 transition(vtos, itos);
559 locals_index_wide(rbx);
560 __ movl(rax, iaddress(rbx));
561 }
564 void TemplateTable::wide_lload() {
565 transition(vtos, ltos);
566 locals_index_wide(rbx);
567 __ movptr(rax, laddress(rbx));
568 NOT_LP64(__ movl(rdx, haddress(rbx)));
569 }
572 void TemplateTable::wide_fload() {
573 transition(vtos, ftos);
574 locals_index_wide(rbx);
575 __ fld_s(faddress(rbx));
576 }
579 void TemplateTable::wide_dload() {
580 transition(vtos, dtos);
581 locals_index_wide(rbx);
582 __ fld_d(daddress(rbx));
583 }
586 void TemplateTable::wide_aload() {
587 transition(vtos, atos);
588 locals_index_wide(rbx);
589 __ movptr(rax, aaddress(rbx));
590 }
592 void TemplateTable::index_check(Register array, Register index) {
593 // Pop ptr into array
594 __ pop_ptr(array);
595 index_check_without_pop(array, index);
596 }
598 void TemplateTable::index_check_without_pop(Register array, Register index) {
599 // destroys rbx,
600 // check array
601 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
602 LP64_ONLY(__ movslq(index, index));
603 // check index
604 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
605 if (index != rbx) {
606 // ??? convention: move aberrant index into rbx, for exception message
607 assert(rbx != array, "different registers");
608 __ mov(rbx, index);
609 }
610 __ jump_cc(Assembler::aboveEqual,
611 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
612 }
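// Note: jump_cc uses aboveEqual, an unsigned compare, so a negative index
// (a huge value when viewed unsigned) takes the same out-of-bounds path as
// index >= length; likewise the null_check above can typically rely on the
// hardware trap raised by the length-field access, making the NULL test
// implicit.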
615 void TemplateTable::iaload() {
616 transition(itos, itos);
617 // rdx: array
618 index_check(rdx, rax); // kills rbx,
619 // rax,: index
620 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
621 }
624 void TemplateTable::laload() {
625 transition(itos, ltos);
626 // rax,: index
627 // rdx: array
628 index_check(rdx, rax);
629 __ mov(rbx, rax);
630 // rbx,: index
631 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
632 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
633 }
636 void TemplateTable::faload() {
637 transition(itos, ftos);
638 // rdx: array
639 index_check(rdx, rax); // kills rbx,
640 // rax,: index
641 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
642 }
645 void TemplateTable::daload() {
646 transition(itos, dtos);
647 // rdx: array
648 index_check(rdx, rax); // kills rbx,
649 // rax,: index
650 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
651 }
654 void TemplateTable::aaload() {
655 transition(itos, atos);
656 // rdx: array
657 index_check(rdx, rax); // kills rbx,
658 // rax,: index
659 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
660 }
663 void TemplateTable::baload() {
664 transition(itos, itos);
665 // rdx: array
666 index_check(rdx, rax); // kills rbx,
667 // rax,: index
668 // can do better code for P5 - fix this at some point
669 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
670 __ mov(rax, rbx);
671 }
674 void TemplateTable::caload() {
675 transition(itos, itos);
676 // rdx: array
677 index_check(rdx, rax); // kills rbx,
678 // rax,: index
679 // can do better code for P5 - may want to improve this at some point
680 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
681 __ mov(rax, rbx);
682 }
684 // iload followed by caload frequent pair
685 void TemplateTable::fast_icaload() {
686 transition(vtos, itos);
687 // load index out of locals
688 locals_index(rbx);
689 __ movl(rax, iaddress(rbx));
691 // rdx: array
692 index_check(rdx, rax);
693 // rax,: index
694 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
695 __ mov(rax, rbx);
696 }
698 void TemplateTable::saload() {
699 transition(itos, itos);
700 // rdx: array
701 index_check(rdx, rax); // kills rbx,
702 // rax,: index
703 // can do better code for P5 - may want to improve this at some point
704 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
705 __ mov(rax, rbx);
706 }
709 void TemplateTable::iload(int n) {
710 transition(vtos, itos);
711 __ movl(rax, iaddress(n));
712 }
715 void TemplateTable::lload(int n) {
716 transition(vtos, ltos);
717 __ movptr(rax, laddress(n));
718 NOT_LP64(__ movptr(rdx, haddress(n)));
719 }
722 void TemplateTable::fload(int n) {
723 transition(vtos, ftos);
724 __ fld_s(faddress(n));
725 }
728 void TemplateTable::dload(int n) {
729 transition(vtos, dtos);
730 __ fld_d(daddress(n));
731 }
734 void TemplateTable::aload(int n) {
735 transition(vtos, atos);
736 __ movptr(rax, aaddress(n));
737 }
740 void TemplateTable::aload_0() {
741 transition(vtos, atos);
742 // According to bytecode histograms, the pairs:
743 //
744 // _aload_0, _fast_igetfield
745 // _aload_0, _fast_agetfield
746 // _aload_0, _fast_fgetfield
747 //
748 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
749 // bytecode checks if the next bytecode is either _fast_igetfield,
750 // _fast_agetfield or _fast_fgetfield and then rewrites the
751 // current bytecode into a pair bytecode; otherwise it rewrites the current
752 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
753 //
754 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
755 // otherwise we may miss an opportunity for a pair.
756 //
757 // Also rewrite frequent pairs
758 // aload_0, aload_1
759 // aload_0, iload_1
 760   // These pairs require only a small amount of code, so they are the most profitable to rewrite.
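  // (Sketch, assuming getfield has already been rewritten to its _fast_
  //  variant by the field-resolution path: "aload_0; _fast_igetfield #n"
  //  is folded here into "_fast_iaccess_0 #n"; an aload_0 followed by
  //  anything else simply becomes _fast_aload_0.)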
761 if (RewriteFrequentPairs) {
762 Label rewrite, done;
763 // get next byte
764 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
766 // do actual aload_0
767 aload(0);
 769     // if _getfield then delay the rewrite
770 __ cmpl(rbx, Bytecodes::_getfield);
771 __ jcc(Assembler::equal, done);
 773     // if _igetfield then rewrite to _fast_iaccess_0
774 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
775 __ cmpl(rbx, Bytecodes::_fast_igetfield);
776 __ movl(rcx, Bytecodes::_fast_iaccess_0);
777 __ jccb(Assembler::equal, rewrite);
 779     // if _agetfield then rewrite to _fast_aaccess_0
780 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
781 __ cmpl(rbx, Bytecodes::_fast_agetfield);
782 __ movl(rcx, Bytecodes::_fast_aaccess_0);
783 __ jccb(Assembler::equal, rewrite);
 785     // if _fgetfield then rewrite to _fast_faccess_0
786 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
787 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
788 __ movl(rcx, Bytecodes::_fast_faccess_0);
789 __ jccb(Assembler::equal, rewrite);
 791     // else rewrite to _fast_aload_0
792 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
793 __ movl(rcx, Bytecodes::_fast_aload_0);
795 // rewrite
796 // rcx: fast bytecode
797 __ bind(rewrite);
798 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
800 __ bind(done);
801 } else {
802 aload(0);
803 }
804 }
806 void TemplateTable::istore() {
807 transition(itos, vtos);
808 locals_index(rbx);
809 __ movl(iaddress(rbx), rax);
810 }
813 void TemplateTable::lstore() {
814 transition(ltos, vtos);
815 locals_index(rbx);
816 __ movptr(laddress(rbx), rax);
817 NOT_LP64(__ movptr(haddress(rbx), rdx));
818 }
821 void TemplateTable::fstore() {
822 transition(ftos, vtos);
823 locals_index(rbx);
824 __ fstp_s(faddress(rbx));
825 }
828 void TemplateTable::dstore() {
829 transition(dtos, vtos);
830 locals_index(rbx);
831 __ fstp_d(daddress(rbx));
832 }
835 void TemplateTable::astore() {
836 transition(vtos, vtos);
837 __ pop_ptr(rax);
838 locals_index(rbx);
839 __ movptr(aaddress(rbx), rax);
840 }
843 void TemplateTable::wide_istore() {
844 transition(vtos, vtos);
845 __ pop_i(rax);
846 locals_index_wide(rbx);
847 __ movl(iaddress(rbx), rax);
848 }
851 void TemplateTable::wide_lstore() {
852 transition(vtos, vtos);
853 __ pop_l(rax, rdx);
854 locals_index_wide(rbx);
855 __ movptr(laddress(rbx), rax);
856 NOT_LP64(__ movl(haddress(rbx), rdx));
857 }
860 void TemplateTable::wide_fstore() {
861 wide_istore();
862 }
865 void TemplateTable::wide_dstore() {
866 wide_lstore();
867 }
870 void TemplateTable::wide_astore() {
871 transition(vtos, vtos);
872 __ pop_ptr(rax);
873 locals_index_wide(rbx);
874 __ movptr(aaddress(rbx), rax);
875 }
878 void TemplateTable::iastore() {
879 transition(itos, vtos);
880 __ pop_i(rbx);
881 // rax,: value
882 // rdx: array
883 index_check(rdx, rbx); // prefer index in rbx,
884 // rbx,: index
885 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
886 }
889 void TemplateTable::lastore() {
890 transition(ltos, vtos);
891 __ pop_i(rbx);
892 // rax,: low(value)
893 // rcx: array
894 // rdx: high(value)
895 index_check(rcx, rbx); // prefer index in rbx,
896 // rbx,: index
897 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
898 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
899 }
902 void TemplateTable::fastore() {
903 transition(ftos, vtos);
904 __ pop_i(rbx);
905 // rdx: array
906 // st0: value
907 index_check(rdx, rbx); // prefer index in rbx,
908 // rbx,: index
909 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
910 }
913 void TemplateTable::dastore() {
914 transition(dtos, vtos);
915 __ pop_i(rbx);
916 // rdx: array
917 // st0: value
918 index_check(rdx, rbx); // prefer index in rbx,
919 // rbx,: index
920 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
921 }
924 void TemplateTable::aastore() {
925 Label is_null, ok_is_subtype, done;
926 transition(vtos, vtos);
927 // stack: ..., array, index, value
928 __ movptr(rax, at_tos()); // Value
929 __ movl(rcx, at_tos_p1()); // Index
930 __ movptr(rdx, at_tos_p2()); // Array
932 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
933 index_check_without_pop(rdx, rcx); // kills rbx,
934 // do array store check - check for NULL value first
935 __ testptr(rax, rax);
936 __ jcc(Assembler::zero, is_null);
938 // Move subklass into EBX
939 __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
940 // Move superklass into EAX
941 __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
942 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
943 // Compress array+index*wordSize+12 into a single register. Frees ECX.
944 __ lea(rdx, element_address);
946 // Generate subtype check. Blows ECX. Resets EDI to locals.
947 // Superklass in EAX. Subklass in EBX.
948 __ gen_subtype_check( rbx, ok_is_subtype );
950 // Come here on failure
951 // object is at TOS
952 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
954 // Come here on success
955 __ bind(ok_is_subtype);
957 // Get the value to store
958 __ movptr(rax, at_rsp());
959 // and store it with appropriate barrier
960 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
962 __ jmp(done);
964 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
965 __ bind(is_null);
966 __ profile_null_seen(rbx);
968 // Store NULL, (noreg means NULL to do_oop_store)
969 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
971 // Pop stack arguments
972 __ bind(done);
973 __ addptr(rsp, 3 * Interpreter::stackElementSize);
974 }
977 void TemplateTable::bastore() {
978 transition(itos, vtos);
979 __ pop_i(rbx);
980 // rax,: value
981 // rdx: array
982 index_check(rdx, rbx); // prefer index in rbx,
983 // rbx,: index
984 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
985 }
988 void TemplateTable::castore() {
989 transition(itos, vtos);
990 __ pop_i(rbx);
991 // rax,: value
992 // rdx: array
993 index_check(rdx, rbx); // prefer index in rbx,
994 // rbx,: index
995 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
996 }
999 void TemplateTable::sastore() {
1000 castore();
1001 }
1004 void TemplateTable::istore(int n) {
1005 transition(itos, vtos);
1006 __ movl(iaddress(n), rax);
1007 }
1010 void TemplateTable::lstore(int n) {
1011 transition(ltos, vtos);
1012 __ movptr(laddress(n), rax);
1013 NOT_LP64(__ movptr(haddress(n), rdx));
1014 }
1017 void TemplateTable::fstore(int n) {
1018 transition(ftos, vtos);
1019 __ fstp_s(faddress(n));
1020 }
1023 void TemplateTable::dstore(int n) {
1024 transition(dtos, vtos);
1025 __ fstp_d(daddress(n));
1026 }
1029 void TemplateTable::astore(int n) {
1030 transition(vtos, vtos);
1031 __ pop_ptr(rax);
1032 __ movptr(aaddress(n), rax);
1033 }
1036 void TemplateTable::pop() {
1037 transition(vtos, vtos);
1038 __ addptr(rsp, Interpreter::stackElementSize);
1039 }
1042 void TemplateTable::pop2() {
1043 transition(vtos, vtos);
1044 __ addptr(rsp, 2*Interpreter::stackElementSize);
1045 }
1048 void TemplateTable::dup() {
1049 transition(vtos, vtos);
1050 // stack: ..., a
1051 __ load_ptr(0, rax);
1052 __ push_ptr(rax);
1053 // stack: ..., a, a
1054 }
1057 void TemplateTable::dup_x1() {
1058 transition(vtos, vtos);
1059 // stack: ..., a, b
1060 __ load_ptr( 0, rax); // load b
1061 __ load_ptr( 1, rcx); // load a
1062 __ store_ptr(1, rax); // store b
1063 __ store_ptr(0, rcx); // store a
1064 __ push_ptr(rax); // push b
1065 // stack: ..., b, a, b
1066 }
1069 void TemplateTable::dup_x2() {
1070 transition(vtos, vtos);
1071 // stack: ..., a, b, c
1072 __ load_ptr( 0, rax); // load c
1073 __ load_ptr( 2, rcx); // load a
1074 __ store_ptr(2, rax); // store c in a
1075 __ push_ptr(rax); // push c
1076 // stack: ..., c, b, c, c
1077 __ load_ptr( 2, rax); // load b
1078 __ store_ptr(2, rcx); // store a in b
1079 // stack: ..., c, a, c, c
1080 __ store_ptr(1, rax); // store b in c
1081 // stack: ..., c, a, b, c
1082 }
1085 void TemplateTable::dup2() {
1086 transition(vtos, vtos);
1087 // stack: ..., a, b
1088 __ load_ptr(1, rax); // load a
1089 __ push_ptr(rax); // push a
1090 __ load_ptr(1, rax); // load b
1091 __ push_ptr(rax); // push b
1092 // stack: ..., a, b, a, b
1093 }
1096 void TemplateTable::dup2_x1() {
1097 transition(vtos, vtos);
1098 // stack: ..., a, b, c
1099 __ load_ptr( 0, rcx); // load c
1100 __ load_ptr( 1, rax); // load b
1101 __ push_ptr(rax); // push b
1102 __ push_ptr(rcx); // push c
1103 // stack: ..., a, b, c, b, c
1104 __ store_ptr(3, rcx); // store c in b
1105 // stack: ..., a, c, c, b, c
1106 __ load_ptr( 4, rcx); // load a
1107 __ store_ptr(2, rcx); // store a in 2nd c
1108 // stack: ..., a, c, a, b, c
1109 __ store_ptr(4, rax); // store b in a
1110 // stack: ..., b, c, a, b, c
1112 }
1115 void TemplateTable::dup2_x2() {
1116 transition(vtos, vtos);
1117 // stack: ..., a, b, c, d
1118 __ load_ptr( 0, rcx); // load d
1119 __ load_ptr( 1, rax); // load c
1120 __ push_ptr(rax); // push c
1121 __ push_ptr(rcx); // push d
1122 // stack: ..., a, b, c, d, c, d
1123 __ load_ptr( 4, rax); // load b
1124 __ store_ptr(2, rax); // store b in d
1125 __ store_ptr(4, rcx); // store d in b
1126 // stack: ..., a, d, c, b, c, d
1127 __ load_ptr( 5, rcx); // load a
1128 __ load_ptr( 3, rax); // load c
1129 __ store_ptr(3, rcx); // store a in c
1130 __ store_ptr(5, rax); // store c in a
1131 // stack: ..., c, d, a, b, c, d
1133 }
1136 void TemplateTable::swap() {
1137 transition(vtos, vtos);
1138 // stack: ..., a, b
1139 __ load_ptr( 1, rcx); // load a
1140 __ load_ptr( 0, rax); // load b
1141 __ store_ptr(0, rcx); // store a in b
1142 __ store_ptr(1, rax); // store b in a
1143 // stack: ..., b, a
1144 }
1147 void TemplateTable::iop2(Operation op) {
1148 transition(itos, itos);
1149 switch (op) {
1150 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1151 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1152 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1153 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1154 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1155 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1156 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1157 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1158 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1159 default : ShouldNotReachHere();
1160 }
1161 }
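// Worked example for the masking note above: Java defines (1 << 33) == 2
// because only the low 5 bits of an int shift count apply (33 & 31 == 1),
// and shll/sarl/shrl with a CL count mask to 5 bits in hardware, so no
// explicit andl of rcx is needed.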
1164 void TemplateTable::lop2(Operation op) {
1165 transition(ltos, ltos);
1166 __ pop_l(rbx, rcx);
1167 switch (op) {
1168 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1169 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1170 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1171 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1172 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1173 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1174 default : ShouldNotReachHere();
1175 }
1176 }
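// Sketch of two-word long arithmetic on 32-bit x86 (ltos lives in rdx:rax):
//
//   __ addl(rax, rbx);   // low words; sets the carry flag
//   __ adcl(rdx, rcx);   // high words plus the carry
//
// Subtraction pairs subl with sbbl the same way, as in the case above.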
1179 void TemplateTable::idiv() {
1180 transition(itos, itos);
1181 __ mov(rcx, rax);
1182 __ pop_i(rax);
1183   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1184 // they are not equal, one could do a normal division (no correction
1185 // needed), which may speed up this implementation for the common case.
1186 // (see also JVM spec., p.243 & p.271)
1187 __ corrected_idivl(rcx);
1188 }
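// Why a corrected divide: x86 idiv raises #DE for min_int / -1 (the
// quotient 2^31 is not representable), while the JVM requires
// min_int / -1 == min_int with remainder 0; corrected_idivl handles that
// operand pair specially. The same applies to irem below.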
1191 void TemplateTable::irem() {
1192 transition(itos, itos);
1193 __ mov(rcx, rax);
1194 __ pop_i(rax);
1195   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1196 // they are not equal, one could do a normal division (no correction
1197 // needed), which may speed up this implementation for the common case.
1198 // (see also JVM spec., p.243 & p.271)
1199 __ corrected_idivl(rcx);
1200 __ mov(rax, rdx);
1201 }
1204 void TemplateTable::lmul() {
1205 transition(ltos, ltos);
1206 __ pop_l(rbx, rcx);
1207 __ push(rcx); __ push(rbx);
1208 __ push(rdx); __ push(rax);
1209 __ lmul(2 * wordSize, 0);
1210 __ addptr(rsp, 4 * wordSize); // take off temporaries
1211 }
1214 void TemplateTable::ldiv() {
1215 transition(ltos, ltos);
1216 __ pop_l(rbx, rcx);
1217 __ push(rcx); __ push(rbx);
1218 __ push(rdx); __ push(rax);
1219 // check if y = 0
1220 __ orl(rax, rdx);
1221 __ jump_cc(Assembler::zero,
1222 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1223 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1224 __ addptr(rsp, 4 * wordSize); // take off temporaries
1225 }
1228 void TemplateTable::lrem() {
1229 transition(ltos, ltos);
1230 __ pop_l(rbx, rcx);
1231 __ push(rcx); __ push(rbx);
1232 __ push(rdx); __ push(rax);
1233 // check if y = 0
1234 __ orl(rax, rdx);
1235 __ jump_cc(Assembler::zero,
1236 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1237 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1238 __ addptr(rsp, 4 * wordSize);
1239 }
1242 void TemplateTable::lshl() {
1243 transition(itos, ltos);
1244 __ movl(rcx, rax); // get shift count
1245 __ pop_l(rax, rdx); // get shift value
1246 __ lshl(rdx, rax);
1247 }
1250 void TemplateTable::lshr() {
1251 transition(itos, ltos);
1252 __ mov(rcx, rax); // get shift count
1253 __ pop_l(rax, rdx); // get shift value
1254 __ lshr(rdx, rax, true);
1255 }
1258 void TemplateTable::lushr() {
1259 transition(itos, ltos);
1260 __ mov(rcx, rax); // get shift count
1261 __ pop_l(rax, rdx); // get shift value
1262 __ lshr(rdx, rax);
1263 }
1266 void TemplateTable::fop2(Operation op) {
1267 transition(ftos, ftos);
1268 switch (op) {
1269 case add: __ fadd_s (at_rsp()); break;
1270 case sub: __ fsubr_s(at_rsp()); break;
1271 case mul: __ fmul_s (at_rsp()); break;
1272 case div: __ fdivr_s(at_rsp()); break;
1273 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1274 default : ShouldNotReachHere();
1275 }
1276 __ f2ieee();
1277   __ pop(rax);  // pop the float operand off the stack
1278 }
1281 void TemplateTable::dop2(Operation op) {
1282 transition(dtos, dtos);
1284 switch (op) {
1285 case add: __ fadd_d (at_rsp()); break;
1286 case sub: __ fsubr_d(at_rsp()); break;
1287 case mul: {
1288 Label L_strict;
1289 Label L_join;
1290 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1291 __ get_method(rcx);
1292 __ movl(rcx, access_flags);
1293 __ testl(rcx, JVM_ACC_STRICT);
1294 __ jccb(Assembler::notZero, L_strict);
1295 __ fmul_d (at_rsp());
1296 __ jmpb(L_join);
1297 __ bind(L_strict);
1298 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1299 __ fmulp();
1300 __ fmul_d (at_rsp());
1301 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1302 __ fmulp();
1303 __ bind(L_join);
1304 break;
1305 }
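    // (Sketch of the strict-fp trick above: x87 arithmetic keeps an extended
    //  exponent range even with 53-bit precision control, so a result that
    //  should round to a double subnormal would not; pre-multiplying by one
    //  bias constant pulls the exponent into double range and the second
    //  bias scales the product back. The div case below mirrors this.)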
1306 case div: {
1307 Label L_strict;
1308 Label L_join;
1309 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1310 __ get_method(rcx);
1311 __ movl(rcx, access_flags);
1312 __ testl(rcx, JVM_ACC_STRICT);
1313 __ jccb(Assembler::notZero, L_strict);
1314 __ fdivr_d(at_rsp());
1315 __ jmp(L_join);
1316 __ bind(L_strict);
1317 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1318 __ fmul_d (at_rsp());
1319 __ fdivrp();
1320 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1321 __ fmulp();
1322 __ bind(L_join);
1323 break;
1324 }
1325 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1326 default : ShouldNotReachHere();
1327 }
1328 __ d2ieee();
1329 // Pop double precision number from rsp.
1330 __ pop(rax);
1331 __ pop(rdx);
1332 }
1335 void TemplateTable::ineg() {
1336 transition(itos, itos);
1337 __ negl(rax);
1338 }
1341 void TemplateTable::lneg() {
1342 transition(ltos, ltos);
1343 __ lneg(rdx, rax);
1344 }
1347 void TemplateTable::fneg() {
1348 transition(ftos, ftos);
1349 __ fchs();
1350 }
1353 void TemplateTable::dneg() {
1354 transition(dtos, dtos);
1355 __ fchs();
1356 }
1359 void TemplateTable::iinc() {
1360 transition(vtos, vtos);
1361 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1362 locals_index(rbx);
1363 __ addl(iaddress(rbx), rdx);
1364 }
1367 void TemplateTable::wide_iinc() {
1368 transition(vtos, vtos);
1369 __ movl(rdx, at_bcp(4)); // get constant
1370 locals_index_wide(rbx);
1371 __ bswapl(rdx); // swap bytes & sign-extend constant
1372 __ sarl(rdx, 16);
1373 __ addl(iaddress(rbx), rdx);
1374 // Note: should probably use only one movl to get both
1375 // the index and the constant -> fix this
1376 }
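// Operand layout being decoded (JVMS "wide iinc"):
//   bcp+0: wide   bcp+1: iinc   bcp+2..3: index   bcp+4..5: constant
// movl(at_bcp(4)) over-reads the two bytes after the constant; bswapl moves
// the big-endian constant into the high half and sarl(16) sign-extends it,
// discarding the over-read.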
1379 void TemplateTable::convert() {
1380 // Checking
1381 #ifdef ASSERT
1382 { TosState tos_in = ilgl;
1383 TosState tos_out = ilgl;
1384 switch (bytecode()) {
1385 case Bytecodes::_i2l: // fall through
1386 case Bytecodes::_i2f: // fall through
1387 case Bytecodes::_i2d: // fall through
1388 case Bytecodes::_i2b: // fall through
1389 case Bytecodes::_i2c: // fall through
1390 case Bytecodes::_i2s: tos_in = itos; break;
1391 case Bytecodes::_l2i: // fall through
1392 case Bytecodes::_l2f: // fall through
1393 case Bytecodes::_l2d: tos_in = ltos; break;
1394 case Bytecodes::_f2i: // fall through
1395 case Bytecodes::_f2l: // fall through
1396 case Bytecodes::_f2d: tos_in = ftos; break;
1397 case Bytecodes::_d2i: // fall through
1398 case Bytecodes::_d2l: // fall through
1399 case Bytecodes::_d2f: tos_in = dtos; break;
1400 default : ShouldNotReachHere();
1401 }
1402 switch (bytecode()) {
1403 case Bytecodes::_l2i: // fall through
1404 case Bytecodes::_f2i: // fall through
1405 case Bytecodes::_d2i: // fall through
1406 case Bytecodes::_i2b: // fall through
1407 case Bytecodes::_i2c: // fall through
1408 case Bytecodes::_i2s: tos_out = itos; break;
1409 case Bytecodes::_i2l: // fall through
1410 case Bytecodes::_f2l: // fall through
1411 case Bytecodes::_d2l: tos_out = ltos; break;
1412 case Bytecodes::_i2f: // fall through
1413 case Bytecodes::_l2f: // fall through
1414 case Bytecodes::_d2f: tos_out = ftos; break;
1415 case Bytecodes::_i2d: // fall through
1416 case Bytecodes::_l2d: // fall through
1417 case Bytecodes::_f2d: tos_out = dtos; break;
1418 default : ShouldNotReachHere();
1419 }
1420 transition(tos_in, tos_out);
1421 }
1422 #endif // ASSERT
1424 // Conversion
1425 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1426 switch (bytecode()) {
1427 case Bytecodes::_i2l:
1428 __ extend_sign(rdx, rax);
1429 break;
1430 case Bytecodes::_i2f:
1431 __ push(rax); // store int on tos
1432 __ fild_s(at_rsp()); // load int to ST0
1433 __ f2ieee(); // truncate to float size
1434 __ pop(rcx); // adjust rsp
1435 break;
1436 case Bytecodes::_i2d:
1437 __ push(rax); // add one slot for d2ieee()
1438 __ push(rax); // store int on tos
1439 __ fild_s(at_rsp()); // load int to ST0
1440 __ d2ieee(); // truncate to double size
1441 __ pop(rcx); // adjust rsp
1442 __ pop(rcx);
1443 break;
1444 case Bytecodes::_i2b:
1445 __ shll(rax, 24); // truncate upper 24 bits
1446 __ sarl(rax, 24); // and sign-extend byte
1447 LP64_ONLY(__ movsbl(rax, rax));
1448 break;
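    // Worked example for i2b: rax = 0x000001FF (511) -> shll(24) ->
    // 0xFF000000 -> sarl(24) -> 0xFFFFFFFF, i.e. (byte)511 == -1, matching
    // Java's truncate-then-sign-extend semantics.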
1449 case Bytecodes::_i2c:
1450 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1451 LP64_ONLY(__ movzwl(rax, rax));
1452 break;
1453 case Bytecodes::_i2s:
1454 __ shll(rax, 16); // truncate upper 16 bits
1455 __ sarl(rax, 16); // and sign-extend short
1456 LP64_ONLY(__ movswl(rax, rax));
1457 break;
1458 case Bytecodes::_l2i:
1459 /* nothing to do */
1460 break;
1461 case Bytecodes::_l2f:
1462 __ push(rdx); // store long on tos
1463 __ push(rax);
1464 __ fild_d(at_rsp()); // load long to ST0
1465 __ f2ieee(); // truncate to float size
1466 __ pop(rcx); // adjust rsp
1467 __ pop(rcx);
1468 break;
1469 case Bytecodes::_l2d:
1470 __ push(rdx); // store long on tos
1471 __ push(rax);
1472 __ fild_d(at_rsp()); // load long to ST0
1473 __ d2ieee(); // truncate to double size
1474 __ pop(rcx); // adjust rsp
1475 __ pop(rcx);
1476 break;
1477 case Bytecodes::_f2i:
1478 __ push(rcx); // reserve space for argument
1479 __ fstp_s(at_rsp()); // pass float argument on stack
1480 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1481 break;
1482 case Bytecodes::_f2l:
1483 __ push(rcx); // reserve space for argument
1484 __ fstp_s(at_rsp()); // pass float argument on stack
1485 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1486 break;
1487 case Bytecodes::_f2d:
1488 /* nothing to do */
1489 break;
1490 case Bytecodes::_d2i:
1491 __ push(rcx); // reserve space for argument
1492 __ push(rcx);
1493 __ fstp_d(at_rsp()); // pass double argument on stack
1494 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1495 break;
1496 case Bytecodes::_d2l:
1497 __ push(rcx); // reserve space for argument
1498 __ push(rcx);
1499 __ fstp_d(at_rsp()); // pass double argument on stack
1500 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1501 break;
1502 case Bytecodes::_d2f:
1503 __ push(rcx); // reserve space for f2ieee()
1504 __ f2ieee(); // truncate to float size
1505 __ pop(rcx); // adjust rsp
1506 break;
1507 default :
1508 ShouldNotReachHere();
1509 }
1510 }
1513 void TemplateTable::lcmp() {
1514 transition(ltos, itos);
1515 // y = rdx:rax
1516 __ pop_l(rbx, rcx); // get x = rcx:rbx
1517 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1518 __ mov(rax, rcx);
1519 }
1522 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1523 if (is_float) {
1524 __ fld_s(at_rsp());
1525 } else {
1526 __ fld_d(at_rsp());
1527 __ pop(rdx);
1528 }
1529 __ pop(rcx);
1530 __ fcmp2int(rax, unordered_result < 0);
1531 }
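// unordered_result distinguishes the paired compare bytecodes: fcmpl/dcmpl
// pass -1 (NaN compares as "less") and fcmpg/dcmpg pass +1, so fcmp2int
// produces the result required when either operand is NaN.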
1534 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1535 __ get_method(rcx); // ECX holds method
1536 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1538 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
1539 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
1540 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1542 // Load up EDX with the branch displacement
1543 __ movl(rdx, at_bcp(1));
1544 __ bswapl(rdx);
1545 if (!is_wide) __ sarl(rdx, 16);
1546 LP64_ONLY(__ movslq(rdx, rdx));
1549 // Handle all the JSR stuff here, then exit.
1550 // It's much shorter and cleaner than intermingling with the
1551 // non-JSR normal-branch stuff occurring below.
1552 if (is_jsr) {
1553 // Pre-load the next target bytecode into EBX
1554 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1556 // compute return address as bci in rax,
1557 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
1558 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1559 // Adjust the bcp in RSI by the displacement in EDX
1560 __ addptr(rsi, rdx);
1561 // Push return address
1562 __ push_i(rax);
1563 // jsr returns vtos
1564 __ dispatch_only_noverify(vtos);
1565 return;
1566 }
1568 // Normal (non-jsr) branch handling
1570 // Adjust the bcp in RSI by the displacement in EDX
1571 __ addptr(rsi, rdx);
1573 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1574 Label backedge_counter_overflow;
1575 Label profile_method;
1576 Label dispatch;
1577 if (UseLoopCounter) {
1578 // increment backedge counter for backward branches
1579 // rax,: MDO
1580 // rbx,: MDO bumped taken-count
1581 // rcx: method
1582 // rdx: target offset
1583 // rsi: target bcp
1584 // rdi: locals pointer
1585 __ testl(rdx, rdx); // check if forward or backward branch
1586 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1588 if (TieredCompilation) {
1589 Label no_mdo;
1590 int increment = InvocationCounter::count_increment;
1591 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1592 if (ProfileInterpreter) {
1593 // Are we profiling?
1594 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1595 __ testptr(rbx, rbx);
1596 __ jccb(Assembler::zero, no_mdo);
1597 // Increment the MDO backedge counter
1598 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1599 in_bytes(InvocationCounter::counter_offset()));
1600 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1601 rax, false, Assembler::zero, &backedge_counter_overflow);
1602 __ jmp(dispatch);
1603 }
1604 __ bind(no_mdo);
1605 // Increment backedge counter in methodOop
1606 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1607 rax, false, Assembler::zero, &backedge_counter_overflow);
1608 } else {
1609 // increment counter
1610 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1611 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1612 __ movl(Address(rcx, be_offset), rax); // store counter
1614 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1615 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1616 __ addl(rax, Address(rcx, be_offset)); // add both counters
1618 if (ProfileInterpreter) {
1619 // Test to see if we should create a method data oop
1620 __ cmp32(rax,
1621 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1622 __ jcc(Assembler::less, dispatch);
1624 // if no method data exists, go to profile method
1625 __ test_method_data_pointer(rax, profile_method);
1627 if (UseOnStackReplacement) {
1628 // check for overflow against rbx, which is the MDO taken count
1629 __ cmp32(rbx,
1630 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1631 __ jcc(Assembler::below, dispatch);
1633 // When ProfileInterpreter is on, the backedge_count comes from the
1634         // methodDataOop, whose value does not get reset on the call to
1635 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1636 // routine while the method is being compiled, add a second test to make
1637 // sure the overflow function is called only once every overflow_frequency.
1638 const int overflow_frequency = 1024;
1639 __ andptr(rbx, overflow_frequency-1);
1640 __ jcc(Assembler::zero, backedge_counter_overflow);
1641 }
1642 } else {
1643 if (UseOnStackReplacement) {
1644 // check for overflow against rax, which is the sum of the counters
1645 __ cmp32(rax,
1646 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1647 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1649 }
1650 }
1651 }
1652 __ bind(dispatch);
1653 }
1655 // Pre-load the next target bytecode into EBX
1656 __ load_unsigned_byte(rbx, Address(rsi, 0));
1658 // continue with the bytecode @ target
1659 // rax,: return bci for jsr's, unused otherwise
1660 // rbx,: target bytecode
1661 // rsi: target bcp
1662 __ dispatch_only(vtos);
1664 if (UseLoopCounter) {
1665 if (ProfileInterpreter) {
1666 // Out-of-line code to allocate method data oop.
1667 __ bind(profile_method);
1668 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1669 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1670 __ set_method_data_pointer_for_bcp();
1671 __ jmp(dispatch);
1672 }
1674 if (UseOnStackReplacement) {
1676 // invocation counter overflow
1677 __ bind(backedge_counter_overflow);
1678 __ negptr(rdx);
1679 __ addptr(rdx, rsi); // branch bcp
1680 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1681 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1683 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1684 // rbx,: target bytecode
1685 // rdx: scratch
1686 // rdi: locals pointer
1687 // rsi: bcp
1688 __ testptr(rax, rax); // test result
1689 __ jcc(Assembler::zero, dispatch); // no osr if null
1690 // nmethod may have been invalidated (VM may block upon call_VM return)
1691 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1692 __ cmpl(rcx, InvalidOSREntryBci);
1693 __ jcc(Assembler::equal, dispatch);
1695 // We have the address of an on stack replacement routine in rax,
1696 // We need to prepare to execute the OSR method. First we must
1697     // migrate the locals and monitors off the stack.
1699 __ mov(rbx, rax); // save the nmethod
1701 const Register thread = rcx;
1702 __ get_thread(thread);
1703 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1704 // rax, is OSR buffer, move it to expected parameter location
1705 __ mov(rcx, rax);
1707 // pop the interpreter frame
1708 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1709 __ leave(); // remove frame anchor
1710 __ pop(rdi); // get return address
1711 __ mov(rsp, rdx); // set sp to sender sp
1714 Label skip;
1715 Label chkint;
1717     // The interpreter frame we have removed may be returning to
1718     // either the call stub or the interpreter. Since we will
1719     // now be returning from a compiled (OSR) nmethod we must
1720     // adjust the return address to one that can handle compiled
1721     // results and clean the FPU stack. This is very similar to
1722     // what an i2c adapter must do.
1724 // Are we returning to the call stub?
1726 __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
1727 __ jcc(Assembler::notEqual, chkint);
1729     // yes, adjust to the specialized call stub return.
1730 assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
1731 __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
1732 __ jmp(skip);
1734 __ bind(chkint);
1736 // Are we returning to the interpreter? Look for sentinel
1738 __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel);
1739 __ jcc(Assembler::notEqual, skip);
1741 // Adjust to compiled return back to interpreter
1743 __ movptr(rdi, Address(rdi, -wordSize));
1744 __ bind(skip);
1746 // Align stack pointer for compiled code (note that caller is
1747 // responsible for undoing this fixup by remembering the old SP
1748     // in an rbp-relative location)
1749 __ andptr(rsp, -(StackAlignmentInBytes));
1751 // push the (possibly adjusted) return address
1752 __ push(rdi);
1754 // and begin the OSR nmethod
1755 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1756 }
1757 }
1758 }
1761 void TemplateTable::if_0cmp(Condition cc) {
1762 transition(itos, vtos);
1763 // assume branch is more often taken than not (loops use backward branches)
1764 Label not_taken;
1765 __ testl(rax, rax);
1766 __ jcc(j_not(cc), not_taken);
1767 branch(false, false);
1768 __ bind(not_taken);
1769 __ profile_not_taken_branch(rax);
1770 }
1773 void TemplateTable::if_icmp(Condition cc) {
1774 transition(itos, vtos);
1775 // assume branch is more often taken than not (loops use backward branches)
1776 Label not_taken;
1777 __ pop_i(rdx);
1778 __ cmpl(rdx, rax);
1779 __ jcc(j_not(cc), not_taken);
1780 branch(false, false);
1781 __ bind(not_taken);
1782 __ profile_not_taken_branch(rax);
1783 }
1786 void TemplateTable::if_nullcmp(Condition cc) {
1787 transition(atos, vtos);
1788 // assume branch is more often taken than not (loops use backward branches)
1789 Label not_taken;
1790 __ testptr(rax, rax);
1791 __ jcc(j_not(cc), not_taken);
1792 branch(false, false);
1793 __ bind(not_taken);
1794 __ profile_not_taken_branch(rax);
1795 }
1798 void TemplateTable::if_acmp(Condition cc) {
1799 transition(atos, vtos);
1800 // assume branch is more often taken than not (loops use backward branches)
1801 Label not_taken;
1802 __ pop_ptr(rdx);
1803 __ cmpptr(rdx, rax);
1804 __ jcc(j_not(cc), not_taken);
1805 branch(false, false);
1806 __ bind(not_taken);
1807 __ profile_not_taken_branch(rax);
1808 }
1811 void TemplateTable::ret() {
1812 transition(vtos, vtos);
1813 locals_index(rbx);
1814 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1815 __ profile_ret(rbx, rcx);
1816 __ get_method(rax);
1817 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1818 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1819 constMethodOopDesc::codes_offset()));
1820 __ dispatch_next(vtos);
1821 }
1824 void TemplateTable::wide_ret() {
1825 transition(vtos, vtos);
1826 locals_index_wide(rbx);
1827 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1828 __ profile_ret(rbx, rcx);
1829 __ get_method(rax);
1830 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1831 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1832 __ dispatch_next(vtos);
1833 }
1836 void TemplateTable::tableswitch() {
1837 Label default_case, continue_execution;
1838 transition(itos, vtos);
1839 // align rsi
1840 __ lea(rbx, at_bcp(wordSize));
1841 __ andptr(rbx, -wordSize);
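  // Layout from the aligned address in rbx (JVMS tableswitch):
  //   word 0: default offset   word 1: low   word 2: high
  //   word 3..: jump offsets (high - low + 1 entries)
  // which is why the default is read from Address(rbx, 0) below.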
1842 // load lo & hi
1843 __ movl(rcx, Address(rbx, 1 * wordSize));
1844 __ movl(rdx, Address(rbx, 2 * wordSize));
1845 __ bswapl(rcx);
1846 __ bswapl(rdx);
1847 // check against lo & hi
1848 __ cmpl(rax, rcx);
1849 __ jccb(Assembler::less, default_case);
1850 __ cmpl(rax, rdx);
1851 __ jccb(Assembler::greater, default_case);
1852 // lookup dispatch offset
1853 __ subl(rax, rcx);
1854 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1855 __ profile_switch_case(rax, rbx, rcx);
1856 // continue execution
1857 __ bind(continue_execution);
1858 __ bswapl(rdx);
1859 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1860 __ addptr(rsi, rdx);
1861 __ dispatch_only(vtos);
1862 // handle default
1863 __ bind(default_case);
1864 __ profile_switch_default(rax);
1865 __ movl(rdx, Address(rbx, 0));
1866 __ jmp(continue_execution);
1867 }
1870 void TemplateTable::lookupswitch() {
1871 transition(itos, itos);
1872 __ stop("lookupswitch bytecode should have been rewritten");
1873 }
1876 void TemplateTable::fast_linearswitch() {
1877 transition(itos, vtos);
1878 Label loop_entry, loop, found, continue_execution;
1879 // bswapl rax, so we can avoid bswapping the table entries
1880 __ bswapl(rax);
1881 // align rsi
1882 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1883 __ andptr(rbx, -wordSize);
1884 // set counter
1885 __ movl(rcx, Address(rbx, wordSize));
1886 __ bswapl(rcx);
1887 __ jmpb(loop_entry);
1888 // table search
1889 __ bind(loop);
1890 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1891 __ jccb(Assembler::equal, found);
1892 __ bind(loop_entry);
1893 __ decrementl(rcx);
1894 __ jcc(Assembler::greaterEqual, loop);
1895 // default case
1896 __ profile_switch_default(rax);
1897 __ movl(rdx, Address(rbx, 0));
1898 __ jmpb(continue_execution);
1899 // entry found -> get offset
1900 __ bind(found);
1901 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1902 __ profile_switch_case(rcx, rax, rbx);
1903 // continue execution
1904 __ bind(continue_execution);
1905 __ bswapl(rdx);
1906 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1907 __ addptr(rsi, rdx);
1908 __ dispatch_only(vtos);
1909 }
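// The matching lookupswitch layout (same alignment: rbx is bcp + 1
// rounded up to the next 4-byte boundary):
//
//   [rbx + 0]  default offset
//   [rbx + 4]  npairs                   (-> rcx, the loop counter)
//   [rbx + 8]  { match, offset } pairs  (8 bytes each, hence times_8)
//
// rax is bswapped once up front so it compares directly against the
// big-endian match words, saving one bswap per iteration.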
1912 void TemplateTable::fast_binaryswitch() {
1913 transition(itos, vtos);
1914 // Implementation using the following core algorithm:
1915 //
1916 // int binary_search(int key, LookupswitchPair* array, int n) {
1917 // // Binary search according to "Methodik des Programmierens" by
1918 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1919 // int i = 0;
1920 // int j = n;
1921 // while (i+1 < j) {
1922 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1923 // // with Q: for all i: 0 <= i < n: key < a[i]
1924 // // where a stands for the array and assuming that the (nonexistent)
1925 // // element a[n] is infinitely big.
1926 // int h = (i + j) >> 1;
1927 // // i < h < j
1928 // if (key < array[h].fast_match()) {
1929 // j = h;
1930 // } else {
1931 // i = h;
1932 // }
1933 // }
1934 // // R: a[i] <= key < a[i+1] or Q
1935 // // (i.e., if key is within array, i is the correct index)
1936 // return i;
1937 // }
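// A note on consuming the result (mirroring the generated code below):
// postcondition R only brackets the key, so the caller must re-check
// for an exact match before taking the pair's offset:
//
//   int i = binary_search(key, array, n);
//   if (array[i].fast_match() == key) use array[i].offset;
//   else                              use the default offset;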
1939 // register allocation
1940 const Register key = rax; // already set (tosca)
1941 const Register array = rbx;
1942 const Register i = rcx;
1943 const Register j = rdx;
1944 const Register h = rdi; // needs to be restored
1945 const Register temp = rsi;
1946 // setup array
1947 __ save_bcp();
1949 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1950 __ andptr(array, -wordSize);
1951 // initialize i & j
1952 __ xorl(i, i); // i = 0;
1953 __ movl(j, Address(array, -wordSize)); // j = length(array);
1954 // Convert j into native byte ordering
1955 __ bswapl(j);
1956 // and start
1957 Label entry;
1958 __ jmp(entry);
1960 // binary search loop
1961 { Label loop;
1962 __ bind(loop);
1963 // int h = (i + j) >> 1;
1964 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1965 __ sarl(h, 1); // h = (i + j) >> 1;
1966 // if (key < array[h].fast_match()) {
1967 // j = h;
1968 // } else {
1969 // i = h;
1970 // }
1971 // Convert array[h].match to native byte-ordering before compare
1972 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1973 __ bswapl(temp);
1974 __ cmpl(key, temp);
1975 if (VM_Version::supports_cmov()) {
1976 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
1977 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
1978 } else {
1979 Label set_i, end_of_if;
1980 __ jccb(Assembler::greaterEqual, set_i); // {
1981 __ mov(j, h); // j = h;
1982 __ jmp(end_of_if); // }
1983 __ bind(set_i); // else {
1984 __ mov(i, h); // i = h;
1985 __ bind(end_of_if); // }
1986 }
1987 // while (i+1 < j)
1988 __ bind(entry);
1989 __ leal(h, Address(i, 1)); // i+1
1990 __ cmpl(h, j); // i+1 < j
1991 __ jcc(Assembler::less, loop);
1992 }
1994 // end of binary search, result index is i (must check again!)
1995 Label default_case;
1996 // Convert array[i].match to native byte-ordering before compare
1997 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1998 __ bswapl(temp);
1999 __ cmpl(key, temp);
2000 __ jcc(Assembler::notEqual, default_case);
2002 // entry found -> j = offset
2003 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
2004 __ profile_switch_case(i, key, array);
2005 __ bswapl(j);
2006 LP64_ONLY(__ movslq(j, j));
2007 __ restore_bcp();
2008 __ restore_locals(); // restore rdi
2009 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2011 __ addptr(rsi, j);
2012 __ dispatch_only(vtos);
2014 // default case -> j = default offset
2015 __ bind(default_case);
2016 __ profile_switch_default(i);
2017 __ movl(j, Address(array, -2*wordSize));
2018 __ bswapl(j);
2019 LP64_ONLY(__ movslq(j, j));
2020 __ restore_bcp();
2021 __ restore_locals(); // restore rdi
2022 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2023 __ addptr(rsi, j);
2024 __ dispatch_only(vtos);
2025 }
2028 void TemplateTable::_return(TosState state) {
2029 transition(state, state);
2030 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2032 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2033 assert(state == vtos, "only valid state");
2034 __ movptr(rax, aaddress(0));
2035 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
2036 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2037 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2038 Label skip_register_finalizer;
2039 __ jcc(Assembler::zero, skip_register_finalizer);
2041 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2043 __ bind(skip_register_finalizer);
2044 }
2046 __ remove_activation(state, rsi);
2047 __ jmp(rsi);
2048 }
2051 // ----------------------------------------------------------------------------
2052 // Volatile variables demand their effects be made known to all CPUs in
2053 // order. Store buffers on most chips allow reads & writes to reorder; the
2054 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2055 // memory barrier (i.e., it's not sufficient that the interpreter does not
2056 // reorder volatile references, the hardware also must not reorder them).
2057 //
2058 // According to the new Java Memory Model (JMM):
2059 // (1) All volatiles are serialized with respect to each other.
2060 // ALSO reads & writes act as acquire & release, so:
2061 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2062 // the read float up to before the read. It's OK for non-volatile memory refs
2063 // that happen before the volatile read to float down below it.
2064 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2065 // that happen BEFORE the write float down to after the write. It's OK for
2066 // non-volatile memory refs that happen after the volatile write to float up
2067 // before it.
2068 //
2069 // We only put in barriers around volatile refs (they are expensive), not
2070 // _between_ memory refs (that would require us to track the flavor of the
2071 // previous memory refs). Requirements (2) and (3) require some barriers
2072 // before volatile stores and after volatile loads. These nearly cover
2073 // requirement (1) but miss the volatile-store-volatile-load case. This final
2074 // case is placed after volatile-stores although it could just as well go
2075 // before volatile-loads.
2076 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2077 // Helper function to insert a memory barrier for volatile accesses
2078 if( !os::is_MP() ) return; // Not needed on single CPU
2079 __ membar(order_constraint);
2080 }
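// A minimal Java-level illustration (not from this file) of the
// volatile-store-volatile-load case the trailing StoreLoad barrier
// covers:
//
//   volatile int x = 0, y = 0;
//   // Thread 1:  x = 1;  r1 = y;
//   // Thread 2:  y = 1;  r2 = x;
//
// Without a StoreLoad barrier between each thread's store and its
// subsequent load, store buffering can yield r1 == r2 == 0, an outcome
// the JMM forbids for volatiles.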
2082 void TemplateTable::resolve_cache_and_index(int byte_no,
2083 Register result,
2084 Register Rcache,
2085 Register index,
2086 size_t index_size) {
2087 Register temp = rbx;
2089 assert_different_registers(result, Rcache, index, temp);
2091 Label resolved;
2092 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2093 if (byte_no == f1_oop) {
2094 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2095 // This kind of CP cache entry does not need to match the flags byte, because
2096 // there is a 1-1 relation between bytecode type and CP entry type.
2097 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2098 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2099 __ testptr(result, result);
2100 __ jcc(Assembler::notEqual, resolved);
2101 } else {
2102 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2103 assert(result == noreg, ""); //else change code for setting result
2104 const int shift_count = (1 + byte_no)*BitsPerByte;
2105 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2106 __ shrl(temp, shift_count);
2107 // have we resolved this bytecode?
2108 __ andl(temp, 0xFF);
2109 __ cmpl(temp, (int)bytecode());
2110 __ jcc(Assembler::equal, resolved);
2111 }
2113 // resolve first time through
2114 address entry;
2115 switch (bytecode()) {
2116 case Bytecodes::_getstatic : // fall through
2117 case Bytecodes::_putstatic : // fall through
2118 case Bytecodes::_getfield : // fall through
2119 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2120 case Bytecodes::_invokevirtual : // fall through
2121 case Bytecodes::_invokespecial : // fall through
2122 case Bytecodes::_invokestatic : // fall through
2123 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2124 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2125 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2126 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2127 default : ShouldNotReachHere(); break;
2128 }
2129 __ movl(temp, (int)bytecode());
2130 __ call_VM(noreg, entry, temp);
2131 // Update registers with resolved info
2132 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2133 if (result != noreg)
2134 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2135 __ bind(resolved);
2136 }
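// Sketch of the indices-word test above, assuming the usual layout
// with f1_byte == 1 and f2_byte == 2:
//
//   _indices: [ bytecode_2 : 8 | bytecode_1 : 8 | cp index : 16 ]
//
//   shift_count = (1 + byte_no) * BitsPerByte;  // 16 for f1, 24 for f2
//   resolved    = ((indices >> shift_count) & 0xFF) == bytecode();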
2139 // The cache and index registers must be set before this call
2140 void TemplateTable::load_field_cp_cache_entry(Register obj,
2141 Register cache,
2142 Register index,
2143 Register off,
2144 Register flags,
2145 bool is_static = false) {
2146 assert_different_registers(cache, index, flags, off);
2148 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2149 // Field offset
2150 __ movptr(off, Address(cache, index, Address::times_ptr,
2151 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2152 // Flags
2153 __ movl(flags, Address(cache, index, Address::times_ptr,
2154 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2156 // for static fields, overwrite the obj register with the klass
2157 if (is_static) {
2158 __ movptr(obj, Address(cache, index, Address::times_ptr,
2159 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2160 }
2161 }
2163 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2164 Register method,
2165 Register itable_index,
2166 Register flags,
2167 bool is_invokevirtual,
2168 bool is_invokevfinal /*unused*/,
2169 bool is_invokedynamic) {
2170 // setup registers
2171 const Register cache = rcx;
2172 const Register index = rdx;
2173 assert_different_registers(method, flags);
2174 assert_different_registers(method, cache, index);
2175 assert_different_registers(itable_index, flags);
2176 assert_different_registers(itable_index, cache, index);
2177 // determine constant pool cache field offsets
2178 const int method_offset = in_bytes(
2179 constantPoolCacheOopDesc::base_offset() +
2180 (is_invokevirtual
2181 ? ConstantPoolCacheEntry::f2_offset()
2182 : ConstantPoolCacheEntry::f1_offset()
2183 )
2184 );
2185 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2186 ConstantPoolCacheEntry::flags_offset());
2187 // access constant pool cache fields
2188 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2189 ConstantPoolCacheEntry::f2_offset());
2191 if (byte_no == f1_oop) {
2192 // Resolved f1_oop goes directly into 'method' register.
2193 assert(is_invokedynamic, "");
2194 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2195 } else {
2196 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2197 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2198 }
2199 if (itable_index != noreg) {
2200 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2201 }
2202 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2203 }
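// Hedged summary of which cache word each invoke variant consumes, as
// selected above:
//
//   invokevirtual               method <- f2 (vtable index, or the
//                                             methodOop if final)
//   invokespecial/invokestatic  method <- f1 (resolved methodOop)
//   invokeinterface             method <- f1 (interface klassOop),
//                               itable_index <- f2
//   invokedynamic               method <- f1 (CallSite oop, f1_oop path)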
2206 // The cache and index registers are expected to be set before the call.
2207 // Their values are preserved across the call.
2208 void TemplateTable::jvmti_post_field_access(Register cache,
2209 Register index,
2210 bool is_static,
2211 bool has_tos) {
2212 if (JvmtiExport::can_post_field_access()) {
2213 // Check to see if a field access watch has been set before we take
2214 // the time to call into the VM.
2215 Label L1;
2216 assert_different_registers(cache, index, rax);
2217 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2218 __ testl(rax,rax);
2219 __ jcc(Assembler::zero, L1);
2221 // cache entry pointer
2222 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2223 __ shll(index, LogBytesPerWord);
2224 __ addptr(cache, index);
2225 if (is_static) {
2226 __ xorptr(rax, rax); // NULL object reference
2227 } else {
2228 __ pop(atos); // Get the object
2229 __ verify_oop(rax);
2230 __ push(atos); // Restore stack state
2231 }
2232 // rax,: object pointer or NULL
2233 // cache: cache entry pointer
2234 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2235 rax, cache);
2236 __ get_cache_and_index_at_bcp(cache, index, 1);
2237 __ bind(L1);
2238 }
2239 }
2241 void TemplateTable::pop_and_check_object(Register r) {
2242 __ pop_ptr(r);
2243 __ null_check(r); // for field access must check obj.
2244 __ verify_oop(r);
2245 }
2247 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2248 transition(vtos, vtos);
2250 const Register cache = rcx;
2251 const Register index = rdx;
2252 const Register obj = rcx;
2253 const Register off = rbx;
2254 const Register flags = rax;
2256 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2257 jvmti_post_field_access(cache, index, is_static, false);
2258 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2260 if (!is_static) pop_and_check_object(obj);
2262 const Address lo(obj, off, Address::times_1, 0*wordSize);
2263 const Address hi(obj, off, Address::times_1, 1*wordSize);
2265 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2267 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2268 assert(btos == 0, "change code, btos != 0");
2269 // btos
2270 __ andptr(flags, 0x0f);
2271 __ jcc(Assembler::notZero, notByte);
2273 __ load_signed_byte(rax, lo );
2274 __ push(btos);
2275 // Rewrite bytecode to be faster
2276 if (!is_static) {
2277 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2278 }
2279 __ jmp(Done);
2281 __ bind(notByte);
2282 // itos
2283 __ cmpl(flags, itos );
2284 __ jcc(Assembler::notEqual, notInt);
2286 __ movl(rax, lo );
2287 __ push(itos);
2288 // Rewrite bytecode to be faster
2289 if (!is_static) {
2290 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2291 }
2292 __ jmp(Done);
2294 __ bind(notInt);
2295 // atos
2296 __ cmpl(flags, atos );
2297 __ jcc(Assembler::notEqual, notObj);
2299 __ movl(rax, lo );
2300 __ push(atos);
2301 if (!is_static) {
2302 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2303 }
2304 __ jmp(Done);
2306 __ bind(notObj);
2307 // ctos
2308 __ cmpl(flags, ctos );
2309 __ jcc(Assembler::notEqual, notChar);
2311 __ load_unsigned_short(rax, lo );
2312 __ push(ctos);
2313 if (!is_static) {
2314 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2315 }
2316 __ jmp(Done);
2318 __ bind(notChar);
2319 // stos
2320 __ cmpl(flags, stos );
2321 __ jcc(Assembler::notEqual, notShort);
2323 __ load_signed_short(rax, lo );
2324 __ push(stos);
2325 if (!is_static) {
2326 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2327 }
2328 __ jmp(Done);
2330 __ bind(notShort);
2331 // ltos
2332 __ cmpl(flags, ltos );
2333 __ jcc(Assembler::notEqual, notLong);
2335 // Generate code as if volatile. There just aren't enough registers to
2336 // save that information and this code is faster than the test.
2337 __ fild_d(lo); // Must load atomically
2338 __ subptr(rsp,2*wordSize); // Make space for store
2339 __ fistp_d(Address(rsp,0));
2340 __ pop(rax);
2341 __ pop(rdx);
2343 __ push(ltos);
2344 // Don't rewrite to _fast_lgetfield for potential volatile case.
2345 __ jmp(Done);
2347 __ bind(notLong);
2348 // ftos
2349 __ cmpl(flags, ftos );
2350 __ jcc(Assembler::notEqual, notFloat);
2352 __ fld_s(lo);
2353 __ push(ftos);
2354 if (!is_static) {
2355 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2356 }
2357 __ jmp(Done);
2359 __ bind(notFloat);
2360 // dtos
2361 __ cmpl(flags, dtos );
2362 __ jcc(Assembler::notEqual, notDouble);
2364 __ fld_d(lo);
2365 __ push(dtos);
2366 if (!is_static) {
2367 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2368 }
2369 __ jmpb(Done);
2371 __ bind(notDouble);
2373 __ stop("Bad state");
2375 __ bind(Done);
2376 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2377 // volatile_barrier( );
2378 }
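// The compare chain above is effectively a switch on the field's tos
// state (a sketch; btos == 0 lets the first case use a plain zero
// test):
//
//   switch (flags & 0x0f) {   // flags >> tosBits, masked
//     case btos: ...  case itos: ...  case atos: ...  case ctos: ...
//     case stos: ...  case ltos: ...  case ftos: ...  case dtos: ...
//     default:   stop("Bad state");
//   }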
2381 void TemplateTable::getfield(int byte_no) {
2382 getfield_or_static(byte_no, false);
2383 }
2386 void TemplateTable::getstatic(int byte_no) {
2387 getfield_or_static(byte_no, true);
2388 }
2390 // The cache and index registers are expected to be set before the call.
2391 // The function may destroy various registers, just not the cache and index registers.
2392 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2394 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2396 if (JvmtiExport::can_post_field_modification()) {
2397 // Check to see if a field modification watch has been set before we take
2398 // the time to call into the VM.
2399 Label L1;
2400 assert_different_registers(cache, index, rax);
2401 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2402 __ testl(rax, rax);
2403 __ jcc(Assembler::zero, L1);
2405 // The cache and index registers have already been set. That would
2406 // allow us to eliminate this call, but then the cache and index
2407 // registers would have to be used consistently after this line.
2408 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2410 if (is_static) {
2411 // Life is simple. Null out the object pointer.
2412 __ xorptr(rbx, rbx);
2413 } else {
2414 // Life is harder. The stack holds the value on top, followed by the object.
2415 // We don't know the size of the value, though; it could be one or two words
2416 // depending on its type. As a result, we must find the type to determine where
2417 // the object is.
2418 Label two_word, valsize_known;
2419 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2420 ConstantPoolCacheEntry::flags_offset())));
2421 __ mov(rbx, rsp);
2422 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2423 // Make sure we don't need to mask rcx for tosBits after the above shift
2424 ConstantPoolCacheEntry::verify_tosBits();
2425 __ cmpl(rcx, ltos);
2426 __ jccb(Assembler::equal, two_word);
2427 __ cmpl(rcx, dtos);
2428 __ jccb(Assembler::equal, two_word);
2429 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2430 __ jmpb(valsize_known);
2432 __ bind(two_word);
2433 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2435 __ bind(valsize_known);
2436 // setup object pointer
2437 __ movptr(rbx, Address(rbx, 0));
2438 }
2439 // cache entry pointer
2440 __ addptr(rax, in_bytes(cp_base_offset));
2441 __ shll(rdx, LogBytesPerWord);
2442 __ addptr(rax, rdx);
2443 // object (tos)
2444 __ mov(rcx, rsp);
2445 // rbx,: object pointer set up above (NULL if static)
2446 // rax,: cache entry pointer
2447 // rcx: jvalue object on the stack
2448 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2449 rbx, rax, rcx);
2450 __ get_cache_and_index_at_bcp(cache, index, 1);
2451 __ bind(L1);
2452 }
2453 }
2456 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2457 transition(vtos, vtos);
2459 const Register cache = rcx;
2460 const Register index = rdx;
2461 const Register obj = rcx;
2462 const Register off = rbx;
2463 const Register flags = rax;
2465 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2466 jvmti_post_field_mod(cache, index, is_static);
2467 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2469 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2470 // volatile_barrier( );
2472 Label notVolatile, Done;
2473 __ movl(rdx, flags);
2474 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2475 __ andl(rdx, 0x1);
2477 // field addresses
2478 const Address lo(obj, off, Address::times_1, 0*wordSize);
2479 const Address hi(obj, off, Address::times_1, 1*wordSize);
2481 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2483 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2484 assert(btos == 0, "change code, btos != 0");
2485 // btos
2486 __ andl(flags, 0x0f);
2487 __ jcc(Assembler::notZero, notByte);
2489 __ pop(btos);
2490 if (!is_static) pop_and_check_object(obj);
2491 __ movb(lo, rax );
2492 if (!is_static) {
2493 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2494 }
2495 __ jmp(Done);
2497 __ bind(notByte);
2498 // itos
2499 __ cmpl(flags, itos );
2500 __ jcc(Assembler::notEqual, notInt);
2502 __ pop(itos);
2503 if (!is_static) pop_and_check_object(obj);
2505 __ movl(lo, rax );
2506 if (!is_static) {
2507 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2508 }
2509 __ jmp(Done);
2511 __ bind(notInt);
2512 // atos
2513 __ cmpl(flags, atos );
2514 __ jcc(Assembler::notEqual, notObj);
2516 __ pop(atos);
2517 if (!is_static) pop_and_check_object(obj);
2519 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2521 if (!is_static) {
2522 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2523 }
2525 __ jmp(Done);
2527 __ bind(notObj);
2528 // ctos
2529 __ cmpl(flags, ctos );
2530 __ jcc(Assembler::notEqual, notChar);
2532 __ pop(ctos);
2533 if (!is_static) pop_and_check_object(obj);
2534 __ movw(lo, rax );
2535 if (!is_static) {
2536 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2537 }
2538 __ jmp(Done);
2540 __ bind(notChar);
2541 // stos
2542 __ cmpl(flags, stos );
2543 __ jcc(Assembler::notEqual, notShort);
2545 __ pop(stos);
2546 if (!is_static) pop_and_check_object(obj);
2547 __ movw(lo, rax );
2548 if (!is_static) {
2549 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2550 }
2551 __ jmp(Done);
2553 __ bind(notShort);
2554 // ltos
2555 __ cmpl(flags, ltos );
2556 __ jcc(Assembler::notEqual, notLong);
2558 Label notVolatileLong;
2559 __ testl(rdx, rdx);
2560 __ jcc(Assembler::zero, notVolatileLong);
2562 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2563 if (!is_static) pop_and_check_object(obj);
2565 // Replace with real volatile test
2566 __ push(rdx);
2567 __ push(rax); // Must update atomically with FIST
2568 __ fild_d(Address(rsp,0)); // So load into FPU register
2569 __ fistp_d(lo); // and put into memory atomically
2570 __ addptr(rsp, 2*wordSize);
2571 // volatile_barrier();
2572 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2573 Assembler::StoreStore));
2574 // Don't rewrite volatile version
2575 __ jmp(notVolatile);
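// The fild/fistp pair above is the classic pre-SSE2 IA-32 idiom for an
// atomic 64-bit store: the x87 unit moves all 8 bytes in one memory
// access. In effect (a sketch):
//
//   *(jlong*)field = ((jlong)rdx << 32) | (juint)rax;  // indivisibly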
2577 __ bind(notVolatileLong);
2579 __ pop(ltos); // overwrites rdx
2580 if (!is_static) pop_and_check_object(obj);
2581 NOT_LP64(__ movptr(hi, rdx));
2582 __ movptr(lo, rax);
2583 if (!is_static) {
2584 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2585 }
2586 __ jmp(notVolatile);
2588 __ bind(notLong);
2589 // ftos
2590 __ cmpl(flags, ftos );
2591 __ jcc(Assembler::notEqual, notFloat);
2593 __ pop(ftos);
2594 if (!is_static) pop_and_check_object(obj);
2595 __ fstp_s(lo);
2596 if (!is_static) {
2597 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2598 }
2599 __ jmp(Done);
2601 __ bind(notFloat);
2602 // dtos
2603 __ cmpl(flags, dtos );
2604 __ jcc(Assembler::notEqual, notDouble);
2606 __ pop(dtos);
2607 if (!is_static) pop_and_check_object(obj);
2608 __ fstp_d(lo);
2609 if (!is_static) {
2610 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2611 }
2612 __ jmp(Done);
2614 __ bind(notDouble);
2616 __ stop("Bad state");
2618 __ bind(Done);
2620 // Check for volatile store
2621 __ testl(rdx, rdx);
2622 __ jcc(Assembler::zero, notVolatile);
2623 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2624 Assembler::StoreStore));
2625 __ bind(notVolatile);
2626 }
2629 void TemplateTable::putfield(int byte_no) {
2630 putfield_or_static(byte_no, false);
2631 }
2634 void TemplateTable::putstatic(int byte_no) {
2635 putfield_or_static(byte_no, true);
2636 }
2638 void TemplateTable::jvmti_post_fast_field_mod() {
2639 if (JvmtiExport::can_post_field_modification()) {
2640 // Check to see if a field modification watch has been set before we take
2641 // the time to call into the VM.
2642 Label L2;
2643 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2644 __ testl(rcx,rcx);
2645 __ jcc(Assembler::zero, L2);
2646 __ pop_ptr(rbx); // copy the object pointer from tos
2647 __ verify_oop(rbx);
2648 __ push_ptr(rbx); // put the object pointer back on tos
2649 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2650 __ mov(rcx, rsp);
2651 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2652 __ xorptr(rbx, rbx);
2653 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2654 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2655 switch (bytecode()) { // load values into the jvalue object
2656 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2657 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2658 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2659 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2660 case Bytecodes::_fast_lputfield:
2661 NOT_LP64(__ movptr(hi_value, rdx));
2662 __ movptr(lo_value, rax);
2663 break;
2665 // need to call fld_s() after fstp_s() to restore the value for below
2666 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2668 // need to call fld_d() after fstp_d() to restore the value for below
2669 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2671 // since rcx is not an object we don't call store_check() here
2672 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2674 default: ShouldNotReachHere();
2675 }
2676 __ pop_ptr(rbx); // restore copy of object pointer
2678 // Save rax, and sometimes rdx because call_VM() will clobber them,
2679 // then use them for JVM/DI purposes
2680 __ push(rax);
2681 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2682 // access constant pool cache entry
2683 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2684 __ verify_oop(rbx);
2685 // rbx,: object pointer copied above
2686 // rax,: cache entry pointer
2687 // rcx: jvalue object on the stack
2688 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2689 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2690 __ pop(rax); // restore lower value
2691 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2692 __ bind(L2);
2693 }
2694 }
2696 void TemplateTable::fast_storefield(TosState state) {
2697 transition(state, vtos);
2699 ByteSize base = constantPoolCacheOopDesc::base_offset();
2701 jvmti_post_fast_field_mod();
2703 // access constant pool cache
2704 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2706 // Test for volatile with rdx, but rdx is the tos register for lputfield.
2707 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2708 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2709 ConstantPoolCacheEntry::flags_offset())));
2711 // replace index with field offset from cache entry
2712 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2714 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2715 // volatile_barrier( );
2717 Label notVolatile, Done;
2718 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2719 __ andl(rdx, 0x1);
2720 // Check for volatile store
2721 __ testl(rdx, rdx);
2722 __ jcc(Assembler::zero, notVolatile);
2724 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2726 // Get object from stack
2727 pop_and_check_object(rcx);
2729 // field addresses
2730 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2731 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2733 // access field
2734 switch (bytecode()) {
2735 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2736 case Bytecodes::_fast_sputfield: // fall through
2737 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2738 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2739 case Bytecodes::_fast_lputfield:
2740 NOT_LP64(__ movptr(hi, rdx));
2741 __ movptr(lo, rax);
2742 break;
2743 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2744 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2745 case Bytecodes::_fast_aputfield: {
2746 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2747 break;
2748 }
2749 default:
2750 ShouldNotReachHere();
2751 }
2753 Label done;
2754 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2755 Assembler::StoreStore));
2756 // Barriers are so large that a short branch doesn't reach!
2757 __ jmp(done);
2759 // Same code as above, but don't need rdx to test for volatile.
2760 __ bind(notVolatile);
2762 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2764 // Get object from stack
2765 pop_and_check_object(rcx);
2767 // access field
2768 switch (bytecode()) {
2769 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2770 case Bytecodes::_fast_sputfield: // fall through
2771 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2772 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2773 case Bytecodes::_fast_lputfield:
2774 NOT_LP64(__ movptr(hi, rdx));
2775 __ movptr(lo, rax);
2776 break;
2777 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2778 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2779 case Bytecodes::_fast_aputfield: {
2780 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2781 break;
2782 }
2783 default:
2784 ShouldNotReachHere();
2785 }
2786 __ bind(done);
2787 }
2790 void TemplateTable::fast_accessfield(TosState state) {
2791 transition(atos, state);
2793 // do the JVMTI work here to avoid disturbing the register state below
2794 if (JvmtiExport::can_post_field_access()) {
2795 // Check to see if a field access watch has been set before we take
2796 // the time to call into the VM.
2797 Label L1;
2798 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2799 __ testl(rcx,rcx);
2800 __ jcc(Assembler::zero, L1);
2801 // access constant pool cache entry
2802 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2803 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2804 __ verify_oop(rax);
2805 // rax,: object pointer copied above
2806 // rcx: cache entry pointer
2807 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2808 __ pop_ptr(rax); // restore object pointer
2809 __ bind(L1);
2810 }
2812 // access constant pool cache
2813 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2814 // replace index with field offset from cache entry
2815 __ movptr(rbx, Address(rcx,
2816 rbx,
2817 Address::times_ptr,
2818 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2821 // rax,: object
2822 __ verify_oop(rax);
2823 __ null_check(rax);
2824 // field addresses
2825 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2826 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2828 // access field
2829 switch (bytecode()) {
2830 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2831 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2832 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2833 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2834 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2835 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2836 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2837 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2838 default:
2839 ShouldNotReachHere();
2840 }
2842 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2843 // volatile_barrier( );
2844 }
2846 void TemplateTable::fast_xaccess(TosState state) {
2847 transition(vtos, state);
2848 // get receiver
2849 __ movptr(rax, aaddress(0));
2850 // access constant pool cache
2851 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2852 __ movptr(rbx, Address(rcx,
2853 rdx,
2854 Address::times_ptr,
2855 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2856 // make sure exception is reported in correct bcp range (getfield is next instruction)
2857 __ increment(rsi);
2858 __ null_check(rax);
2859 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2860 if (state == itos) {
2861 __ movl(rax, lo);
2862 } else if (state == atos) {
2863 __ movptr(rax, lo);
2864 __ verify_oop(rax);
2865 } else if (state == ftos) {
2866 __ fld_s(lo);
2867 } else {
2868 ShouldNotReachHere();
2869 }
2870 __ decrement(rsi);
2871 }
2875 //----------------------------------------------------------------------------------------------------
2876 // Calls
2878 void TemplateTable::count_calls(Register method, Register temp) {
2879 // implemented elsewhere
2880 ShouldNotReachHere();
2881 }
2884 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2885 // determine flags
2886 Bytecodes::Code code = bytecode();
2887 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2888 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2889 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2890 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2891 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2892 const bool receiver_null_check = is_invokespecial;
2893 const bool save_flags = is_invokeinterface || is_invokevirtual;
2894 // setup registers & access constant pool cache
2895 const Register recv = rcx;
2896 const Register flags = rdx;
2897 assert_different_registers(method, index, recv, flags);
2899 // save 'interpreter return address'
2900 __ save_bcp();
2902 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2904 // load receiver if needed (note: no return address pushed yet)
2905 if (load_receiver) {
2906 assert(!is_invokedynamic, "");
2907 __ movl(recv, flags);
2908 __ andl(recv, 0xFF);
2909 // recv count includes the receiver, which therefore sits at slot count-1
2910 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2911 __ movptr(recv, recv_addr);
2912 __ verify_oop(recv);
2913 }
2915 // do null check if needed
2916 if (receiver_null_check) {
2917 __ null_check(recv);
2918 }
2920 if (save_flags) {
2921 __ mov(rsi, flags);
2922 }
2924 // compute return type
2925 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2926 // Make sure we don't need to mask flags for tosBits after the above shift
2927 ConstantPoolCacheEntry::verify_tosBits();
2928 // load return address
2929 {
2930 address table_addr;
2931 if (is_invokeinterface || is_invokedynamic)
2932 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2933 else
2934 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2935 ExternalAddress table(table_addr);
2936 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2937 }
2939 // push return address
2940 __ push(flags);
2942 // Restore flag value from the constant pool cache, and restore rsi
2943 // for later null checks. rsi is the bytecode pointer
2944 if (save_flags) {
2945 __ mov(flags, rsi);
2946 __ restore_bcp();
2947 }
2948 }
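// Sketch of the return-address selection above: the interpreter keeps
// one dispatch table per invoke shape, indexed by the callee's result
// tos state (already shifted into 'flags'), so the push amounts to:
//
//   push(table[result_tos_state]);  // interpreter return address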
2951 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2952 Register flags) {
2954 // Uses temporary registers rax, rdx
2955 assert_different_registers(index, recv, rax, rdx);
2957 // Test for an invoke of a final method
2958 Label notFinal;
2959 __ movl(rax, flags);
2960 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2961 __ jcc(Assembler::zero, notFinal);
2963 Register method = index; // method must be rbx,
2964 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2966 // do the call - the index is actually the method to call
2967 __ verify_oop(method);
2969 // It's final, need a null check here!
2970 __ null_check(recv);
2972 // profile this call
2973 __ profile_final_call(rax);
2975 __ jump_from_interpreted(method, rax);
2977 __ bind(notFinal);
2979 // get receiver klass
2980 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2981 // Keep recv in rcx; the callee expects it there
2982 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2983 __ verify_oop(rax);
2985 // profile this call
2986 __ profile_virtual_call(rax, rdi, rdx);
2988 // get target methodOop & entry point
2989 const int base = instanceKlass::vtable_start_offset() * wordSize;
2990 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2991 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2992 __ jump_from_interpreted(method, rdx);
2993 }
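// The vtable load above corresponds to (a sketch):
//
//   klassOop  k = recv->klass();
//   methodOop m = *((address)k + vtable_start + index * vtable_entry_size
//                   + method_offset);  // i.e. k->vtable()[index].method()
//   jump_from_interpreted(m);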
2996 void TemplateTable::invokevirtual(int byte_no) {
2997 transition(vtos, vtos);
2998 assert(byte_no == f2_byte, "use this argument");
2999 prepare_invoke(rbx, noreg, byte_no);
3001 // rbx,: index
3002 // rcx: receiver
3003 // rdx: flags
3005 invokevirtual_helper(rbx, rcx, rdx);
3006 }
3009 void TemplateTable::invokespecial(int byte_no) {
3010 transition(vtos, vtos);
3011 assert(byte_no == f1_byte, "use this argument");
3012 prepare_invoke(rbx, noreg, byte_no);
3013 // do the call
3014 __ verify_oop(rbx);
3015 __ profile_call(rax);
3016 __ jump_from_interpreted(rbx, rax);
3017 }
3020 void TemplateTable::invokestatic(int byte_no) {
3021 transition(vtos, vtos);
3022 assert(byte_no == f1_byte, "use this argument");
3023 prepare_invoke(rbx, noreg, byte_no);
3024 // do the call
3025 __ verify_oop(rbx);
3026 __ profile_call(rax);
3027 __ jump_from_interpreted(rbx, rax);
3028 }
3031 void TemplateTable::fast_invokevfinal(int byte_no) {
3032 transition(vtos, vtos);
3033 assert(byte_no == f2_byte, "use this argument");
3034 __ stop("fast_invokevfinal not used on x86");
3035 }
3038 void TemplateTable::invokeinterface(int byte_no) {
3039 transition(vtos, vtos);
3040 assert(byte_no == f1_byte, "use this argument");
3041 prepare_invoke(rax, rbx, byte_no);
3043 // rax,: Interface
3044 // rbx,: index
3045 // rcx: receiver
3046 // rdx: flags
3048 // Special case of invokeinterface called for virtual method of
3049 // java.lang.Object. See cpCacheOop.cpp for details.
3050 // This code isn't produced by javac, but could be produced by
3051 // another compliant java compiler.
3052 Label notMethod;
3053 __ movl(rdi, rdx);
3054 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3055 __ jcc(Assembler::zero, notMethod);
3057 invokevirtual_helper(rbx, rcx, rdx);
3058 __ bind(notMethod);
3060 // Get receiver klass into rdx - also a null check
3061 __ restore_locals(); // restore rdi
3062 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
3063 __ verify_oop(rdx);
3065 // profile this call
3066 __ profile_virtual_call(rdx, rsi, rdi);
3068 Label no_such_interface, no_such_method;
3070 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3071 rdx, rax, rbx,
3072 // outputs: method, scan temp. reg
3073 rbx, rsi,
3074 no_such_interface);
3076 // rbx,: methodOop to call
3077 // rcx: receiver
3078 // Check for abstract method error
3079 // Note: This should be done more efficiently via a throw_abstract_method_error
3080 // interpreter entry point and a conditional jump to it in case of a null
3081 // method.
3082 __ testptr(rbx, rbx);
3083 __ jcc(Assembler::zero, no_such_method);
3085 // do the call
3086 // rcx: receiver
3087 // rbx,: methodOop
3088 __ jump_from_interpreted(rbx, rdx);
3089 __ should_not_reach_here();
3091 // exception handling code follows...
3092 // note: must restore interpreter registers to canonical
3093 // state for exception handling to work correctly!
3095 __ bind(no_such_method);
3096 // throw exception
3097 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3098 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3099 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3100 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3101 // the call_VM checks for exception, so we should never return here.
3102 __ should_not_reach_here();
3104 __ bind(no_such_interface);
3105 // throw exception
3106 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3107 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3108 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3109 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3110 InterpreterRuntime::throw_IncompatibleClassChangeError));
3111 // the call_VM checks for exception, so we should never return here.
3112 __ should_not_reach_here();
3113 }
3115 void TemplateTable::invokedynamic(int byte_no) {
3116 transition(vtos, vtos);
3118 if (!EnableInvokeDynamic) {
3119 // We should not encounter this bytecode if !EnableInvokeDynamic.
3120 // The verifier will stop it. However, if we get past the verifier,
3121 // this will stop the thread in a reasonable way, without crashing the JVM.
3122 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3123 InterpreterRuntime::throw_IncompatibleClassChangeError));
3124 // the call_VM checks for exception, so we should never return here.
3125 __ should_not_reach_here();
3126 return;
3127 }
3129 assert(byte_no == f1_oop, "use this argument");
3130 prepare_invoke(rax, rbx, byte_no);
3132 // rax: CallSite object (f1)
3133 // rbx: unused (f2)
3134 // rcx: receiver address
3135 // rdx: flags (unused)
3137 Register rax_callsite = rax;
3138 Register rcx_method_handle = rcx;
3140 if (ProfileInterpreter) {
3141 // %%% should make a type profile for any invokedynamic that takes a ref argument
3142 // profile this call
3143 __ profile_call(rsi);
3144 }
3146 __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3147 __ null_check(rcx_method_handle);
3148 __ prepare_to_jump_from_interpreted();
3149 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3150 }
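// At the Java level the tail of this template is roughly:
//
//   MethodHandle target = callSite.getTarget();  // f1 CallSite (rax)
//   if (target == null) throw new NullPointerException();
//   <jump to target's interpreted entry>         // rcx_method_handle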
3152 //----------------------------------------------------------------------------------------------------
3153 // Allocation
3155 void TemplateTable::_new() {
3156 transition(vtos, atos);
3157 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3158 Label slow_case;
3159 Label slow_case_no_pop;
3160 Label done;
3161 Label initialize_header;
3162 Label initialize_object; // including clearing the fields
3163 Label allocate_shared;
3165 __ get_cpool_and_tags(rcx, rax);
3167 // Make sure the class we're about to instantiate has been resolved.
3168 // This is done before loading instanceKlass, to be consistent with the order
3169 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3170 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3171 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3172 __ jcc(Assembler::notEqual, slow_case_no_pop);
3174 // get instanceKlass
3175 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3176 __ push(rcx); // save the klass pointer for initializing the header
3178 // make sure klass is initialized & doesn't have a finalizer
3179 // check that klass is fully initialized
3180 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3181 __ jcc(Assembler::notEqual, slow_case);
3183 // get instance_size in instanceKlass (scaled to a count of bytes)
3184 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3185 // test to see if it has a finalizer or is malformed in some way
3186 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3187 __ jcc(Assembler::notZero, slow_case);
3189 //
3190 // Allocate the instance
3191 // 1) Try to allocate in the TLAB
3192 // 2) if fail and the object is large allocate in the shared Eden
3193 // 3) if the above fails (or is not applicable), go to a slow case
3194 // (creates a new TLAB, etc.)
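// A pseudocode sketch of the fast paths generated below:
//
//   obj = thread->tlab_top();
//   if (UseTLAB && obj + size <= thread->tlab_end()) {
//     thread->set_tlab_top(obj + size);          // 1) TLAB bump
//   } else if (allow_shared_alloc) {
//     do { obj = *heap_top; }                    // 2) shared eden,
//     while (!CAS(heap_top, obj, obj + size));   //    lock cmpxchg
//   } else {
//     goto slow_case;                            // 3) runtime call
//   }
//   clear fields; install mark word and klass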
3196 const bool allow_shared_alloc =
3197 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3199 const Register thread = rcx;
3200 if (UseTLAB || allow_shared_alloc) {
3201 __ get_thread(thread);
3202 }
3204 if (UseTLAB) {
3205 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3206 __ lea(rbx, Address(rax, rdx, Address::times_1));
3207 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3208 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3209 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3210 if (ZeroTLAB) {
3211 // the fields have been already cleared
3212 __ jmp(initialize_header);
3213 } else {
3214 // initialize both the header and fields
3215 __ jmp(initialize_object);
3216 }
3217 }
3219 // Allocation in the shared Eden, if allowed.
3220 //
3221 // rdx: instance size in bytes
3222 if (allow_shared_alloc) {
3223 __ bind(allocate_shared);
3225 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3227 Label retry;
3228 __ bind(retry);
3229 __ movptr(rax, heap_top);
3230 __ lea(rbx, Address(rax, rdx, Address::times_1));
3231 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3232 __ jcc(Assembler::above, slow_case);
3234 // Compare rax with the current top addr; if still equal, store the new
3235 // top addr (rbx) through the top addr pointer. Sets ZF if they were
3236 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3237 //
3238 // rax,: object begin
3239 // rbx,: object end
3240 // rdx: instance size in bytes
3241 __ locked_cmpxchgptr(rbx, heap_top);
3243 // if someone beat us on the allocation, try again, otherwise continue
3244 __ jcc(Assembler::notEqual, retry);
3246 __ incr_allocated_bytes(thread, rdx, 0);
3247 }
3249 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3250 // The object is initialized before the header. If the object size is
3251 // zero, go directly to the header initialization.
3252 __ bind(initialize_object);
3253 __ decrement(rdx, sizeof(oopDesc));
3254 __ jcc(Assembler::zero, initialize_header);
3256 // Initialize topmost object field, divide rdx by 8, check if odd and
3257 // test if zero.
3258 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3259 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3261 // rdx must have been multiple of 8
3262 #ifdef ASSERT
3263 // make sure rdx was multiple of 8
3264 Label L;
3265 // Ignore partial flag stall after shrl() since it is debug VM
3266 __ jccb(Assembler::carryClear, L);
3267 __ stop("object size is not multiple of 2 - adjust this code");
3268 __ bind(L);
3269 // rdx must be > 0, no extra check needed here
3270 #endif
3272 // initialize remaining object fields: rdx was a multiple of 8
3273 { Label loop;
3274 __ bind(loop);
3275 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3276 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3277 __ decrement(rdx);
3278 __ jcc(Assembler::notZero, loop);
3279 }
3281 // initialize object header only.
3282 __ bind(initialize_header);
3283 if (UseBiasedLocking) {
3284 __ pop(rcx); // get saved klass back in the register.
3285 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3286 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3287 } else {
3288 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3289 (int32_t)markOopDesc::prototype()); // header
3290 __ pop(rcx); // get saved klass back in the register.
3291 }
3292 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3294 {
3295 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3296 // Trigger dtrace event for fastpath
3297 __ push(atos);
3298 __ call_VM_leaf(
3299 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3300 __ pop(atos);
3301 }
3303 __ jmp(done);
3304 }
3306 // slow case
3307 __ bind(slow_case);
3308 __ pop(rcx); // restore stack pointer to what it was when we came in.
3309 __ bind(slow_case_no_pop);
3310 __ get_constant_pool(rax);
3311 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3312 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3314 // continue
3315 __ bind(done);
3316 }
3319 void TemplateTable::newarray() {
3320 transition(itos, atos);
3321 __ push_i(rax); // make sure everything is on the stack
3322 __ load_unsigned_byte(rdx, at_bcp(1));
3323 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3324 __ pop_i(rdx); // discard size
3325 }
3328 void TemplateTable::anewarray() {
3329 transition(itos, atos);
3330 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3331 __ get_constant_pool(rcx);
3332 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3333 }
3336 void TemplateTable::arraylength() {
3337 transition(atos, itos);
3338 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3339 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3340 }
3343 void TemplateTable::checkcast() {
3344 transition(atos, atos);
3345 Label done, is_null, ok_is_subtype, quicked, resolved;
3346 __ testptr(rax, rax); // Object is in EAX
3347 __ jcc(Assembler::zero, is_null);
3349 // Get cpool & tags index
3350 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3351 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3352 // See if bytecode has already been quicked
3353 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3354 __ jcc(Assembler::equal, quicked);
3356 __ push(atos);
3357 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3358 __ pop_ptr(rdx);
3359 __ jmpb(resolved);
3361 // Get superklass in EAX and subklass in EBX
3362 __ bind(quicked);
3363 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3364 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3366 __ bind(resolved);
3367 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3369 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3370 // Superklass in EAX. Subklass in EBX.
3371 __ gen_subtype_check( rbx, ok_is_subtype );
3373 // Come here on failure
3374 __ push(rdx);
3375 // object is at TOS
3376 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3378 // Come here on success
3379 __ bind(ok_is_subtype);
3380 __ mov(rax,rdx); // Restore object from EDX
3382 // Collect counts on whether this check-cast sees NULLs a lot or not.
3383 if (ProfileInterpreter) {
3384 __ jmp(done);
3385 __ bind(is_null);
3386 __ profile_null_seen(rcx);
3387 } else {
3388 __ bind(is_null); // same as 'done'
3389 }
3390 __ bind(done);
3391 }
3394 void TemplateTable::instanceof() {
3395 transition(atos, itos);
3396 Label done, is_null, ok_is_subtype, quicked, resolved;
3397 __ testptr(rax, rax);
3398 __ jcc(Assembler::zero, is_null);
3400 // Get cpool & tags index
3401 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3402 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3403 // See if bytecode has already been quicked
3404 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3405 __ jcc(Assembler::equal, quicked);
3407 __ push(atos);
3408 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3409 __ pop_ptr(rdx);
3410 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3411 __ jmp(resolved);
3413 // Get superklass in EAX and subklass in EDX
3414 __ bind(quicked);
3415 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3416 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3418 __ bind(resolved);
3420 // Generate subtype check. Blows ECX. Resets EDI.
3421 // Superklass in EAX. Subklass in EDX.
3422 __ gen_subtype_check( rdx, ok_is_subtype );
3424 // Come here on failure
3425 __ xorl(rax,rax);
3426 __ jmpb(done);
3427 // Come here on success
3428 __ bind(ok_is_subtype);
3429 __ movl(rax, 1);
3431 // Collect counts on whether this test sees NULLs a lot or not.
3432 if (ProfileInterpreter) {
3433 __ jmp(done);
3434 __ bind(is_null);
3435 __ profile_null_seen(rcx);
3436 } else {
3437 __ bind(is_null); // same as 'done'
3438 }
3439 __ bind(done);
3440 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3441 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3442 }
3445 //----------------------------------------------------------------------------------------------------
3446 // Breakpoints
3447 void TemplateTable::_breakpoint() {
3449 // Note: We get here even if we are single stepping.
3450 // jbug insists on setting breakpoints at every bytecode
3451 // even if we are in single step mode.
3453 transition(vtos, vtos);
3455 // get the unpatched byte code
3456 __ get_method(rcx);
3457 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3458 __ mov(rbx, rax);
3460 // post the breakpoint event
3461 __ get_method(rcx);
3462 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3464 // complete the execution of original bytecode
3465 __ dispatch_only_normal(vtos);
3466 }
3469 //----------------------------------------------------------------------------------------------------
3470 // Exceptions
3472 void TemplateTable::athrow() {
3473 transition(atos, vtos);
3474 __ null_check(rax);
3475 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3476 }
3479 //----------------------------------------------------------------------------------------------------
3480 // Synchronization
3481 //
3482 // Note: monitorenter & exit are symmetric routines, which is reflected
3483 // in the assembly code structure as well
3484 //
3485 // Stack layout:
3486 //
3487 // [expressions ] <--- rsp = expression stack top
3488 // ..
3489 // [expressions ]
3490 // [monitor entry] <--- monitor block top = expression stack bot
3491 // ..
3492 // [monitor entry]
3493 // [frame data ] <--- monitor block bot
3494 // ...
3495 // [saved rbp, ] <--- rbp,
3498 void TemplateTable::monitorenter() {
3499 transition(atos, vtos);
3501 // check for NULL object
3502 __ null_check(rax);
3504 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3505 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3506 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3507 Label allocated;
3509 // initialize entry pointer
3510 __ xorl(rdx, rdx); // points to free slot or NULL
3512 // find a free slot in the monitor block (result in rdx)
3513 { Label entry, loop, exit;
3514 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3515 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3516 __ jmpb(entry);
3518 __ bind(loop);
3519 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3521 // TODO - need new func here - kbt
3522 if (VM_Version::supports_cmov()) {
3523 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3524 } else {
3525 Label L;
3526 __ jccb(Assembler::notEqual, L);
3527 __ mov(rdx, rcx); // if not used then remember entry in rdx
3528 __ bind(L);
3529 }
3530 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3531 __ jccb(Assembler::equal, exit); // if same object then stop searching
3532 __ addptr(rcx, entry_size); // otherwise advance to next entry
3533 __ bind(entry);
3534 __ cmpptr(rcx, rbx); // check if bottom reached
3535 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3536 __ bind(exit);
3537 }
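// The search above as an illustrative C-like sketch (rdx/rcx as in the code):
//   rdx = NULL;
//   for (rcx = monitor_block_top; rcx != monitor_block_bot; rcx += entry_size) {
//     if (rcx->obj == NULL) rdx = rcx;   // remember most recent free slot
//     if (rcx->obj == rax)  break;       // found our object: stop searching
//   }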
3539 __ testptr(rdx, rdx); // check if a slot has been found
3540 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3542 // allocate one if there's no free slot
3543 { Label entry, loop;
3544 // 1. compute new pointers // rsp: old expression stack top
3545 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3546 __ subptr(rsp, entry_size); // move expression stack top
3547 __ subptr(rdx, entry_size); // move expression stack bottom
3548 __ mov(rcx, rsp); // set start value for copy loop
3549 __ movptr(monitor_block_bot, rdx); // set new monitor block bottom
3550 __ jmp(entry);
3551 // 2. move expression stack contents
3552 __ bind(loop);
3553 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3554 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3555 __ addptr(rcx, wordSize); // advance to next word
3556 __ bind(entry);
3557 __ cmpptr(rcx, rdx); // check if bottom reached
3558 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3559 }
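// Illustrative sketch of the copy loop above: every live expression stack word
// is shifted down by one monitor entry to open up the new slot:
//   for (rcx = new_rsp; rcx != new_monitor_block_bot; rcx += wordSize)
//     *rcx = *(rcx + entry_size);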
3561 // call run-time routine
3562 // rdx: points to monitor entry
3563 __ bind(allocated);
3565 // Increment bcp to point to the next bytecode, so that exception handling for asynchronous exceptions works correctly.
3566 // The object has already been popped from the stack, so the expression stack looks correct.
3567 __ increment(rsi);
3569 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3570 __ lock_object(rdx);
3572 // check to make sure this monitor doesn't cause stack overflow after locking
3573 __ save_bcp(); // in case of exception
3574 __ generate_stack_overflow_check(0);
3576 // The bcp has already been incremented. Just need to dispatch to next instruction.
3577 __ dispatch_next(vtos);
3578 }
3581 void TemplateTable::monitorexit() {
3582 transition(atos, vtos);
3584 // check for NULL object
3585 __ null_check(rax);
3587 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3588 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3589 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3590 Label found;
3592 // find matching slot
3593 { Label entry, loop;
3594 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3595 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3596 __ jmpb(entry);
3598 __ bind(loop);
3599 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3600 __ jcc(Assembler::equal, found); // if same object then stop searching
3601 __ addptr(rdx, entry_size); // otherwise advance to next entry
3602 __ bind(entry);
3603 __ cmpptr(rdx, rbx); // check if bottom reached
3604 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3605 }
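// (Same scan shape as the free-slot search in monitorenter above, except that
// here we only look for the entry whose obj field matches rax.)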
3607 // Error handling: falling through the search loop means the object was never locked in this frame, i.e. unlocking was not block-structured.
3608 Label end;
3609 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3610 __ should_not_reach_here();
3612 // call run-time routine
3613 // rdx: points to monitor entry
3614 __ bind(found);
3615 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3616 __ unlock_object(rdx);
3617 __ pop_ptr(rax); // discard object
3618 __ bind(end);
3619 }
3622 //----------------------------------------------------------------------------------------------------
3623 // Wide instructions
3625 void TemplateTable::wide() {
3626 transition(vtos, vtos);
3627 __ load_unsigned_byte(rbx, at_bcp(1));
3628 ExternalAddress wtable((address)Interpreter::_wentry_point);
3629 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3630 // Note: the rsi increment step is part of the individual wide bytecode implementations
3631 }
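// Illustrative example: for "wide iload <index16>" the byte at bcp+1 is the
// iload opcode (0x15); it indexes _wentry_point, and the wide variant of iload
// then reads the 16-bit local index and advances rsi by 4 itself.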
3634 //----------------------------------------------------------------------------------------------------
3635 // Multi arrays
3637 void TemplateTable::multianewarray() {
3638 transition(vtos, atos);
3639 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3640 // last dim is on top of stack; we want address of first one:
3641 // first_addr = last_addr + ndims * stackElementSize - 1 * wordSize
3642 // (with stackElementSize == wordSize here, this is last_addr + (ndims - 1) * wordSize, the address of the first count).
3643 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3644 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax
3645 __ load_unsigned_byte(rbx, at_bcp(3));
3646 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3647 }
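// Worked example: for "new int[2][3]" javac emits
//   iconst_2; iconst_3; multianewarray #cp_index, 2
// so ndims = 2 is the unsigned byte at bcp+3, the two counts sit on the
// expression stack (3 on top), and the final lea pops both counts after the
// runtime call returns the new array in rax.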
3649 #endif /* !CC_INTERP */