Tue, 02 Aug 2011 18:36:40 +0200
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
Summary: replace MemBarAcquire/MemBarRelease nodes on the monitor enter/exit code paths with new MemBarAcquireLock/MemBarReleaseLock nodes
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
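// Note: locals are addressed relative to rdi (the locals pointer).
// Higher-numbered slots live at lower addresses, which is why
// locals_index() negates the index before it reaches the register
// forms of these helpers.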
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond rsp, which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp()
// (it is the same for category 1 values).
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
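// j_not returns the x86 condition that is the logical negation of cc;
// the branch templates use it to jump to the not-taken path.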
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}


//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL.
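// 'precise' requests precise card marking: the card covering the exact
// element address is dirtied (needed for array stores) instead of the
// card covering the object head.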

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                rcx /* thread */,
                                rsi /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   rcx /* thread */,
                                   rbx /* tmp */,
                                   rsi /* tmp2 */);
        }
        __ restore_bcp();

      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default      :
      ShouldNotReachHere();

  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {

  if (!RewriteBytecodes) return;
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
#else
    __ jmp(patch_done);
#endif
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
  __ jccb(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}


void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better way to load 2.0f
  } else                 { ShouldNotReachHere();
  }
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if        (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}


void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}


void TemplateTable::sipush() {
  transition(vtos, itos);
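  // The operand is a 2-byte big-endian immediate: load it zero-extended,
  // byte-swap it into the high half of rax, then shift right arithmetically
  // to sign-extend it back into the low 16 bits.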
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from the first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  { Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_Object);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  Label isOop;
  // atos and itos
  // Integer is the only non-oop type we will see here
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);
  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(atos);

  if (VerifyOops) {
    __ verify_oop(rax);
  }
  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }

  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;  // same as Rcache
  __ load_klass(con_klass_temp, rax);
  __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ jcc(Assembler::notEqual, L_done);
  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
  __ jcc(Assembler::notEqual, L_throw_exception);
  __ xorptr(rax, rax);
  __ jmp(L_done);

  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
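  // The unsigned index is negated because the register forms of
  // iaddress() & friends scale it off rdi, and local slots sit at
  // decreasing addresses.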
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
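  // The wide index is a 2-byte big-endian operand: byte-swap the loaded
  // word and shift the two index bytes back down (zero-extended), then
  // negate as in locals_index().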
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}


void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // can do better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ fld_d(daddress(n));
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently.  If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0, which doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are the most profitable to rewrite because the
  // rewritten pairs need only a small amount of code.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ fstp_d(daddress(rbx));
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}


void TemplateTable::wide_fstore() {
  wide_istore();
}


void TemplateTable::wide_dstore() {
  wide_lstore();
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ load_klass(rbx, rax);
  // Move superklass into EAX
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array+index*wordSize+12 into a single register.  Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows ECX.  Resets EDI to locals.
  // Superklass in EAX.  Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL (noreg means NULL for do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ fstp_d(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}


void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
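  // rdx:rax holds the TOS operand (y); rcx:rbx now holds the other
  // operand (x).  Each operation is performed in two 32-bit halves,
  // with add/adc and sub/sbb propagating the carry/borrow.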
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}


void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}


void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
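  // The second operand (y) is in ST0 and the first (x) is on the
  // expression stack at rsp, so subtraction and division use the
  // reversed forms (fsubr/fdivr) to compute x OP y.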
  switch (op) {
    case add: __ fadd_s (at_rsp()); break;
    case sub: __ fsubr_s(at_rsp()); break;
    case mul: __ fmul_s (at_rsp()); break;
    case div: __ fdivr_s(at_rsp()); break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop float thing off
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, methodOopDesc::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);           // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  { TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);          // store int on tos
      __ fild_s(at_rsp());   // load int to ST0
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);          // add one slot for d2ieee()
      __ push(rax);          // store int on tos
      __ fild_s(at_rsp());   // load int to ST0
      __ d2ieee();           // truncate to double size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);      // truncate upper 24 bits
      __ sarl(rax, 24);      // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF);  // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);      // truncate upper 16 bits
      __ sarl(rax, 16);      // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);          // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());   // load long to ST0
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);          // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());   // load long to ST0
      __ d2ieee();           // truncate to double size
      __ pop(rcx);           // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);          // reserve space for argument
      __ fstp_s(at_rsp());   // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);          // reserve space for argument
      __ fstp_s(at_rsp());   // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);          // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());   // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);          // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());   // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);          // reserve space for f2ieee()
      __ f2ieee();           // truncate to float size
      __ pop(rcx);           // adjust rsp
      break;
    default:
      ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ fld_s(at_rsp());
  } else {
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // ECX holds method
  __ profile_taken_branch(rax, rbx); // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up EDX with the branch displacement
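  // The displacement bytes are big-endian: bswap converts them, and for a
  // non-wide branch only the first two operand bytes form the offset, so
  // the arithmetic shift sign-extends the high half back down into 16 bits.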
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in methodOop
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against rbx, which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes from the
          // methodDataOop, whose value does not get reset on the call to
          // frequency_counter_overflow().  To avoid excessive calls to the overflow
          // routine while the method is being compiled, add a second test to make
          // sure the overflow function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andptr(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);
        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against rax, which is the sum of the counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {

      // backedge counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method.  First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      const Register thread = rcx;
      __ get_thread(thread);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax is the OSR buffer; move it to the expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();        // remove frame anchor
      __ pop(rdi);       // get return address
      __ mov(rsp, rdx);  // set sp to sender sp

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rsi
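  // (The 4-byte tableswitch operands are 4-byte aligned; rounding
  // bcp + wordSize down to a word boundary yields the address of the
  // default offset.)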
  __ lea(rbx, at_bcp(wordSize));
  __ andptr(rbx, -wordSize);
  // load lo & hi
  __ movl(rcx, Address(rbx, 1 * wordSize));
  __ movl(rdx, Address(rbx, 2 * wordSize));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jccb(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jccb(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
1855 void TemplateTable::fast_linearswitch() {
1856 transition(itos, vtos);
1857 Label loop_entry, loop, found, continue_execution;
1858 // bswapl rax, so we can avoid bswapping the table entries
1859 __ bswapl(rax);
1860 // align rsi
1861 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1862 __ andptr(rbx, -wordSize);
1863 // set counter
1864 __ movl(rcx, Address(rbx, wordSize));
1865 __ bswapl(rcx);
1866 __ jmpb(loop_entry);
1867 // table search
1868 __ bind(loop);
1869 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1870 __ jccb(Assembler::equal, found);
1871 __ bind(loop_entry);
1872 __ decrementl(rcx);
1873 __ jcc(Assembler::greaterEqual, loop);
1874 // default case
1875 __ profile_switch_default(rax);
1876 __ movl(rdx, Address(rbx, 0));
1877 __ jmpb(continue_execution);
1878 // entry found -> get offset
1879 __ bind(found);
1880 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1881 __ profile_switch_case(rcx, rax, rbx);
1882 // continue execution
1883 __ bind(continue_execution);
1884 __ bswapl(rdx);
1885 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1886 __ addptr(rsi, rdx);
1887 __ dispatch_only(vtos);
1888 }
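// Equivalent C-style sketch of the linear probe above. Note the trick: the
// key is byte-swapped once up front, so the big-endian match words can be
// compared without swapping each table entry (names are illustrative).
//
//   int32_t linear_probe(int32_t key_be, const PairSketch* pairs, int npairs,
//                        int32_t def) {
//     for (int i = npairs - 1; i >= 0; i--)          // rcx counts down as well
//       if (pairs[i].match == key_be) return be32(pairs[i].offset);
//     return be32(def);
//   }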
1891 void TemplateTable::fast_binaryswitch() {
1892 transition(itos, vtos);
1893 // Implementation using the following core algorithm:
1894 //
1895 // int binary_search(int key, LookupswitchPair* array, int n) {
1896 // // Binary search according to "Methodik des Programmierens" by
1897 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1898 // int i = 0;
1899 // int j = n;
1900 // while (i+1 < j) {
1901 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1902 // // with Q: for all i: 0 <= i < n: key < a[i]
1903 // // where a stands for the array and assuming that the (nonexistent)
1904 // // element a[n] is infinitely big.
1905 // int h = (i + j) >> 1;
1906 // // i < h < j
1907 // if (key < array[h].fast_match()) {
1908 // j = h;
1909 // } else {
1910 // i = h;
1911 // }
1912 // }
1913 // // R: a[i] <= key < a[i+1] or Q
1914 // // (i.e., if key is within array, i is the correct index)
1915 // return i;
1916 // }
1918 // register allocation
1919 const Register key = rax; // already set (tosca)
1920 const Register array = rbx;
1921 const Register i = rcx;
1922 const Register j = rdx;
1923 const Register h = rdi; // needs to be restored
1924 const Register temp = rsi;
1925 // setup array
1926 __ save_bcp();
1928 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1929 __ andptr(array, -wordSize);
1930 // initialize i & j
1931 __ xorl(i, i); // i = 0;
1932 __ movl(j, Address(array, -wordSize)); // j = length(array);
1933 // Convert j into native byte ordering
1934 __ bswapl(j);
1935 // and start
1936 Label entry;
1937 __ jmp(entry);
1939 // binary search loop
1940 { Label loop;
1941 __ bind(loop);
1942 // int h = (i + j) >> 1;
1943 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1944 __ sarl(h, 1); // h = (i + j) >> 1;
1945 // if (key < array[h].fast_match()) {
1946 // j = h;
1947 // } else {
1948 // i = h;
1949 // }
1950 // Convert array[h].match to native byte-ordering before compare
1951 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1952 __ bswapl(temp);
1953 __ cmpl(key, temp);
1954 // j = h if (key < array[h].fast_match())
1955 __ cmov32(Assembler::less , j, h);
1956 // i = h if (key >= array[h].fast_match())
1957 __ cmov32(Assembler::greaterEqual, i, h);
1958 // while (i+1 < j)
1959 __ bind(entry);
1960 __ leal(h, Address(i, 1)); // i+1
1961 __ cmpl(h, j); // i+1 < j
1962 __ jcc(Assembler::less, loop);
1963 }
1965 // end of binary search, result index is i (must check again!)
1966 Label default_case;
1967 // Convert array[i].match to native byte-ordering before compare
1968 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1969 __ bswapl(temp);
1970 __ cmpl(key, temp);
1971 __ jcc(Assembler::notEqual, default_case);
1973 // entry found -> j = offset
1974 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1975 __ profile_switch_case(i, key, array);
1976 __ bswapl(j);
1977 LP64_ONLY(__ movslq(j, j));
1978 __ restore_bcp();
1979 __ restore_locals(); // restore rdi
1980 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1982 __ addptr(rsi, j);
1983 __ dispatch_only(vtos);
1985 // default case -> j = default offset
1986 __ bind(default_case);
1987 __ profile_switch_default(i);
1988 __ movl(j, Address(array, -2*wordSize));
1989 __ bswapl(j);
1990 LP64_ONLY(__ movslq(j, j));
1991 __ restore_bcp();
1992 __ restore_locals(); // restore rdi
1993 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1994 __ addptr(rsi, j);
1995 __ dispatch_only(vtos);
1996 }
1999 void TemplateTable::_return(TosState state) {
2000 transition(state, state);
2001 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2003 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2004 assert(state == vtos, "only valid state");
2005 __ movptr(rax, aaddress(0));
2006 __ load_klass(rdi, rax);
2007 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2008 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2009 Label skip_register_finalizer;
2010 __ jcc(Assembler::zero, skip_register_finalizer);
2012 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2014 __ bind(skip_register_finalizer);
2015 }
2017 __ remove_activation(state, rsi);
2018 __ jmp(rsi);
2019 }
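// Sketch of the finalizer check above (accessors are illustrative): the
// runtime call is skipped unless the receiver's klass actually has a
// finalizer, so the common case costs only a load and a test.
//
//   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
//     InterpreterRuntime::register_finalizer(thread, receiver);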
2022 // ----------------------------------------------------------------------------
2023 // Volatile variables demand their effects be made known to all CPUs in
2024 // order. Store buffers on most chips allow reads & writes to reorder; the
2025 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2026 // memory barrier (i.e., it's not sufficient that the interpreter does not
2027 // reorder volatile references, the hardware also must not reorder them).
2028 //
2029 // According to the new Java Memory Model (JMM):
2030 // (1) All volatiles are serialized with respect to each other.
2031 // ALSO reads & writes act as acquire & release, so:
2032 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2033 // the read float up to before the read. It's OK for non-volatile memory refs
2034 // that happen before the volatile read to float down below it.
2035 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2036 // that happen BEFORE the write float down to after the write. It's OK for
2037 // non-volatile memory refs that happen after the volatile write to float up
2038 // before it.
2039 //
2040 // We only put in barriers around volatile refs (they are expensive), not
2041 // _between_ memory refs (that would require us to track the flavor of the
2042 // previous memory refs). Requirements (2) and (3) require some barriers
2043 // before volatile stores and after volatile loads. These nearly cover
2044 // requirement (1) but miss the volatile-store-volatile-load case. This final
2045 // case is placed after volatile-stores although it could just as well go
2046 // before volatile-loads.
2047 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2048 // Helper function to insert a memory barrier; the is-volatile test is done by the callers
2049 if( !os::is_MP() ) return; // Not needed on single CPU
2050 __ membar(order_constraint);
2051 }
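// Hedged sketch of how the rules above translate into barrier placement on
// this platform (on x86 only the StoreLoad case costs anything; the mask
// below mirrors the call sites later in this file):
//
//   load(field);                                  // volatile read: no explicit
//                                                 // barrier needed on x86
//   store(field, value);                          // volatile write, then:
//   volatile_barrier(Assembler::Membar_mask_bits(
//       Assembler::StoreLoad | Assembler::StoreStore)); // covers rule (1)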
2053 void TemplateTable::resolve_cache_and_index(int byte_no,
2054 Register result,
2055 Register Rcache,
2056 Register index,
2057 size_t index_size) {
2058 Register temp = rbx;
2060 assert_different_registers(result, Rcache, index, temp);
2062 Label resolved;
2063 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2064 if (byte_no == f1_oop) {
2065 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2066 // This kind of CP cache entry does not need to match the flags byte, because
2067 // there is a 1-1 relation between bytecode type and CP entry type.
2068 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2069 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2070 __ testptr(result, result);
2071 __ jcc(Assembler::notEqual, resolved);
2072 } else {
2073 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2074 assert(result == noreg, ""); //else change code for setting result
2075 const int shift_count = (1 + byte_no)*BitsPerByte;
2076 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2077 __ shrl(temp, shift_count);
2078 // have we resolved this bytecode?
2079 __ andl(temp, 0xFF);
2080 __ cmpl(temp, (int)bytecode());
2081 __ jcc(Assembler::equal, resolved);
2082 }
2084 // resolve first time through
2085 address entry;
2086 switch (bytecode()) {
2087 case Bytecodes::_getstatic : // fall through
2088 case Bytecodes::_putstatic : // fall through
2089 case Bytecodes::_getfield : // fall through
2090 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2091 case Bytecodes::_invokevirtual : // fall through
2092 case Bytecodes::_invokespecial : // fall through
2093 case Bytecodes::_invokestatic : // fall through
2094 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2095 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2096 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2097 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2098 default : ShouldNotReachHere(); break;
2099 }
2100 __ movl(temp, (int)bytecode());
2101 __ call_VM(noreg, entry, temp);
2102 // Update registers with resolved info
2103 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2104 if (result != noreg)
2105 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2106 __ bind(resolved);
2107 }
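// The indices field decoded above packs the constant pool index together
// with two bytecode bytes; a hedged sketch of the layout and the resolved
// check (the exact bit assignments live in ConstantPoolCacheEntry):
//
//   // indices: [ bytecode_2 : 8 | bytecode_1 : 8 | cp_index : 16 ]
//   bool is_resolved(uint32_t indices, int byte_no, uint8_t bc) {
//     int shift = (1 + byte_no) * BitsPerByte;  // selects bytecode_1 or bytecode_2
//     return ((indices >> shift) & 0xFF) == bc;
//   }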
2110 // The cache and index registers must be set before the call
2111 void TemplateTable::load_field_cp_cache_entry(Register obj,
2112 Register cache,
2113 Register index,
2114 Register off,
2115 Register flags,
2116 bool is_static = false) {
2117 assert_different_registers(cache, index, flags, off);
2119 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2120 // Field offset
2121 __ movptr(off, Address(cache, index, Address::times_ptr,
2122 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2123 // Flags
2124 __ movl(flags, Address(cache, index, Address::times_ptr,
2125 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2127 // klass (overwrites the obj register, static fields only)
2128 if (is_static) {
2129 __ movptr(obj, Address(cache, index, Address::times_ptr,
2130 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2131 }
2132 }
2134 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2135 Register method,
2136 Register itable_index,
2137 Register flags,
2138 bool is_invokevirtual,
2139 bool is_invokevfinal /*unused*/,
2140 bool is_invokedynamic) {
2141 // setup registers
2142 const Register cache = rcx;
2143 const Register index = rdx;
2144 assert_different_registers(method, flags);
2145 assert_different_registers(method, cache, index);
2146 assert_different_registers(itable_index, flags);
2147 assert_different_registers(itable_index, cache, index);
2148 // determine constant pool cache field offsets
2149 const int method_offset = in_bytes(
2150 constantPoolCacheOopDesc::base_offset() +
2151 (is_invokevirtual
2152 ? ConstantPoolCacheEntry::f2_offset()
2153 : ConstantPoolCacheEntry::f1_offset()
2154 )
2155 );
2156 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2157 ConstantPoolCacheEntry::flags_offset());
2158 // access constant pool cache fields
2159 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2160 ConstantPoolCacheEntry::f2_offset());
2162 if (byte_no == f1_oop) {
2163 // Resolved f1_oop goes directly into 'method' register.
2164 assert(is_invokedynamic, "");
2165 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2166 } else {
2167 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2168 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2169 }
2170 if (itable_index != noreg) {
2171 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2172 }
2173 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2174 }
2177 // The cache and index registers are expected to be set before the call.
2178 // Their correct values are preserved across the call.
2179 void TemplateTable::jvmti_post_field_access(Register cache,
2180 Register index,
2181 bool is_static,
2182 bool has_tos) {
2183 if (JvmtiExport::can_post_field_access()) {
2184 // Check to see if a field access watch has been set before we take
2185 // the time to call into the VM.
2186 Label L1;
2187 assert_different_registers(cache, index, rax);
2188 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2189 __ testl(rax,rax);
2190 __ jcc(Assembler::zero, L1);
2192 // cache entry pointer
2193 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2194 __ shll(index, LogBytesPerWord);
2195 __ addptr(cache, index);
2196 if (is_static) {
2197 __ xorptr(rax, rax); // NULL object reference
2198 } else {
2199 __ pop(atos); // Get the object
2200 __ verify_oop(rax);
2201 __ push(atos); // Restore stack state
2202 }
2203 // rax,: object pointer or NULL
2204 // cache: cache entry pointer
2205 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2206 rax, cache);
2207 __ get_cache_and_index_at_bcp(cache, index, 1);
2208 __ bind(L1);
2209 }
2210 }
2212 void TemplateTable::pop_and_check_object(Register r) {
2213 __ pop_ptr(r);
2214 __ null_check(r); // for field access must check obj.
2215 __ verify_oop(r);
2216 }
2218 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2219 transition(vtos, vtos);
2221 const Register cache = rcx;
2222 const Register index = rdx;
2223 const Register obj = rcx;
2224 const Register off = rbx;
2225 const Register flags = rax;
2227 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2228 jvmti_post_field_access(cache, index, is_static, false);
2229 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2231 if (!is_static) pop_and_check_object(obj);
2233 const Address lo(obj, off, Address::times_1, 0*wordSize);
2234 const Address hi(obj, off, Address::times_1, 1*wordSize);
2236 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2238 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2239 assert(btos == 0, "change code, btos != 0");
2240 // btos
2241 __ andptr(flags, 0x0f);
2242 __ jcc(Assembler::notZero, notByte);
2244 __ load_signed_byte(rax, lo );
2245 __ push(btos);
2246 // Rewrite bytecode to be faster
2247 if (!is_static) {
2248 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2249 }
2250 __ jmp(Done);
2252 __ bind(notByte);
2253 // itos
2254 __ cmpl(flags, itos );
2255 __ jcc(Assembler::notEqual, notInt);
2257 __ movl(rax, lo );
2258 __ push(itos);
2259 // Rewrite bytecode to be faster
2260 if (!is_static) {
2261 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2262 }
2263 __ jmp(Done);
2265 __ bind(notInt);
2266 // atos
2267 __ cmpl(flags, atos );
2268 __ jcc(Assembler::notEqual, notObj);
2270 __ movl(rax, lo );
2271 __ push(atos);
2272 if (!is_static) {
2273 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2274 }
2275 __ jmp(Done);
2277 __ bind(notObj);
2278 // ctos
2279 __ cmpl(flags, ctos );
2280 __ jcc(Assembler::notEqual, notChar);
2282 __ load_unsigned_short(rax, lo );
2283 __ push(ctos);
2284 if (!is_static) {
2285 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2286 }
2287 __ jmp(Done);
2289 __ bind(notChar);
2290 // stos
2291 __ cmpl(flags, stos );
2292 __ jcc(Assembler::notEqual, notShort);
2294 __ load_signed_short(rax, lo );
2295 __ push(stos);
2296 if (!is_static) {
2297 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2298 }
2299 __ jmp(Done);
2301 __ bind(notShort);
2302 // ltos
2303 __ cmpl(flags, ltos );
2304 __ jcc(Assembler::notEqual, notLong);
2306 // Generate code as if volatile. There just aren't enough registers to
2307 // save that information and this code is faster than the test.
2308 __ fild_d(lo); // Must load atomically
2309 __ subptr(rsp,2*wordSize); // Make space for store
2310 __ fistp_d(Address(rsp,0));
2311 __ pop(rax);
2312 __ pop(rdx);
2314 __ push(ltos);
2315 // Don't rewrite to _fast_lgetfield for potential volatile case.
2316 __ jmp(Done);
2318 __ bind(notLong);
2319 // ftos
2320 __ cmpl(flags, ftos );
2321 __ jcc(Assembler::notEqual, notFloat);
2323 __ fld_s(lo);
2324 __ push(ftos);
2325 if (!is_static) {
2326 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2327 }
2328 __ jmp(Done);
2330 __ bind(notFloat);
2331 // dtos
2332 __ cmpl(flags, dtos );
2333 __ jcc(Assembler::notEqual, notDouble);
2335 __ fld_d(lo);
2336 __ push(dtos);
2337 if (!is_static) {
2338 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2339 }
2340 __ jmpb(Done);
2342 __ bind(notDouble);
2344 __ stop("Bad state");
2346 __ bind(Done);
2347 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2348 // volatile_barrier( );
2349 }
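// Hedged sketch of the dispatch just generated: shifting the cp cache flags
// word by tosBits leaves a TosState-like type code in the low bits, and the
// compare chain above is effectively this switch (btos == 0 lets the first
// case use a plain test instead of a compare):
//
//   switch ((flags >> tosBits) & 0x0f) {
//     case btos: value = *(int8_t*)field_addr;  break;
//     case itos: value = *(int32_t*)field_addr; break;
//     // ... one case per TosState; ltos/dtos must be loaded atomically
//   }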
2352 void TemplateTable::getfield(int byte_no) {
2353 getfield_or_static(byte_no, false);
2354 }
2357 void TemplateTable::getstatic(int byte_no) {
2358 getfield_or_static(byte_no, true);
2359 }
2361 // The cache and index registers are expected to be set before the call.
2362 // The function may destroy various registers, but not the cache and index registers.
2363 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2365 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2367 if (JvmtiExport::can_post_field_modification()) {
2368 // Check to see if a field modification watch has been set before we take
2369 // the time to call into the VM.
2370 Label L1;
2371 assert_different_registers(cache, index, rax);
2372 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2373 __ testl(rax, rax);
2374 __ jcc(Assembler::zero, L1);
2376 // The cache and index registers have already been set. This call could
2377 // be eliminated, but the cache and index registers would then have to
2378 // be used consistently below this line.
2379 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2381 if (is_static) {
2382 // Life is simple. Null out the object pointer.
2383 __ xorptr(rbx, rbx);
2384 } else {
2385 // Life is harder. The stack holds the value on top, followed by the object.
2386 // We don't know the size of the value, though; it could be one or two words
2387 // depending on its type. As a result, we must find the type to determine where
2388 // the object is.
2389 Label two_word, valsize_known;
2390 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2391 ConstantPoolCacheEntry::flags_offset())));
2392 __ mov(rbx, rsp);
2393 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2394 // Make sure we don't need to mask rcx for tosBits after the above shift
2395 ConstantPoolCacheEntry::verify_tosBits();
2396 __ cmpl(rcx, ltos);
2397 __ jccb(Assembler::equal, two_word);
2398 __ cmpl(rcx, dtos);
2399 __ jccb(Assembler::equal, two_word);
2400 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2401 __ jmpb(valsize_known);
2403 __ bind(two_word);
2404 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2406 __ bind(valsize_known);
2407 // setup object pointer
2408 __ movptr(rbx, Address(rbx, 0));
2409 }
2410 // cache entry pointer
2411 __ addptr(rax, in_bytes(cp_base_offset));
2412 __ shll(rdx, LogBytesPerWord);
2413 __ addptr(rax, rdx);
2414 // object (tos)
2415 __ mov(rcx, rsp);
2416 // rbx,: object pointer set up above (NULL if static)
2417 // rax,: cache entry pointer
2418 // rcx: jvalue object on the stack
2419 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2420 rbx, rax, rcx);
2421 __ get_cache_and_index_at_bcp(cache, index, 1);
2422 __ bind(L1);
2423 }
2424 }
2427 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2428 transition(vtos, vtos);
2430 const Register cache = rcx;
2431 const Register index = rdx;
2432 const Register obj = rcx;
2433 const Register off = rbx;
2434 const Register flags = rax;
2436 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2437 jvmti_post_field_mod(cache, index, is_static);
2438 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2440 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2441 // volatile_barrier( );
2443 Label notVolatile, Done;
2444 __ movl(rdx, flags);
2445 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2446 __ andl(rdx, 0x1);
2448 // field addresses
2449 const Address lo(obj, off, Address::times_1, 0*wordSize);
2450 const Address hi(obj, off, Address::times_1, 1*wordSize);
2452 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2454 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2455 assert(btos == 0, "change code, btos != 0");
2456 // btos
2457 __ andl(flags, 0x0f);
2458 __ jcc(Assembler::notZero, notByte);
2460 __ pop(btos);
2461 if (!is_static) pop_and_check_object(obj);
2462 __ movb(lo, rax );
2463 if (!is_static) {
2464 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2465 }
2466 __ jmp(Done);
2468 __ bind(notByte);
2469 // itos
2470 __ cmpl(flags, itos );
2471 __ jcc(Assembler::notEqual, notInt);
2473 __ pop(itos);
2474 if (!is_static) pop_and_check_object(obj);
2476 __ movl(lo, rax );
2477 if (!is_static) {
2478 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2479 }
2480 __ jmp(Done);
2482 __ bind(notInt);
2483 // atos
2484 __ cmpl(flags, atos );
2485 __ jcc(Assembler::notEqual, notObj);
2487 __ pop(atos);
2488 if (!is_static) pop_and_check_object(obj);
2490 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2492 if (!is_static) {
2493 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2494 }
2496 __ jmp(Done);
2498 __ bind(notObj);
2499 // ctos
2500 __ cmpl(flags, ctos );
2501 __ jcc(Assembler::notEqual, notChar);
2503 __ pop(ctos);
2504 if (!is_static) pop_and_check_object(obj);
2505 __ movw(lo, rax );
2506 if (!is_static) {
2507 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2508 }
2509 __ jmp(Done);
2511 __ bind(notChar);
2512 // stos
2513 __ cmpl(flags, stos );
2514 __ jcc(Assembler::notEqual, notShort);
2516 __ pop(stos);
2517 if (!is_static) pop_and_check_object(obj);
2518 __ movw(lo, rax );
2519 if (!is_static) {
2520 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2521 }
2522 __ jmp(Done);
2524 __ bind(notShort);
2525 // ltos
2526 __ cmpl(flags, ltos );
2527 __ jcc(Assembler::notEqual, notLong);
2529 Label notVolatileLong;
2530 __ testl(rdx, rdx);
2531 __ jcc(Assembler::zero, notVolatileLong);
2533 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2534 if (!is_static) pop_and_check_object(obj);
2536 // Replace with real volatile test
2537 __ push(rdx);
2538 __ push(rax); // Must update atomically with FIST
2539 __ fild_d(Address(rsp,0)); // So load into FPU register
2540 __ fistp_d(lo); // and put into memory atomically
2541 __ addptr(rsp, 2*wordSize);
2542 // volatile_barrier();
2543 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2544 Assembler::StoreStore));
2545 // Don't rewrite volatile version
2546 __ jmp(notVolatile);
2548 __ bind(notVolatileLong);
2550 __ pop(ltos); // overwrites rdx
2551 if (!is_static) pop_and_check_object(obj);
2552 NOT_LP64(__ movptr(hi, rdx));
2553 __ movptr(lo, rax);
2554 if (!is_static) {
2555 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2556 }
2557 __ jmp(notVolatile);
2559 __ bind(notLong);
2560 // ftos
2561 __ cmpl(flags, ftos );
2562 __ jcc(Assembler::notEqual, notFloat);
2564 __ pop(ftos);
2565 if (!is_static) pop_and_check_object(obj);
2566 __ fstp_s(lo);
2567 if (!is_static) {
2568 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2569 }
2570 __ jmp(Done);
2572 __ bind(notFloat);
2573 // dtos
2574 __ cmpl(flags, dtos );
2575 __ jcc(Assembler::notEqual, notDouble);
2577 __ pop(dtos);
2578 if (!is_static) pop_and_check_object(obj);
2579 __ fstp_d(lo);
2580 if (!is_static) {
2581 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2582 }
2583 __ jmp(Done);
2585 __ bind(notDouble);
2587 __ stop("Bad state");
2589 __ bind(Done);
2591 // Check for volatile store
2592 __ testl(rdx, rdx);
2593 __ jcc(Assembler::zero, notVolatile);
2594 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2595 Assembler::StoreStore));
2596 __ bind(notVolatile);
2597 }
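// Why the FPU round-trip in the volatile long path above: on IA-32 a plain
// 64-bit store compiles to two 32-bit moves, which another thread could
// observe half-written; fild/fistp performs the access as a single 64-bit
// memory operation. A hedged sketch of the difference:
//
//   void store64_torn(int64_t* p, int64_t v) { *p = v; } // two movl's: not atomic
//   // fild_d/fistp_d (or an SSE2 movq) moves all 64 bits at once, which
//   // is what the JMM requires for volatile longs and doubles.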
2600 void TemplateTable::putfield(int byte_no) {
2601 putfield_or_static(byte_no, false);
2602 }
2605 void TemplateTable::putstatic(int byte_no) {
2606 putfield_or_static(byte_no, true);
2607 }
2609 void TemplateTable::jvmti_post_fast_field_mod() {
2610 if (JvmtiExport::can_post_field_modification()) {
2611 // Check to see if a field modification watch has been set before we take
2612 // the time to call into the VM.
2613 Label L2;
2614 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2615 __ testl(rcx,rcx);
2616 __ jcc(Assembler::zero, L2);
2617 __ pop_ptr(rbx); // copy the object pointer from tos
2618 __ verify_oop(rbx);
2619 __ push_ptr(rbx); // put the object pointer back on tos
2620 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2621 __ mov(rcx, rsp);
2622 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2623 __ xorptr(rbx, rbx);
2624 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2625 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2626 switch (bytecode()) { // load values into the jvalue object
2627 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2628 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2629 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2630 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2631 case Bytecodes::_fast_lputfield:
2632 NOT_LP64(__ movptr(hi_value, rdx));
2633 __ movptr(lo_value, rax);
2634 break;
2636 // need to call fld_s() after fstp_s() to restore the value for below
2637 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2639 // need to call fld_d() after fstp_d() to restore the value for below
2640 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2642 // since rcx is not an object we don't call store_check() here
2643 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2645 default: ShouldNotReachHere();
2646 }
2647 __ pop_ptr(rbx); // restore copy of object pointer
2649 // Save rax, and sometimes rdx because call_VM() will clobber them,
2650 // then use them for JVM/DI purposes
2651 __ push(rax);
2652 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2653 // access constant pool cache entry
2654 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2655 __ verify_oop(rbx);
2656 // rbx,: object pointer copied above
2657 // rax,: cache entry pointer
2658 // rcx: jvalue object on the stack
2659 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2660 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2661 __ pop(rax); // restore lower value
2662 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2663 __ bind(L2);
2664 }
2665 }
2667 void TemplateTable::fast_storefield(TosState state) {
2668 transition(state, vtos);
2670 ByteSize base = constantPoolCacheOopDesc::base_offset();
2672 jvmti_post_fast_field_mod();
2674 // access constant pool cache
2675 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2677 // Test for volatile with rdx, but rdx is the tos register for lputfield.
2678 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2679 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2680 ConstantPoolCacheEntry::flags_offset())));
2682 // replace index with field offset from cache entry
2683 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2685 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2686 // volatile_barrier( );
2688 Label notVolatile, Done;
2689 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2690 __ andl(rdx, 0x1);
2691 // Check for volatile store
2692 __ testl(rdx, rdx);
2693 __ jcc(Assembler::zero, notVolatile);
2695 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2697 // Get object from stack
2698 pop_and_check_object(rcx);
2700 // field addresses
2701 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2702 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2704 // access field
2705 switch (bytecode()) {
2706 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2707 case Bytecodes::_fast_sputfield: // fall through
2708 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2709 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2710 case Bytecodes::_fast_lputfield:
2711 NOT_LP64(__ movptr(hi, rdx));
2712 __ movptr(lo, rax);
2713 break;
2714 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2715 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2716 case Bytecodes::_fast_aputfield: {
2717 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2718 break;
2719 }
2720 default:
2721 ShouldNotReachHere();
2722 }
2724 Label done;
2725 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2726 Assembler::StoreStore));
2727 // Barriers are so large that a short branch doesn't reach!
2728 __ jmp(done);
2730 // Same code as above, but don't need rdx to test for volatile.
2731 __ bind(notVolatile);
2733 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2735 // Get object from stack
2736 pop_and_check_object(rcx);
2738 // access field
2739 switch (bytecode()) {
2740 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2741 case Bytecodes::_fast_sputfield: // fall through
2742 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2743 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2744 case Bytecodes::_fast_lputfield:
2745 NOT_LP64(__ movptr(hi, rdx));
2746 __ movptr(lo, rax);
2747 break;
2748 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2749 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2750 case Bytecodes::_fast_aputfield: {
2751 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2752 break;
2753 }
2754 default:
2755 ShouldNotReachHere();
2756 }
2757 __ bind(done);
2758 }
2761 void TemplateTable::fast_accessfield(TosState state) {
2762 transition(atos, state);
2764 // do the JVMTI work here to avoid disturbing the register state below
2765 if (JvmtiExport::can_post_field_access()) {
2766 // Check to see if a field access watch has been set before we take
2767 // the time to call into the VM.
2768 Label L1;
2769 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2770 __ testl(rcx,rcx);
2771 __ jcc(Assembler::zero, L1);
2772 // access constant pool cache entry
2773 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2774 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2775 __ verify_oop(rax);
2776 // rax,: object pointer copied above
2777 // rcx: cache entry pointer
2778 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2779 __ pop_ptr(rax); // restore object pointer
2780 __ bind(L1);
2781 }
2783 // access constant pool cache
2784 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2785 // replace index with field offset from cache entry
2786 __ movptr(rbx, Address(rcx,
2787 rbx,
2788 Address::times_ptr,
2789 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2792 // rax,: object
2793 __ verify_oop(rax);
2794 __ null_check(rax);
2795 // field addresses
2796 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2797 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2799 // access field
2800 switch (bytecode()) {
2801 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2802 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2803 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2804 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2805 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2806 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2807 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2808 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2809 default:
2810 ShouldNotReachHere();
2811 }
2813 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2814 // volatile_barrier( );
2815 }
2817 void TemplateTable::fast_xaccess(TosState state) {
2818 transition(vtos, state);
2819 // get receiver
2820 __ movptr(rax, aaddress(0));
2821 // access constant pool cache
2822 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2823 __ movptr(rbx, Address(rcx,
2824 rdx,
2825 Address::times_ptr,
2826 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2827 // make sure exception is reported in correct bcp range (getfield is next instruction)
2828 __ increment(rsi);
2829 __ null_check(rax);
2830 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2831 if (state == itos) {
2832 __ movl(rax, lo);
2833 } else if (state == atos) {
2834 __ movptr(rax, lo);
2835 __ verify_oop(rax);
2836 } else if (state == ftos) {
2837 __ fld_s(lo);
2838 } else {
2839 ShouldNotReachHere();
2840 }
2841 __ decrement(rsi);
2842 }
2846 //----------------------------------------------------------------------------------------------------
2847 // Calls
2849 void TemplateTable::count_calls(Register method, Register temp) {
2850 // implemented elsewhere
2851 ShouldNotReachHere();
2852 }
2855 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2856 // determine flags
2857 Bytecodes::Code code = bytecode();
2858 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2859 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2860 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2861 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2862 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2863 const bool receiver_null_check = is_invokespecial;
2864 const bool save_flags = is_invokeinterface || is_invokevirtual;
2865 // setup registers & access constant pool cache
2866 const Register recv = rcx;
2867 const Register flags = rdx;
2868 assert_different_registers(method, index, recv, flags);
2870 // save 'interpreter return address'
2871 __ save_bcp();
2873 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2875 // load receiver if needed (note: no return address pushed yet)
2876 if (load_receiver) {
2877 assert(!is_invokedynamic, "");
2878 __ movl(recv, flags);
2879 __ andl(recv, 0xFF);
2880 // the low byte of flags holds the parameter size; the receiver is the deepest argument
2881 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2882 __ movptr(recv, recv_addr);
2883 __ verify_oop(recv);
2884 }
2886 // do null check if needed
2887 if (receiver_null_check) {
2888 __ null_check(recv);
2889 }
2891 if (save_flags) {
2892 __ mov(rsi, flags);
2893 }
2895 // compute return type
2896 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2897 // Make sure we don't need to mask flags for tosBits after the above shift
2898 ConstantPoolCacheEntry::verify_tosBits();
2899 // load return address
2900 {
2901 address table_addr;
2902 if (is_invokeinterface || is_invokedynamic)
2903 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2904 else
2905 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2906 ExternalAddress table(table_addr);
2907 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2908 }
2910 // push return address
2911 __ push(flags);
2913 // Restore flag value from the constant pool cache, and restore rsi
2914 // for later null checks. rsi is the bytecode pointer
2915 if (save_flags) {
2916 __ mov(flags, rsi);
2917 __ restore_bcp();
2918 }
2919 }
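// Sketch of the receiver load above (illustrative): the low byte of flags
// is the parameter size in stack words, and the receiver is the deepest
// argument, so it sits (param_size - 1) words above rsp.
//
//   oop* receiver_addr(intptr_t* sp, int param_size) {
//     return (oop*)(sp + param_size - 1); // the -1 is the expr_offset_in_bytes(1) term
//   }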
2922 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2923 Register flags) {
2925 // Uses temporary registers rax, rdx
2926 assert_different_registers(index, recv, rax, rdx);
2928 // Test for an invoke of a final method
2929 Label notFinal;
2930 __ movl(rax, flags);
2931 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2932 __ jcc(Assembler::zero, notFinal);
2934 Register method = index; // method must be rbx,
2935 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2937 // do the call - the index is actually the method to call
2938 __ verify_oop(method);
2940 // It's final, need a null check here!
2941 __ null_check(recv);
2943 // profile this call
2944 __ profile_final_call(rax);
2946 __ jump_from_interpreted(method, rax);
2948 __ bind(notFinal);
2950 // get receiver klass
2951 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2952 // Keep recv in rcx; the callee expects it there
2953 __ load_klass(rax, recv);
2954 __ verify_oop(rax);
2956 // profile this call
2957 __ profile_virtual_call(rax, rdi, rdx);
2959 // get target methodOop & entry point
2960 const int base = instanceKlass::vtable_start_offset() * wordSize;
2961 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2962 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2963 __ jump_from_interpreted(method, rdx);
2964 }
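// C-style sketch of the vtable dispatch above (layout simplified; on this
// platform each vtable entry is a single word holding the methodOop):
//
//   methodOop vtable_lookup(klassOop k, int index) {
//     methodOop* vtable = (methodOop*)((char*)k + vtable_start); // vtable_start_offset * wordSize
//     return vtable[index];                                      // then jump_from_interpreted
//   }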
2967 void TemplateTable::invokevirtual(int byte_no) {
2968 transition(vtos, vtos);
2969 assert(byte_no == f2_byte, "use this argument");
2970 prepare_invoke(rbx, noreg, byte_no);
2972 // rbx,: index
2973 // rcx: receiver
2974 // rdx: flags
2976 invokevirtual_helper(rbx, rcx, rdx);
2977 }
2980 void TemplateTable::invokespecial(int byte_no) {
2981 transition(vtos, vtos);
2982 assert(byte_no == f1_byte, "use this argument");
2983 prepare_invoke(rbx, noreg, byte_no);
2984 // do the call
2985 __ verify_oop(rbx);
2986 __ profile_call(rax);
2987 __ jump_from_interpreted(rbx, rax);
2988 }
2991 void TemplateTable::invokestatic(int byte_no) {
2992 transition(vtos, vtos);
2993 assert(byte_no == f1_byte, "use this argument");
2994 prepare_invoke(rbx, noreg, byte_no);
2995 // do the call
2996 __ verify_oop(rbx);
2997 __ profile_call(rax);
2998 __ jump_from_interpreted(rbx, rax);
2999 }
3002 void TemplateTable::fast_invokevfinal(int byte_no) {
3003 transition(vtos, vtos);
3004 assert(byte_no == f2_byte, "use this argument");
3005 __ stop("fast_invokevfinal not used on x86");
3006 }
3009 void TemplateTable::invokeinterface(int byte_no) {
3010 transition(vtos, vtos);
3011 assert(byte_no == f1_byte, "use this argument");
3012 prepare_invoke(rax, rbx, byte_no);
3014 // rax,: Interface
3015 // rbx,: index
3016 // rcx: receiver
3017 // rdx: flags
3019 // Special case of invokeinterface called for virtual method of
3020 // java.lang.Object. See cpCacheOop.cpp for details.
3021 // This code isn't produced by javac, but could be produced by
3022 // another compliant Java compiler.
3023 Label notMethod;
3024 __ movl(rdi, rdx);
3025 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3026 __ jcc(Assembler::zero, notMethod);
3028 invokevirtual_helper(rbx, rcx, rdx);
3029 __ bind(notMethod);
3031 // Get receiver klass into rdx - also a null check
3032 __ restore_locals(); // restore rdi
3033 __ load_klass(rdx, rcx);
3034 __ verify_oop(rdx);
3036 // profile this call
3037 __ profile_virtual_call(rdx, rsi, rdi);
3039 Label no_such_interface, no_such_method;
3041 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3042 rdx, rax, rbx,
3043 // outputs: method, scan temp. reg
3044 rbx, rsi,
3045 no_such_interface);
3047 // rbx,: methodOop to call
3048 // rcx: receiver
3049 // Check for abstract method error
3050 // Note: This should be done more efficiently via a throw_abstract_method_error
3051 // interpreter entry point and a conditional jump to it in case of a null
3052 // method.
3053 __ testptr(rbx, rbx);
3054 __ jcc(Assembler::zero, no_such_method);
3056 // do the call
3057 // rcx: receiver
3058 // rbx,: methodOop
3059 __ jump_from_interpreted(rbx, rdx);
3060 __ should_not_reach_here();
3062 // exception handling code follows...
3063 // note: must restore interpreter registers to canonical
3064 // state for exception handling to work correctly!
3066 __ bind(no_such_method);
3067 // throw exception
3068 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3069 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3070 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3071 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3072 // the call_VM checks for exception, so we should never return here.
3073 __ should_not_reach_here();
3075 __ bind(no_such_interface);
3076 // throw exception
3077 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3078 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3079 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3080 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3081 InterpreterRuntime::throw_IncompatibleClassChangeError));
3082 // the call_VM checks for exception, so we should never return here.
3083 __ should_not_reach_here();
3084 }
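// Hedged sketch of what lookup_interface_method does: scan the receiver
// klass's itable for the interface, then index the per-interface method
// block (names are illustrative stand-ins for the itable entry types):
//
//   methodOop itable_lookup(klassOop recv_klass, klassOop iface, int index) {
//     for (ItableEntrySketch* e = itable_begin(recv_klass); e->interface != NULL; e++)
//       if (e->interface == iface)
//         return method_block_at(recv_klass, e->offset)[index];
//     return NULL;  // no such interface -> IncompatibleClassChangeError
//   }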
3086 void TemplateTable::invokedynamic(int byte_no) {
3087 transition(vtos, vtos);
3088 assert(byte_no == f1_oop, "use this argument");
3090 if (!EnableInvokeDynamic) {
3091 // We should not encounter this bytecode if !EnableInvokeDynamic.
3092 // The verifier will stop it. However, if we get past the verifier,
3093 // this will stop the thread in a reasonable way, without crashing the JVM.
3094 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3095 InterpreterRuntime::throw_IncompatibleClassChangeError));
3096 // the call_VM checks for exception, so we should never return here.
3097 __ should_not_reach_here();
3098 return;
3099 }
3101 prepare_invoke(rax, rbx, byte_no);
3103 // rax: CallSite object (f1)
3104 // rbx: unused (f2)
3105 // rcx: receiver address
3106 // rdx: flags (unused)
3108 Register rax_callsite = rax;
3109 Register rcx_method_handle = rcx;
3111 // %%% should make a type profile for any invokedynamic that takes a ref argument
3112 // profile this call
3113 __ profile_call(rsi);
3115 __ verify_oop(rax_callsite);
3116 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
3117 __ null_check(rcx_method_handle);
3118 __ verify_oop(rcx_method_handle);
3119 __ prepare_to_jump_from_interpreted();
3120 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3121 }
3123 //----------------------------------------------------------------------------------------------------
3124 // Allocation
3126 void TemplateTable::_new() {
3127 transition(vtos, atos);
3128 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3129 Label slow_case;
3130 Label slow_case_no_pop;
3131 Label done;
3132 Label initialize_header;
3133 Label initialize_object; // including clearing the fields
3134 Label allocate_shared;
3136 __ get_cpool_and_tags(rcx, rax);
3138 // Make sure the class we're about to instantiate has been resolved.
3139 // This is done before loading instanceKlass to be consistent with the order
3140 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3141 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3142 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3143 __ jcc(Assembler::notEqual, slow_case_no_pop);
3145 // get instanceKlass
3146 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3147 __ push(rcx); // save the klass for initializing the header later
3149 // make sure klass is fully initialized (the finalizer check follows below)
3151 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3152 __ jcc(Assembler::notEqual, slow_case);
3154 // get instance_size in instanceKlass (scaled to a count of bytes)
3155 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3156 // test to see if it has a finalizer or is malformed in some way
3157 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3158 __ jcc(Assembler::notZero, slow_case);
3160 //
3161 // Allocate the instance
3162 // 1) Try to allocate in the TLAB
3163 // 2) if fail and the object is large allocate in the shared Eden
3164 // 3) if the above fails (or is not applicable), go to a slow case
3165 // (creates a new TLAB, etc.)
3167 const bool allow_shared_alloc =
3168 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3170 const Register thread = rcx;
3171 if (UseTLAB || allow_shared_alloc) {
3172 __ get_thread(thread);
3173 }
3175 if (UseTLAB) {
3176 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3177 __ lea(rbx, Address(rax, rdx, Address::times_1));
3178 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3179 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3180 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3181 if (ZeroTLAB) {
3182 // the fields have already been cleared
3183 __ jmp(initialize_header);
3184 } else {
3185 // initialize both the header and fields
3186 __ jmp(initialize_object);
3187 }
3188 }
3190 // Allocation in the shared Eden, if allowed.
3191 //
3192 // rdx: instance size in bytes
3193 if (allow_shared_alloc) {
3194 __ bind(allocate_shared);
3196 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3198 Label retry;
3199 __ bind(retry);
3200 __ movptr(rax, heap_top);
3201 __ lea(rbx, Address(rax, rdx, Address::times_1));
3202 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3203 __ jcc(Assembler::above, slow_case);
3205 // Compare rax, with the top addr, and if still equal, store the new
3206 // top addr in rbx, at the address of the top addr pointer. Sets ZF if it was
3207 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3208 //
3209 // rax,: object begin
3210 // rbx,: object end
3211 // rdx: instance size in bytes
3212 __ locked_cmpxchgptr(rbx, heap_top);
3214 // if someone beat us on the allocation, try again, otherwise continue
3215 __ jcc(Assembler::notEqual, retry);
3217 __ incr_allocated_bytes(thread, rdx, 0);
3218 }
3220 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3221 // The object is initialized before the header. If the object size is
3222 // zero, go directly to the header initialization.
3223 __ bind(initialize_object);
3224 __ decrement(rdx, sizeof(oopDesc));
3225 __ jcc(Assembler::zero, initialize_header);
3227 // Initialize topmost object field, divide rdx by 8, check if odd and
3228 // test if zero.
3229 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3230 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3232 // rdx must have been a multiple of 8
3233 #ifdef ASSERT
3234 // make sure rdx was a multiple of 8
3235 Label L;
3236 // Ignore partial flag stall after shrl() since it is debug VM
3237 __ jccb(Assembler::carryClear, L);
3238 __ stop("object size is not multiple of 2 - adjust this code");
3239 __ bind(L);
3240 // rdx must be > 0, no extra check needed here
3241 #endif
3243 // initialize remaining object fields: rdx was a multiple of 8
3244 { Label loop;
3245 __ bind(loop);
3246 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3247 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3248 __ decrement(rdx);
3249 __ jcc(Assembler::notZero, loop);
3250 }
3252 // initialize object header only.
3253 __ bind(initialize_header);
3254 if (UseBiasedLocking) {
3255 __ pop(rcx); // get saved klass back in the register.
3256 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3257 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3258 } else {
3259 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3260 (int32_t)markOopDesc::prototype()); // header
3261 __ pop(rcx); // get saved klass back in the register.
3262 }
3263 __ store_klass(rax, rcx); // klass
3265 {
3266 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3267 // Trigger dtrace event for fastpath
3268 __ push(atos);
3269 __ call_VM_leaf(
3270 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3271 __ pop(atos);
3272 }
3274 __ jmp(done);
3275 }
3277 // slow case
3278 __ bind(slow_case);
3279 __ pop(rcx); // restore stack pointer to what it was when we came in.
3280 __ bind(slow_case_no_pop);
3281 __ get_constant_pool(rax);
3282 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3283 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3285 // continue
3286 __ bind(done);
3287 }
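// Hedged sketch of the allocation fast paths generated above. The TLAB
// bump needs no atomics because the buffer is thread-local; the shared
// eden falls back to a CAS loop on the global top pointer.
//
//   char* tlab_allocate(ThreadSketch* t, size_t size) {
//     char* obj = t->tlab_top;
//     if (obj + size > t->tlab_end) return NULL; // -> shared eden / slow case
//     t->tlab_top = obj + size;                  // plain store, no lock needed
//     return obj;
//   }
//   // shared eden: do { obj = *heap_top; } while
//   //   (!cas(heap_top, obj, obj + size));      // the locked_cmpxchgptr above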
3290 void TemplateTable::newarray() {
3291 transition(itos, atos);
3292 __ push_i(rax); // make sure everything is on the stack
3293 __ load_unsigned_byte(rdx, at_bcp(1));
3294 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3295 __ pop_i(rdx); // discard size
3296 }
3299 void TemplateTable::anewarray() {
3300 transition(itos, atos);
3301 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3302 __ get_constant_pool(rcx);
3303 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3304 }
3307 void TemplateTable::arraylength() {
3308 transition(atos, itos);
3309 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3310 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3311 }
3314 void TemplateTable::checkcast() {
3315 transition(atos, atos);
3316 Label done, is_null, ok_is_subtype, quicked, resolved;
3317 __ testptr(rax, rax); // Object is in EAX
3318 __ jcc(Assembler::zero, is_null);
3320 // Get cpool & tags index
3321 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3322 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3323 // See if bytecode has already been quicked
3324 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3325 __ jcc(Assembler::equal, quicked);
3327 __ push(atos);
3328 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3329 __ pop_ptr(rdx);
3330 __ jmpb(resolved);
3332 // Get superklass in EAX and subklass in EBX
3333 __ bind(quicked);
3334 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3335 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3337 __ bind(resolved);
3338 __ load_klass(rbx, rdx);
3340 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3341 // Superklass in EAX. Subklass in EBX.
3342 __ gen_subtype_check( rbx, ok_is_subtype );
3344 // Come here on failure
3345 __ push(rdx);
3346 // object is at TOS
3347 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3349 // Come here on success
3350 __ bind(ok_is_subtype);
3351 __ mov(rax,rdx); // Restore object in EDX
3353 // Collect counts on whether this check-cast sees NULLs a lot or not.
3354 if (ProfileInterpreter) {
3355 __ jmp(done);
3356 __ bind(is_null);
3357 __ profile_null_seen(rcx);
3358 } else {
3359 __ bind(is_null); // same as 'done'
3360 }
3361 __ bind(done);
3362 }
3365 void TemplateTable::instanceof() {
3366 transition(atos, itos);
3367 Label done, is_null, ok_is_subtype, quicked, resolved;
3368 __ testptr(rax, rax);
3369 __ jcc(Assembler::zero, is_null);
3371 // Get cpool & tags index
3372 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3373 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3374 // See if bytecode has already been quicked
3375 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3376 __ jcc(Assembler::equal, quicked);
3378 __ push(atos);
3379 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3380 __ pop_ptr(rdx);
3381 __ load_klass(rdx, rdx);
3382 __ jmp(resolved);
3384 // Get superklass in EAX and subklass in EDX
3385 __ bind(quicked);
3386 __ load_klass(rdx, rax);
3387 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3389 __ bind(resolved);
3391 // Generate subtype check. Blows ECX. Resets EDI.
3392 // Superklass in EAX. Subklass in EDX.
3393 __ gen_subtype_check( rdx, ok_is_subtype );
3395 // Come here on failure
3396 __ xorl(rax,rax);
3397 __ jmpb(done);
3398 // Come here on success
3399 __ bind(ok_is_subtype);
3400 __ movl(rax, 1);
3402 // Collect counts on whether this test sees NULLs a lot or not.
3403 if (ProfileInterpreter) {
3404 __ jmp(done);
3405 __ bind(is_null);
3406 __ profile_null_seen(rcx);
3407 } else {
3408 __ bind(is_null); // same as 'done'
3409 }
3410 __ bind(done);
3411 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3412 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3413 }
3416 //----------------------------------------------------------------------------------------------------
3417 // Breakpoints
3418 void TemplateTable::_breakpoint() {
3420 // Note: We get here even if we are single stepping.
3421 // jbug insists on setting breakpoints at every bytecode
3422 // even if we are in single step mode.
3424 transition(vtos, vtos);
3426 // get the unpatched byte code
3427 __ get_method(rcx);
3428 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3429 __ mov(rbx, rax);
3431 // post the breakpoint event
3432 __ get_method(rcx);
3433 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3435 // complete the execution of original bytecode
3436 __ dispatch_only_normal(vtos);
3437 }
3440 //----------------------------------------------------------------------------------------------------
3441 // Exceptions
3443 void TemplateTable::athrow() {
3444 transition(atos, vtos);
3445 __ null_check(rax);
3446 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3447 }
3450 //----------------------------------------------------------------------------------------------------
3451 // Synchronization
3452 //
3453 // Note: monitorenter & exit are symmetric routines, which is reflected
3454 // in the assembly code structure as well
3455 //
3456 // Stack layout:
3457 //
3458 // [expressions ] <--- rsp = expression stack top
3459 // ..
3460 // [expressions ]
3461 // [monitor entry] <--- monitor block top = expression stack bot
3462 // ..
3463 // [monitor entry]
3464 // [frame data ] <--- monitor block bot
3465 // ...
3466 // [saved rbp, ] <--- rbp,
3469 void TemplateTable::monitorenter() {
3470 transition(atos, vtos);
3472 // check for NULL object
3473 __ null_check(rax);
3475 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3476 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3477 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3478 Label allocated;
3480 // initialize entry pointer
3481 __ xorl(rdx, rdx); // points to free slot or NULL
3483 // find a free slot in the monitor block (result in rdx)
3484 { Label entry, loop, exit;
3485 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3487 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3488 __ jmpb(entry);
3490 __ bind(loop);
3491 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3492 __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3493 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3494 __ jccb(Assembler::equal, exit); // if same object then stop searching
3495 __ addptr(rcx, entry_size); // otherwise advance to next entry
3496 __ bind(entry);
3497 __ cmpptr(rcx, rbx); // check if bottom reached
3498 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3499 __ bind(exit);
3500 }

  __ testptr(rdx, rdx);                          // check if a slot has been found
  __ jccb(Assembler::notZero, allocated);        // if found, continue with that one

  // allocate one if there's no free slot
  { Label entry, loop;
    // 1. compute new pointers                   // rsp: old expression stack top
    __ movptr(rdx, monitor_block_bot);           // rdx: old expression stack bottom
    __ subptr(rsp, entry_size);                  // move expression stack top
    __ subptr(rdx, entry_size);                  // move expression stack bottom
    __ mov(rcx, rsp);                            // set start value for copy loop
    __ movptr(monitor_block_bot, rdx);           // set new monitor block top
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbx, Address(rcx, entry_size));    // load expression stack word from old location
    __ movptr(Address(rcx, 0), rbx);             // and store it at new location
    __ addptr(rcx, wordSize);                    // advance to next word
    __ bind(entry);
    __ cmpptr(rcx, rdx);                         // check if bottom reached
    __ jcc(Assembler::notEqual, loop);           // if not at bottom then copy next word
  }
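
  // In effect (hedged pseudo-code for the shift above): the expression stack
  // is slid down by one monitor entry, vacating entry_size bytes for the new
  // entry just below the old expression stack bottom:
  //
  //   rsp -= entry_size;                        // new, lower expression stack top
  //   for (word = rsp; word != new_bottom; word += wordSize)
  //     *word = *(word + entry_size);           // copy each stack word to its new location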

  // call run-time routine
  // rdx: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ increment(rsi);

  __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax);     // store object
  __ lock_object(rdx);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset        * wordSize);
  const int entry_size =         (     frame::interpreter_frame_monitor_size()           * wordSize);
  Label found;

  // find matching slot
  { Label entry, loop;
    __ movptr(rdx, monitor_block_top);           // points to current entry, starting with top-most entry
    __ lea(rbx, monitor_block_bot);              // points to word before bottom of monitor block
    __ jmpb(entry);

    __ bind(loop);
    __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));   // check if current entry is for same object
    __ jcc(Assembler::equal, found);             // if same object then stop searching
    __ addptr(rdx, entry_size);                  // otherwise advance to next entry
    __ bind(entry);
    __ cmpptr(rdx, rbx);                         // check if bottom reached
    __ jcc(Assembler::notEqual, loop);           // if not at bottom then check this entry
  }
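
  // Roughly (hedged pseudo-code): scan the monitor block top-down for the
  // entry locking this object; falling out of the loop reaches the error
  // path below:
  //
  //   for (BasicObjectLock* cur = monitor_block_top;
  //        cur != monitor_block_bot;
  //        cur = next entry, entry_size bytes further down) {
  //     if (cur->obj() == unlock_obj) goto found;
  //   }
  //   throw_illegal_monitor_state_exception();  // no matching entry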

  // Error handling: unlocking was not block-structured
  Label end;
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // rdx: points to monitor entry
  __ bind(found);
  __ push_ptr(rax);                              // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rdx);
  __ pop_ptr(rax);                               // discard object
  __ bind(end);
}

//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rsi increment step is part of the individual wide bytecode implementations
}
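
// In effect (hedged): the byte following the wide prefix indexes the
// wide-entry dispatch table, i.e. an indirect jump of the form:
//
//   goto Interpreter::_wentry_point[*(bcp + 1)];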

//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(rax, at_bcp(3));         // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + ndims * stackElementSize - 1 * wordSize
  // (the trailing wordSize adjustment points at the first element itself)
  __ lea(rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax);     // pass in rax
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));                     // get rid of counts
}
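
// For a bytecode like 'multianewarray #idx 3' with the dimension counts
// pushed first-to-last, the lea above amounts to (hedged sketch; on this
// 32-bit port stackElementSize == wordSize):
//
//   jint* first_dim_addr = (jint*)(rsp + ndims * wordSize - wordSize);
//   // InterpreterRuntime::multianewarray reads the counts from there, and the
//   // final lea(rsp, ...) pops all ndims counts in one adjustment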

#endif /* !CC_INTERP */