Thu, 15 Aug 2013 20:04:10 -0400
8003424: Enable Class Data Sharing for CompressedOops
8016729: ObjectAlignmentInBytes=16 now forces the use of heap based compressed oops
8005933: The -Xshare:auto option is ignored for -server
Summary: Move klass metaspace above the heap and support CDS with compressed klass ptrs.
Reviewed-by: coleenp, kvn, mgerdin, tschatzl, stefank
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
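
// Illustrative use of these pools (a sketch of the pattern used by the
// arithmetic emitters elsewhere in this file): ANDing with a sign mask
// clears the sign bit (AbsF/AbsD), XORing with a sign-flip mask toggles
// it (NegF/NegD), e.g.
//   __ andps(dst, ExternalAddress((address)float_signmask_pool));   // AbsF
//   __ xorpd(dst, ExternalAddress((address)double_signflip_pool));  // NegD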


NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->
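
// Both overloads below redirect any tmp register that aliases 'preserve'
// to 'extra', so callers always end up with scratch registers distinct
// from the register they need preserved.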

static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}

void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");
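    // Worked example (illustrative): a constant index of 4 with scale
    // times_4 and disp 16 folds into a single displacement of
    // (4 << 2) + 16 = 32 bytes.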

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32-bit words)

  // subtract two words to account for return address and link
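  // Worked example (illustrative): with a framesize of 12 slots on LP64
  // (slots_per_word == 2, stack_slot_size == 4), this yields
  // (12 - 2*2) * 4 = 32 bytes of rsp decrement.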
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ get_thread(rsi);
  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rsi, rax);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rbx, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rsi);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// This is the fast version of java.lang.String.compare; it has no
// OSR entry, so we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr   (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl     (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea      (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl     (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea      (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr   (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl     (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ lea      (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl     (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
    __ lea      (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  __ mov   (rcx, rbx);
  __ subptr(rbx, rax); // subtract lengths
  __ push  (rbx);      // result
  __ cmov  (Assembler::lessEqual, rax, rcx);

  // is minimum length 0?
  Label noLoop, haveResult;
  __ testptr (rax, rax);
  __ jcc (Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_short(rcx, Address(rdi, 0));
  __ load_unsigned_short(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi and rdi to the end of the arrays (arrays have same length)
  // negate the index

  __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negptr(rax);
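  // From here rax holds the negative remaining count; the loop below
  // walks it up toward zero while rsi/rdi stay anchored at the arrays' ends.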

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  __ pop(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // the leave instruction is going to discard the TOS value
  __ mov (rax, rcx); // result of call is in rax
}


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);
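  // The poll is a read of the VM's polling page; when a safepoint is
  // pending the page is protected, so the read faults and the signal
  // handler brings this thread to the safepoint.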

  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    __ relocate(relocInfo::poll_return_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    __ testl(rax, polling_page);
  }
  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);
  guarantee(info != NULL, "Shouldn't be NULL");
  int offset = __ offset();
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    offset = __ offset();
    add_debug_info_for_branch(info);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    add_debug_info_for_branch(info);
    __ testl(rax, polling_page);
  }
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
          __ movptr(as_Address(addr), NULL_WORD);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32-bit mode, so this doesn't produce a useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64-bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64-bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64-bit
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
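        // movb avoids a partial-register stall on pre-P6 (the destination
        // was cleared above); the shll/sarl pair then sign-extends the
        // loaded byte in place.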
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
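    // Klass load: the 32-bit narrow klass value read above must be decoded.
    // (Per the changeset summary, the klass metaspace now sits above the
    // heap, so decoding presumably uses the narrow-klass base/shift rather
    // than the heap's oop base.)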
#ifdef _LP64
    if (UseCompressedKlassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (ReadPrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchr(from_addr);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (AllocatePrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      case 3:
        __ prefetchw(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchw(from_addr);
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      __ movptr(Address(rsp, 0), src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
1686 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1687 // we always need a stub for the failure case.
1688 CodeStub* stub = op->stub();
1689 Register obj = op->object()->as_register();
1690 Register k_RInfo = op->tmp1()->as_register();
1691 Register klass_RInfo = op->tmp2()->as_register();
1692 Register dst = op->result_opr()->as_register();
1693 ciKlass* k = op->klass();
1694 Register Rtmp1 = noreg;
1696 // check if it needs to be profiled
1697 ciMethodData* md;
1698 ciProfileData* data;
1700 if (op->should_profile()) {
1701 ciMethod* method = op->profiled_method();
1702 assert(method != NULL, "Should have method");
1703 int bci = op->profiled_bci();
1704 md = method->method_data_or_null();
1705 assert(md != NULL, "Sanity");
1706 data = md->bci_to_data(bci);
1707 assert(data != NULL, "need data for type check");
1708 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1709 }
1710 Label profile_cast_success, profile_cast_failure;
1711 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1712 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1714 if (obj == k_RInfo) {
1715 k_RInfo = dst;
1716 } else if (obj == klass_RInfo) {
1717 klass_RInfo = dst;
1718 }
1719 if (k->is_loaded() && !UseCompressedKlassPointers) {
1720 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1721 } else {
1722 Rtmp1 = op->tmp3()->as_register();
1723 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1724 }
1726 assert_different_registers(obj, k_RInfo, klass_RInfo);
1727 if (!k->is_loaded()) {
1728 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1729 } else {
1730 #ifdef _LP64
1731 __ mov_metadata(k_RInfo, k->constant_encoding());
1732 #endif // _LP64
1733 }
1734 assert(obj != k_RInfo, "must be different");
1736 __ cmpptr(obj, (int32_t)NULL_WORD);
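  // When profiling, a null object is not treated as a failed cast: the
  // null_seen flag is set in the MDO header and control exits via obj_is_null.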
1737 if (op->should_profile()) {
1738 Label not_null;
1739 __ jccb(Assembler::notEqual, not_null);
1740 // Object is null; update MDO and exit
1741 Register mdo = klass_RInfo;
1742 __ mov_metadata(mdo, md->constant_encoding());
1743 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1744 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1745 __ orl(data_addr, header_bits);
1746 __ jmp(*obj_is_null);
1747 __ bind(not_null);
1748 } else {
1749 __ jcc(Assembler::equal, *obj_is_null);
1750 }
1751 __ verify_oop(obj);
1753 if (op->fast_check()) {
1754 // get object class
1755 // not a safepoint as obj null check happens earlier
1756 #ifdef _LP64
1757 if (UseCompressedKlassPointers) {
1758 __ load_klass(Rtmp1, obj);
1759 __ cmpptr(k_RInfo, Rtmp1);
1760 } else {
1761 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1762 }
1763 #else
1764 if (k->is_loaded()) {
1765 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1766 } else {
1767 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1768 }
1769 #endif
1770 __ jcc(Assembler::notEqual, *failure_target);
1771 // successful cast, fall through to profile or jump
1772 } else {
1773 // get object class
1774 // not a safepoint as obj null check happens earlier
1775 __ load_klass(klass_RInfo, obj);
1776 if (k->is_loaded()) {
1777 // See if we get an immediate positive hit
1778 #ifdef _LP64
1779 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1780 #else
1781 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1782 #endif // _LP64
1783 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1784 __ jcc(Assembler::notEqual, *failure_target);
1785 // successful cast, fall through to profile or jump
1786 } else {
1787 // See if we get an immediate positive hit
1788 __ jcc(Assembler::equal, *success_target);
1789 // check for self
1790 #ifdef _LP64
1791 __ cmpptr(klass_RInfo, k_RInfo);
1792 #else
1793 __ cmpklass(klass_RInfo, k->constant_encoding());
1794 #endif // _LP64
1795 __ jcc(Assembler::equal, *success_target);
1797 __ push(klass_RInfo);
1798 #ifdef _LP64
1799 __ push(k_RInfo);
1800 #else
1801 __ pushklass(k->constant_encoding());
1802 #endif // _LP64
1803 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1804 __ pop(klass_RInfo);
1805 __ pop(klass_RInfo);
1806 // result is a boolean
1807 __ cmpl(klass_RInfo, 0);
1808 __ jcc(Assembler::equal, *failure_target);
1809 // successful cast, fall through to profile or jump
1810 }
1811 } else {
1812 // perform the fast part of the checking logic
1813 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1814 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1815 __ push(klass_RInfo);
1816 __ push(k_RInfo);
1817 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1818 __ pop(klass_RInfo);
1819 __ pop(k_RInfo);
1820 // result is a boolean
1821 __ cmpl(k_RInfo, 0);
1822 __ jcc(Assembler::equal, *failure_target);
1823 // successful cast, fall through to profile or jump
1824 }
1825 }
1826 if (op->should_profile()) {
1827 Register mdo = klass_RInfo, recv = k_RInfo;
1828 __ bind(profile_cast_success);
1829 __ mov_metadata(mdo, md->constant_encoding());
1830 __ load_klass(recv, obj);
1831 Label update_done;
1832 type_profile_helper(mdo, md, data, recv, success);
1833 __ jmp(*success);
1835 __ bind(profile_cast_failure);
1836 __ mov_metadata(mdo, md->constant_encoding());
1837 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1838 __ subptr(counter_addr, DataLayout::counter_increment);
1839 __ jmp(*failure);
1840 }
1841 __ jmp(*success);
1842 }
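// Note on the slow_subtype_check_id calls above: the stub expects the sub-
// and super-klass pushed on the stack and leaves a boolean result in the
// slot of the first push; both slots are popped and the result tested
// against 0.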
1845 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1846 LIR_Code code = op->code();
1847 if (code == lir_store_check) {
1848 Register value = op->object()->as_register();
1849 Register array = op->array()->as_register();
1850 Register k_RInfo = op->tmp1()->as_register();
1851 Register klass_RInfo = op->tmp2()->as_register();
1852 Register Rtmp1 = op->tmp3()->as_register();
1854 CodeStub* stub = op->stub();
1856 // check if it needs to be profiled
1857 ciMethodData* md;
1858 ciProfileData* data;
1860 if (op->should_profile()) {
1861 ciMethod* method = op->profiled_method();
1862 assert(method != NULL, "Should have method");
1863 int bci = op->profiled_bci();
1864 md = method->method_data_or_null();
1865 assert(md != NULL, "Sanity");
1866 data = md->bci_to_data(bci);
1867 assert(data != NULL, "need data for type check");
1868 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1869 }
1870 Label profile_cast_success, profile_cast_failure, done;
1871 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1872 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1874 __ cmpptr(value, (int32_t)NULL_WORD);
1875 if (op->should_profile()) {
1876 Label not_null;
1877 __ jccb(Assembler::notEqual, not_null);
1878 // Object is null; update MDO and exit
1879 Register mdo = klass_RInfo;
1880 __ mov_metadata(mdo, md->constant_encoding());
1881 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1882 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1883 __ orl(data_addr, header_bits);
1884 __ jmp(done);
1885 __ bind(not_null);
1886 } else {
1887 __ jcc(Assembler::equal, done);
1888 }
1890 add_debug_info_for_null_check_here(op->info_for_exception());
1891 __ load_klass(k_RInfo, array);
1892 __ load_klass(klass_RInfo, value);
1894 // get instance klass (it's already uncompressed)
1895 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1896 // perform the fast part of the checking logic
1897 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1898 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1899 __ push(klass_RInfo);
1900 __ push(k_RInfo);
1901 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1902 __ pop(klass_RInfo);
1903 __ pop(k_RInfo);
1904 // result is a boolean
1905 __ cmpl(k_RInfo, 0);
1906 __ jcc(Assembler::equal, *failure_target);
1907 // fall through to the success case
1909 if (op->should_profile()) {
1910 Register mdo = klass_RInfo, recv = k_RInfo;
1911 __ bind(profile_cast_success);
1912 __ mov_metadata(mdo, md->constant_encoding());
1913 __ load_klass(recv, value);
1914 Label update_done;
1915 type_profile_helper(mdo, md, data, recv, &done);
1916 __ jmpb(done);
1918 __ bind(profile_cast_failure);
1919 __ mov_metadata(mdo, md->constant_encoding());
1920 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1921 __ subptr(counter_addr, DataLayout::counter_increment);
1922 __ jmp(*stub->entry());
1923 }
1925 __ bind(done);
1926 } else
1927 if (code == lir_checkcast) {
1928 Register obj = op->object()->as_register();
1929 Register dst = op->result_opr()->as_register();
1930 Label success;
1931 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1932 __ bind(success);
1933 if (dst != obj) {
1934 __ mov(dst, obj);
1935 }
1936 } else
1937 if (code == lir_instanceof) {
1938 Register obj = op->object()->as_register();
1939 Register dst = op->result_opr()->as_register();
1940 Label success, failure, done;
1941 emit_typecheck_helper(op, &success, &failure, &failure);
1942 __ bind(failure);
1943 __ xorptr(dst, dst);
1944 __ jmpb(done);
1945 __ bind(success);
1946 __ movptr(dst, 1);
1947 __ bind(done);
1948 } else {
1949 ShouldNotReachHere();
1950 }
1952 }
1955 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1956 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1957 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1958 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1959 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1960 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1961 Register addr = op->addr()->as_register();
1962 if (os::is_MP()) {
1963 __ lock();
1964 }
1965 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1967 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1968 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1969 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1970 Register newval = op->new_value()->as_register();
1971 Register cmpval = op->cmp_value()->as_register();
1972 assert(cmpval == rax, "wrong register");
1973 assert(newval != NULL, "new val must be register");
1974 assert(cmpval != newval, "cmp and new values must be in different registers");
1975 assert(cmpval != addr, "cmp and addr must be in different registers");
1976 assert(newval != addr, "new value and addr must be in different registers");
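    // For an object CAS under compressed oops, both the expected value (in
    // rax) and the new value are narrowed with encode_heap_oop, and a 32-bit
    // cmpxchgl is emitted against the narrow-oop field.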
1978 if ( op->code() == lir_cas_obj) {
1979 #ifdef _LP64
1980 if (UseCompressedOops) {
1981 __ encode_heap_oop(cmpval);
1982 __ mov(rscratch1, newval);
1983 __ encode_heap_oop(rscratch1);
1984 if (os::is_MP()) {
1985 __ lock();
1986 }
1987 // cmpval (rax) is implicitly used by this instruction
1988 __ cmpxchgl(rscratch1, Address(addr, 0));
1989 } else
1990 #endif
1991 {
1992 if (os::is_MP()) {
1993 __ lock();
1994 }
1995 __ cmpxchgptr(newval, Address(addr, 0));
1996 }
1997 } else {
1998 assert(op->code() == lir_cas_int, "lir_cas_int expected");
1999 if (os::is_MP()) {
2000 __ lock();
2001 }
2002 __ cmpxchgl(newval, Address(addr, 0));
2003 }
2004 #ifdef _LP64
2005 } else if (op->code() == lir_cas_long) {
2006 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2007 Register newval = op->new_value()->as_register_lo();
2008 Register cmpval = op->cmp_value()->as_register_lo();
2009 assert(cmpval == rax, "wrong register");
2010 assert(newval != NULL, "new val must be register");
2011 assert(cmpval != newval, "cmp and new values must be in different registers");
2012 assert(cmpval != addr, "cmp and addr must be in different registers");
2013 assert(newval != addr, "new value and addr must be in different registers");
2014 if (os::is_MP()) {
2015 __ lock();
2016 }
2017 __ cmpxchgq(newval, Address(addr, 0));
2018 #endif // _LP64
2019 } else {
2020 Unimplemented();
2021 }
2022 }
2024 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2025 Assembler::Condition acond, ncond;
2026 switch (condition) {
2027 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2028 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2029 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2030 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
2031 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
2032 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
2033 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
2034 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
2035 default: ShouldNotReachHere();
2036 }
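  // Strategy: opr1 is moved into result unconditionally; the negated
  // condition (ncond) then selects opr2, either branch-free via cmov or,
  // without cmov support, via a short branch around the opr2 move.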
2038 if (opr1->is_cpu_register()) {
2039 reg2reg(opr1, result);
2040 } else if (opr1->is_stack()) {
2041 stack2reg(opr1, result, result->type());
2042 } else if (opr1->is_constant()) {
2043 const2reg(opr1, result, lir_patch_none, NULL);
2044 } else {
2045 ShouldNotReachHere();
2046 }
2048 if (VM_Version::supports_cmov() && !opr2->is_constant()) {
2049 // optimized version that does not require a branch
2050 if (opr2->is_single_cpu()) {
2051 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
2052 __ cmov(ncond, result->as_register(), opr2->as_register());
2053 } else if (opr2->is_double_cpu()) {
2054 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2055 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2056 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
2057 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
2058 } else if (opr2->is_single_stack()) {
2059 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
2060 } else if (opr2->is_double_stack()) {
2061 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
2062 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
2063 } else {
2064 ShouldNotReachHere();
2065 }
2067 } else {
2068 Label skip;
2069 __ jcc (acond, skip);
2070 if (opr2->is_cpu_register()) {
2071 reg2reg(opr2, result);
2072 } else if (opr2->is_stack()) {
2073 stack2reg(opr2, result, result->type());
2074 } else if (opr2->is_constant()) {
2075 const2reg(opr2, result, lir_patch_none, NULL);
2076 } else {
2077 ShouldNotReachHere();
2078 }
2079 __ bind(skip);
2080 }
2081 }
2084 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
2085 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
2087 if (left->is_single_cpu()) {
2088 assert(left == dest, "left and dest must be equal");
2089 Register lreg = left->as_register();
2091 if (right->is_single_cpu()) {
2092 // cpu register - cpu register
2093 Register rreg = right->as_register();
2094 switch (code) {
2095 case lir_add: __ addl (lreg, rreg); break;
2096 case lir_sub: __ subl (lreg, rreg); break;
2097 case lir_mul: __ imull(lreg, rreg); break;
2098 default: ShouldNotReachHere();
2099 }
2101 } else if (right->is_stack()) {
2102 // cpu register - stack
2103 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2104 switch (code) {
2105 case lir_add: __ addl(lreg, raddr); break;
2106 case lir_sub: __ subl(lreg, raddr); break;
2107 default: ShouldNotReachHere();
2108 }
2110 } else if (right->is_constant()) {
2111 // cpu register - constant
2112 jint c = right->as_constant_ptr()->as_jint();
2113 switch (code) {
2114 case lir_add: {
2115 __ incrementl(lreg, c);
2116 break;
2117 }
2118 case lir_sub: {
2119 __ decrementl(lreg, c);
2120 break;
2121 }
2122 default: ShouldNotReachHere();
2123 }
2125 } else {
2126 ShouldNotReachHere();
2127 }
2129 } else if (left->is_double_cpu()) {
2130 assert(left == dest, "left and dest must be equal");
2131 Register lreg_lo = left->as_register_lo();
2132 Register lreg_hi = left->as_register_hi();
2134 if (right->is_double_cpu()) {
2135 // cpu register - cpu register
2136 Register rreg_lo = right->as_register_lo();
2137 Register rreg_hi = right->as_register_hi();
2138 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2139 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2140 switch (code) {
2141 case lir_add:
2142 __ addptr(lreg_lo, rreg_lo);
2143 NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2144 break;
2145 case lir_sub:
2146 __ subptr(lreg_lo, rreg_lo);
2147 NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2148 break;
2149 case lir_mul:
2150 #ifdef _LP64
2151 __ imulq(lreg_lo, rreg_lo);
2152 #else
2153 assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2154 __ imull(lreg_hi, rreg_lo);
2155 __ imull(rreg_hi, lreg_lo);
2156 __ addl (rreg_hi, lreg_hi);
2157 __ mull (rreg_lo);
2158 __ addl (lreg_hi, rreg_hi);
2159 #endif // _LP64
2160 break;
2161 default:
2162 ShouldNotReachHere();
2163 }
2165 } else if (right->is_constant()) {
2166 // cpu register - constant
2167 #ifdef _LP64
2168 jlong c = right->as_constant_ptr()->as_jlong_bits();
2169 __ movptr(r10, (intptr_t) c);
2170 switch (code) {
2171 case lir_add:
2172 __ addptr(lreg_lo, r10);
2173 break;
2174 case lir_sub:
2175 __ subptr(lreg_lo, r10);
2176 break;
2177 default:
2178 ShouldNotReachHere();
2179 }
2180 #else
2181 jint c_lo = right->as_constant_ptr()->as_jint_lo();
2182 jint c_hi = right->as_constant_ptr()->as_jint_hi();
2183 switch (code) {
2184 case lir_add:
2185 __ addptr(lreg_lo, c_lo);
2186 __ adcl(lreg_hi, c_hi);
2187 break;
2188 case lir_sub:
2189 __ subptr(lreg_lo, c_lo);
2190 __ sbbl(lreg_hi, c_hi);
2191 break;
2192 default:
2193 ShouldNotReachHere();
2194 }
2195 #endif // _LP64
2197 } else {
2198 ShouldNotReachHere();
2199 }
2201 } else if (left->is_single_xmm()) {
2202 assert(left == dest, "left and dest must be equal");
2203 XMMRegister lreg = left->as_xmm_float_reg();
2205 if (right->is_single_xmm()) {
2206 XMMRegister rreg = right->as_xmm_float_reg();
2207 switch (code) {
2208 case lir_add: __ addss(lreg, rreg); break;
2209 case lir_sub: __ subss(lreg, rreg); break;
2210 case lir_mul_strictfp: // fall through
2211 case lir_mul: __ mulss(lreg, rreg); break;
2212 case lir_div_strictfp: // fall through
2213 case lir_div: __ divss(lreg, rreg); break;
2214 default: ShouldNotReachHere();
2215 }
2216 } else {
2217 Address raddr;
2218 if (right->is_single_stack()) {
2219 raddr = frame_map()->address_for_slot(right->single_stack_ix());
2220 } else if (right->is_constant()) {
2221 // hack for now
2222 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2223 } else {
2224 ShouldNotReachHere();
2225 }
2226 switch (code) {
2227 case lir_add: __ addss(lreg, raddr); break;
2228 case lir_sub: __ subss(lreg, raddr); break;
2229 case lir_mul_strictfp: // fall through
2230 case lir_mul: __ mulss(lreg, raddr); break;
2231 case lir_div_strictfp: // fall through
2232 case lir_div: __ divss(lreg, raddr); break;
2233 default: ShouldNotReachHere();
2234 }
2235 }
2237 } else if (left->is_double_xmm()) {
2238 assert(left == dest, "left and dest must be equal");
2240 XMMRegister lreg = left->as_xmm_double_reg();
2241 if (right->is_double_xmm()) {
2242 XMMRegister rreg = right->as_xmm_double_reg();
2243 switch (code) {
2244 case lir_add: __ addsd(lreg, rreg); break;
2245 case lir_sub: __ subsd(lreg, rreg); break;
2246 case lir_mul_strictfp: // fall through
2247 case lir_mul: __ mulsd(lreg, rreg); break;
2248 case lir_div_strictfp: // fall through
2249 case lir_div: __ divsd(lreg, rreg); break;
2250 default: ShouldNotReachHere();
2251 }
2252 } else {
2253 Address raddr;
2254 if (right->is_double_stack()) {
2255 raddr = frame_map()->address_for_slot(right->double_stack_ix());
2256 } else if (right->is_constant()) {
2257 // hack for now
2258 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2259 } else {
2260 ShouldNotReachHere();
2261 }
2262 switch (code) {
2263 case lir_add: __ addsd(lreg, raddr); break;
2264 case lir_sub: __ subsd(lreg, raddr); break;
2265 case lir_mul_strictfp: // fall through
2266 case lir_mul: __ mulsd(lreg, raddr); break;
2267 case lir_div_strictfp: // fall through
2268 case lir_div: __ divsd(lreg, raddr); break;
2269 default: ShouldNotReachHere();
2270 }
2271 }
2273 } else if (left->is_single_fpu()) {
2274 assert(dest->is_single_fpu(), "fpu stack allocation required");
2276 if (right->is_single_fpu()) {
2277 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2279 } else {
2280 assert(left->fpu_regnr() == 0, "left must be on TOS");
2281 assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2283 Address raddr;
2284 if (right->is_single_stack()) {
2285 raddr = frame_map()->address_for_slot(right->single_stack_ix());
2286 } else if (right->is_constant()) {
2287 address const_addr = float_constant(right->as_jfloat());
2288         assert(const_addr != NULL, "incorrect float/double constant maintenance");
2289 // hack for now
2290 raddr = __ as_Address(InternalAddress(const_addr));
2291 } else {
2292 ShouldNotReachHere();
2293 }
2295 switch (code) {
2296 case lir_add: __ fadd_s(raddr); break;
2297 case lir_sub: __ fsub_s(raddr); break;
2298 case lir_mul_strictfp: // fall through
2299 case lir_mul: __ fmul_s(raddr); break;
2300 case lir_div_strictfp: // fall through
2301 case lir_div: __ fdiv_s(raddr); break;
2302 default: ShouldNotReachHere();
2303 }
2304 }
2306 } else if (left->is_double_fpu()) {
2307 assert(dest->is_double_fpu(), "fpu stack allocation required");
2309 if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2310 // Double values require special handling for strictfp mul/div on x86
2311 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
2312 __ fmulp(left->fpu_regnrLo() + 1);
2313 }
2315 if (right->is_double_fpu()) {
2316 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2318 } else {
2319 assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2320 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2322 Address raddr;
2323 if (right->is_double_stack()) {
2324 raddr = frame_map()->address_for_slot(right->double_stack_ix());
2325 } else if (right->is_constant()) {
2326 // hack for now
2327 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2328 } else {
2329 ShouldNotReachHere();
2330 }
2332 switch (code) {
2333 case lir_add: __ fadd_d(raddr); break;
2334 case lir_sub: __ fsub_d(raddr); break;
2335 case lir_mul_strictfp: // fall through
2336 case lir_mul: __ fmul_d(raddr); break;
2337 case lir_div_strictfp: // fall through
2338 case lir_div: __ fdiv_d(raddr); break;
2339 default: ShouldNotReachHere();
2340 }
2341 }
2343 if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2344 // Double values require special handling for strictfp mul/div on x86
2345 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
2346 __ fmulp(dest->fpu_regnrLo() + 1);
2347 }
2349 } else if (left->is_single_stack() || left->is_address()) {
2350 assert(left == dest, "left and dest must be equal");
2352 Address laddr;
2353 if (left->is_single_stack()) {
2354 laddr = frame_map()->address_for_slot(left->single_stack_ix());
2355 } else if (left->is_address()) {
2356 laddr = as_Address(left->as_address_ptr());
2357 } else {
2358 ShouldNotReachHere();
2359 }
2361 if (right->is_single_cpu()) {
2362 Register rreg = right->as_register();
2363 switch (code) {
2364 case lir_add: __ addl(laddr, rreg); break;
2365 case lir_sub: __ subl(laddr, rreg); break;
2366 default: ShouldNotReachHere();
2367 }
2368 } else if (right->is_constant()) {
2369 jint c = right->as_constant_ptr()->as_jint();
2370 switch (code) {
2371 case lir_add: {
2372 __ incrementl(laddr, c);
2373 break;
2374 }
2375 case lir_sub: {
2376 __ decrementl(laddr, c);
2377 break;
2378 }
2379 default: ShouldNotReachHere();
2380 }
2381 } else {
2382 ShouldNotReachHere();
2383 }
2385 } else {
2386 ShouldNotReachHere();
2387 }
2388 }
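// x87 note for arith_fpu_implementation: one operand is always on TOS
// (ST(0)).  The plain forms (e.g. fadd(i)) leave the result in ST(0), the
// 'a' forms (e.g. fadda(i)) leave it in ST(i), and the 'p' forms also pop
// the stack -- this is how dest_index and pop_fpu_stack are honoured below.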
2390 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2391 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR");
2392 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2393 assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2395 bool left_is_tos = (left_index == 0);
2396 bool dest_is_tos = (dest_index == 0);
2397 int non_tos_index = (left_is_tos ? right_index : left_index);
2399 switch (code) {
2400 case lir_add:
2401 if (pop_fpu_stack) __ faddp(non_tos_index);
2402 else if (dest_is_tos) __ fadd (non_tos_index);
2403 else __ fadda(non_tos_index);
2404 break;
2406 case lir_sub:
2407 if (left_is_tos) {
2408 if (pop_fpu_stack) __ fsubrp(non_tos_index);
2409 else if (dest_is_tos) __ fsub (non_tos_index);
2410 else __ fsubra(non_tos_index);
2411 } else {
2412 if (pop_fpu_stack) __ fsubp (non_tos_index);
2413 else if (dest_is_tos) __ fsubr (non_tos_index);
2414 else __ fsuba (non_tos_index);
2415 }
2416 break;
2418 case lir_mul_strictfp: // fall through
2419 case lir_mul:
2420 if (pop_fpu_stack) __ fmulp(non_tos_index);
2421 else if (dest_is_tos) __ fmul (non_tos_index);
2422 else __ fmula(non_tos_index);
2423 break;
2425 case lir_div_strictfp: // fall through
2426 case lir_div:
2427 if (left_is_tos) {
2428 if (pop_fpu_stack) __ fdivrp(non_tos_index);
2429 else if (dest_is_tos) __ fdiv (non_tos_index);
2430 else __ fdivra(non_tos_index);
2431 } else {
2432 if (pop_fpu_stack) __ fdivp (non_tos_index);
2433 else if (dest_is_tos) __ fdivr (non_tos_index);
2434 else __ fdiva (non_tos_index);
2435 }
2436 break;
2438 case lir_rem:
2439 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2440 __ fremr(noreg);
2441 break;
2443 default:
2444 ShouldNotReachHere();
2445 }
2446 }
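// Math intrinsics.  For a double in an XMM register, abs is an ANDPD with
// the 128-bit sign-mask constant (0x7FFFFFFFFFFFFFFF in both lanes), which
// simply clears the sign bit; sqrt maps directly to sqrtsd.  The remaining
// intrinsics fall back to x87 code.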
2449 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2450 if (value->is_double_xmm()) {
2451 switch(code) {
2452 case lir_abs :
2453 {
2454 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2455 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2456 }
2457 __ andpd(dest->as_xmm_double_reg(),
2458 ExternalAddress((address)double_signmask_pool));
2459 }
2460 break;
2462 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2463       // the remaining intrinsics are not available in the SSE instruction set, so the FPU is used
2464 default : ShouldNotReachHere();
2465 }
2467 } else if (value->is_double_fpu()) {
2468 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2469 switch(code) {
2470 case lir_log : __ flog() ; break;
2471 case lir_log10 : __ flog10() ; break;
2472 case lir_abs : __ fabs() ; break;
2473 case lir_sqrt : __ fsqrt(); break;
2474 case lir_sin :
2475 // Should consider not saving rbx, if not necessary
2476 __ trigfunc('s', op->as_Op2()->fpu_stack_size());
2477 break;
2478 case lir_cos :
2479 // Should consider not saving rbx, if not necessary
2480 assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
2481 __ trigfunc('c', op->as_Op2()->fpu_stack_size());
2482 break;
2483 case lir_tan :
2484 // Should consider not saving rbx, if not necessary
2485 __ trigfunc('t', op->as_Op2()->fpu_stack_size());
2486 break;
2487 case lir_exp :
2488 __ exp_with_fallback(op->as_Op2()->fpu_stack_size());
2489 break;
2490 case lir_pow :
2491 __ pow_with_fallback(op->as_Op2()->fpu_stack_size());
2492 break;
2493 default : ShouldNotReachHere();
2494 }
2495 } else {
2496 Unimplemented();
2497 }
2498 }
2500 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2501 // assert(left->destroys_register(), "check");
2502 if (left->is_single_cpu()) {
2503 Register reg = left->as_register();
2504 if (right->is_constant()) {
2505 int val = right->as_constant_ptr()->as_jint();
2506 switch (code) {
2507 case lir_logic_and: __ andl (reg, val); break;
2508 case lir_logic_or: __ orl (reg, val); break;
2509 case lir_logic_xor: __ xorl (reg, val); break;
2510 default: ShouldNotReachHere();
2511 }
2512 } else if (right->is_stack()) {
2513 // added support for stack operands
2514 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2515 switch (code) {
2516 case lir_logic_and: __ andl (reg, raddr); break;
2517 case lir_logic_or: __ orl (reg, raddr); break;
2518 case lir_logic_xor: __ xorl (reg, raddr); break;
2519 default: ShouldNotReachHere();
2520 }
2521 } else {
2522 Register rright = right->as_register();
2523 switch (code) {
2524 case lir_logic_and: __ andptr (reg, rright); break;
2525 case lir_logic_or : __ orptr (reg, rright); break;
2526 case lir_logic_xor: __ xorptr (reg, rright); break;
2527 default: ShouldNotReachHere();
2528 }
2529 }
2530 move_regs(reg, dst->as_register());
2531 } else {
2532 Register l_lo = left->as_register_lo();
2533 Register l_hi = left->as_register_hi();
2534 if (right->is_constant()) {
2535 #ifdef _LP64
2536 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2537 switch (code) {
2538 case lir_logic_and:
2539 __ andq(l_lo, rscratch1);
2540 break;
2541 case lir_logic_or:
2542 __ orq(l_lo, rscratch1);
2543 break;
2544 case lir_logic_xor:
2545 __ xorq(l_lo, rscratch1);
2546 break;
2547 default: ShouldNotReachHere();
2548 }
2549 #else
2550 int r_lo = right->as_constant_ptr()->as_jint_lo();
2551 int r_hi = right->as_constant_ptr()->as_jint_hi();
2552 switch (code) {
2553 case lir_logic_and:
2554 __ andl(l_lo, r_lo);
2555 __ andl(l_hi, r_hi);
2556 break;
2557 case lir_logic_or:
2558 __ orl(l_lo, r_lo);
2559 __ orl(l_hi, r_hi);
2560 break;
2561 case lir_logic_xor:
2562 __ xorl(l_lo, r_lo);
2563 __ xorl(l_hi, r_hi);
2564 break;
2565 default: ShouldNotReachHere();
2566 }
2567 #endif // _LP64
2568 } else {
2569 #ifdef _LP64
2570 Register r_lo;
2571 if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2572 r_lo = right->as_register();
2573 } else {
2574 r_lo = right->as_register_lo();
2575 }
2576 #else
2577 Register r_lo = right->as_register_lo();
2578 Register r_hi = right->as_register_hi();
2579 assert(l_lo != r_hi, "overwriting registers");
2580 #endif
2581 switch (code) {
2582 case lir_logic_and:
2583 __ andptr(l_lo, r_lo);
2584 NOT_LP64(__ andptr(l_hi, r_hi);)
2585 break;
2586 case lir_logic_or:
2587 __ orptr(l_lo, r_lo);
2588 NOT_LP64(__ orptr(l_hi, r_hi);)
2589 break;
2590 case lir_logic_xor:
2591 __ xorptr(l_lo, r_lo);
2592 NOT_LP64(__ xorptr(l_hi, r_hi);)
2593 break;
2594 default: ShouldNotReachHere();
2595 }
2596 }
2598 Register dst_lo = dst->as_register_lo();
2599 Register dst_hi = dst->as_register_hi();
2601 #ifdef _LP64
2602 move_regs(l_lo, dst_lo);
2603 #else
2604 if (dst_lo == l_hi) {
2605 assert(dst_hi != l_lo, "overwriting registers");
2606 move_regs(l_hi, dst_hi);
2607 move_regs(l_lo, dst_lo);
2608 } else {
2609 assert(dst_lo != l_hi, "overwriting registers");
2610 move_regs(l_lo, dst_lo);
2611 move_regs(l_hi, dst_hi);
2612 }
2613 #endif // _LP64
2614 }
2615 }
2618 // we assume that rax, and rdx can be overwritten
2619 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2621 assert(left->is_single_cpu(), "left must be register");
2622 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
2623 assert(result->is_single_cpu(), "result must be register");
2625 // assert(left->destroys_register(), "check");
2626 // assert(right->destroys_register(), "check");
2628 Register lreg = left->as_register();
2629 Register dreg = result->as_register();
2631 if (right->is_constant()) {
2632 int divisor = right->as_constant_ptr()->as_jint();
2633 assert(divisor > 0 && is_power_of_2(divisor), "must be");
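    // Signed division by 2^k must round toward zero, so negative dividends
    // are biased by (divisor - 1) before the arithmetic shift.  cdql
    // broadcasts the sign bit into rdx (-1 or 0), making the bias branch-free.
    // Worked example, -7 / 4: rdx = -1; rdx & 3 = 3; -7 + 3 = -4; -4 >> 2 = -1.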
2634 if (code == lir_idiv) {
2635 assert(lreg == rax, "must be rax,");
2636 assert(temp->as_register() == rdx, "tmp register must be rdx");
2637 __ cdql(); // sign extend into rdx:rax
2638 if (divisor == 2) {
2639 __ subl(lreg, rdx);
2640 } else {
2641 __ andl(rdx, divisor - 1);
2642 __ addl(lreg, rdx);
2643 }
2644 __ sarl(lreg, log2_intptr(divisor));
2645 move_regs(lreg, dreg);
2646 } else if (code == lir_irem) {
2647 Label done;
2648 __ mov(dreg, lreg);
2649 __ andl(dreg, 0x80000000 | (divisor - 1));
2650 __ jcc(Assembler::positive, done);
2651 __ decrement(dreg);
2652 __ orl(dreg, ~(divisor - 1));
2653 __ increment(dreg);
2654 __ bind(done);
2655 } else {
2656 ShouldNotReachHere();
2657 }
2658 } else {
2659 Register rreg = right->as_register();
2660 assert(lreg == rax, "left register must be rax,");
2661 assert(rreg != rdx, "right register must not be rdx");
2662 assert(temp->as_register() == rdx, "tmp register must be rdx");
2664 move_regs(lreg, rax);
2666 int idivl_offset = __ corrected_idivl(rreg);
2667 add_debug_info_for_div0(idivl_offset, info);
2668 if (code == lir_irem) {
2669 move_regs(rdx, dreg); // result is in rdx
2670 } else {
2671 move_regs(rax, dreg);
2672 }
2673 }
2674 }
2677 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2678 if (opr1->is_single_cpu()) {
2679 Register reg1 = opr1->as_register();
2680 if (opr2->is_single_cpu()) {
2681 // cpu register - cpu register
2682 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2683 __ cmpptr(reg1, opr2->as_register());
2684 } else {
2685 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2686 __ cmpl(reg1, opr2->as_register());
2687 }
2688 } else if (opr2->is_stack()) {
2689 // cpu register - stack
2690 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2691 __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2692 } else {
2693 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2694 }
2695 } else if (opr2->is_constant()) {
2696 // cpu register - constant
2697 LIR_Const* c = opr2->as_constant_ptr();
2698 if (c->type() == T_INT) {
2699 __ cmpl(reg1, c->as_jint());
2700 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2701         // In 64 bit, oops are a single register
2702 jobject o = c->as_jobject();
2703 if (o == NULL) {
2704 __ cmpptr(reg1, (int32_t)NULL_WORD);
2705 } else {
2706 #ifdef _LP64
2707 __ movoop(rscratch1, o);
2708 __ cmpptr(reg1, rscratch1);
2709 #else
2710 __ cmpoop(reg1, c->as_jobject());
2711 #endif // _LP64
2712 }
2713 } else {
2714 fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
2715 }
2716 // cpu register - address
2717 } else if (opr2->is_address()) {
2718 if (op->info() != NULL) {
2719 add_debug_info_for_null_check_here(op->info());
2720 }
2721 __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2722 } else {
2723 ShouldNotReachHere();
2724 }
2726 } else if(opr1->is_double_cpu()) {
2727 Register xlo = opr1->as_register_lo();
2728 Register xhi = opr1->as_register_hi();
2729 if (opr2->is_double_cpu()) {
2730 #ifdef _LP64
2731 __ cmpptr(xlo, opr2->as_register_lo());
2732 #else
2733 // cpu register - cpu register
2734 Register ylo = opr2->as_register_lo();
2735 Register yhi = opr2->as_register_hi();
2736 __ subl(xlo, ylo);
2737 __ sbbl(xhi, yhi);
2738 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2739 __ orl(xhi, xlo);
2740 }
2741 #endif // _LP64
2742 } else if (opr2->is_constant()) {
2743 // cpu register - constant 0
2744 assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2745 #ifdef _LP64
2746 __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2747 #else
2748 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2749 __ orl(xhi, xlo);
2750 #endif // _LP64
2751 } else {
2752 ShouldNotReachHere();
2753 }
2755 } else if (opr1->is_single_xmm()) {
2756 XMMRegister reg1 = opr1->as_xmm_float_reg();
2757 if (opr2->is_single_xmm()) {
2758 // xmm register - xmm register
2759 __ ucomiss(reg1, opr2->as_xmm_float_reg());
2760 } else if (opr2->is_stack()) {
2761 // xmm register - stack
2762 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2763 } else if (opr2->is_constant()) {
2764 // xmm register - constant
2765 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2766 } else if (opr2->is_address()) {
2767 // xmm register - address
2768 if (op->info() != NULL) {
2769 add_debug_info_for_null_check_here(op->info());
2770 }
2771 __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2772 } else {
2773 ShouldNotReachHere();
2774 }
2776 } else if (opr1->is_double_xmm()) {
2777 XMMRegister reg1 = opr1->as_xmm_double_reg();
2778 if (opr2->is_double_xmm()) {
2779 // xmm register - xmm register
2780 __ ucomisd(reg1, opr2->as_xmm_double_reg());
2781 } else if (opr2->is_stack()) {
2782 // xmm register - stack
2783 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2784 } else if (opr2->is_constant()) {
2785 // xmm register - constant
2786 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2787 } else if (opr2->is_address()) {
2788 // xmm register - address
2789 if (op->info() != NULL) {
2790 add_debug_info_for_null_check_here(op->info());
2791 }
2792 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2793 } else {
2794 ShouldNotReachHere();
2795 }
2797 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2798 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2799 assert(opr2->is_fpu_register(), "both must be registers");
2800 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2802 } else if (opr1->is_address() && opr2->is_constant()) {
2803 LIR_Const* c = opr2->as_constant_ptr();
2804 #ifdef _LP64
2805 if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2806 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2807 __ movoop(rscratch1, c->as_jobject());
2808 }
2809 #endif // LP64
2810 if (op->info() != NULL) {
2811 add_debug_info_for_null_check_here(op->info());
2812 }
2813 // special case: address - constant
2814 LIR_Address* addr = opr1->as_address_ptr();
2815 if (c->type() == T_INT) {
2816 __ cmpl(as_Address(addr), c->as_jint());
2817 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2818 #ifdef _LP64
2819 // %%% Make this explode if addr isn't reachable until we figure out a
2820 // better strategy by giving noreg as the temp for as_Address
2821 __ cmpptr(rscratch1, as_Address(addr, noreg));
2822 #else
2823 __ cmpoop(as_Address(addr), c->as_jobject());
2824 #endif // _LP64
2825 } else {
2826 ShouldNotReachHere();
2827 }
2829 } else {
2830 ShouldNotReachHere();
2831 }
2832 }
2834 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2835 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2836 if (left->is_single_xmm()) {
2837 assert(right->is_single_xmm(), "must match");
2838 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2839 } else if (left->is_double_xmm()) {
2840 assert(right->is_double_xmm(), "must match");
2841 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2843 } else {
2844 assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2845 assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2847 assert(left->fpu() == 0, "left must be on TOS");
2848 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2849 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2850 }
2851 } else {
2852 assert(code == lir_cmp_l2i, "check");
2853 #ifdef _LP64
2854 Label done;
2855 Register dest = dst->as_register();
2856 __ cmpptr(left->as_register_lo(), right->as_register_lo());
2857 __ movl(dest, -1);
2858 __ jccb(Assembler::less, done);
2859 __ set_byte_if_not_zero(dest);
2860 __ movzbl(dest, dest);
2861 __ bind(done);
2862 #else
2863 __ lcmp2int(left->as_register_hi(),
2864 left->as_register_lo(),
2865 right->as_register_hi(),
2866 right->as_register_lo());
2867 move_regs(left->as_register_hi(), dst->as_register());
2868 #endif // _LP64
2869 }
2870 }
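// The 64-bit lcmp sequence above reduces to sign(left - right):
//   dest = -1; if (left < right) done; dest = (left != right) ? 1 : 0;
// yielding -1, 0 or 1 as lcmp requires.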
2873 void LIR_Assembler::align_call(LIR_Code code) {
2874 if (os::is_MP()) {
2875 // make sure that the displacement word of the call ends up word aligned
2876 int offset = __ offset();
2877 switch (code) {
2878 case lir_static_call:
2879 case lir_optvirtual_call:
2880 case lir_dynamic_call:
2881 offset += NativeCall::displacement_offset;
2882 break;
2883 case lir_icvirtual_call:
2884 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2885 break;
2886 case lir_virtual_call: // currently, sparc-specific for niagara
2887 default: ShouldNotReachHere();
2888 }
2889 while (offset++ % BytesPerWord != 0) {
2890 __ nop();
2891 }
2892 }
2893 }
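// align_call pads with nops so that the call's 32-bit displacement starts
// word-aligned and can therefore be patched atomically on MP systems; the
// call emitters below assert exactly this alignment.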
2896 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2897 assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2898 "must be aligned");
2899 __ call(AddressLiteral(op->addr(), rtype));
2900 add_call_info(code_offset(), op->info());
2901 }
2904 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2905 __ ic_call(op->addr());
2906 add_call_info(code_offset(), op->info());
2907 assert(!os::is_MP() ||
2908 (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2909 "must be aligned");
2910 }
2913 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2914 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2915 ShouldNotReachHere();
2916 }
2919 void LIR_Assembler::emit_static_call_stub() {
2920 address call_pc = __ pc();
2921 address stub = __ start_a_stub(call_stub_size);
2922 if (stub == NULL) {
2923 bailout("static call stub overflow");
2924 return;
2925 }
2927 int start = __ offset();
2928 if (os::is_MP()) {
2929 // make sure that the displacement word of the call ends up word aligned
2930 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
2931 while (offset++ % BytesPerWord != 0) {
2932 __ nop();
2933 }
2934 }
2935 __ relocate(static_stub_Relocation::spec(call_pc));
2936 __ mov_metadata(rbx, (Metadata*)NULL);
2937 // must be set to -1 at code generation time
2938 assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2939   // On 64bit this will die since it will take a movq & jmp; it must be only a jmp
2940 __ jump(RuntimeAddress(__ pc()));
2942 assert(__ offset() - start <= call_stub_size, "stub too big");
2943 __ end_a_stub();
2944 }
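// The static call stub consists of a mov_metadata with a NULL placeholder
// followed by a jump-to-self; both fields are patched with the resolved
// Method* and entry point when the call site is bound.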
2947 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2948 assert(exceptionOop->as_register() == rax, "must match");
2949 assert(exceptionPC->as_register() == rdx, "must match");
2951 // exception object is not added to oop map by LinearScan
2952 // (LinearScan assumes that no oops are in fixed registers)
2953 info->add_register_oop(exceptionOop);
2954 Runtime1::StubID unwind_id;
2956 // get current pc information
2957 // pc is only needed if the method has an exception handler, the unwind code does not need it.
2958 int pc_for_athrow_offset = __ offset();
2959 InternalAddress pc_for_athrow(__ pc());
2960 __ lea(exceptionPC->as_register(), pc_for_athrow);
2961 add_call_info(pc_for_athrow_offset, info); // for exception handler
2963 __ verify_not_null_oop(rax);
2964 // search an exception handler (rax: exception oop, rdx: throwing pc)
2965 if (compilation()->has_fpu_code()) {
2966 unwind_id = Runtime1::handle_exception_id;
2967 } else {
2968 unwind_id = Runtime1::handle_exception_nofpu_id;
2969 }
2970 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2972 // enough room for two byte trap
2973 __ nop();
2974 }
2977 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2978 assert(exceptionOop->as_register() == rax, "must match");
2980 __ jmp(_unwind_handler_entry);
2981 }
2984 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2986 // optimized version for linear scan:
2987 // * count must be already in ECX (guaranteed by LinearScan)
2988 // * left and dest must be equal
2989 // * tmp must be unused
2990 assert(count->as_register() == SHIFT_count, "count must be in ECX");
2991 assert(left == dest, "left and dest must be equal");
2992 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2994 if (left->is_single_cpu()) {
2995 Register value = left->as_register();
2996 assert(value != SHIFT_count, "left cannot be ECX");
2998 switch (code) {
2999 case lir_shl: __ shll(value); break;
3000 case lir_shr: __ sarl(value); break;
3001 case lir_ushr: __ shrl(value); break;
3002 default: ShouldNotReachHere();
3003 }
3004 } else if (left->is_double_cpu()) {
3005 Register lo = left->as_register_lo();
3006 Register hi = left->as_register_hi();
3007 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
3008 #ifdef _LP64
3009 switch (code) {
3010 case lir_shl: __ shlptr(lo); break;
3011 case lir_shr: __ sarptr(lo); break;
3012 case lir_ushr: __ shrptr(lo); break;
3013 default: ShouldNotReachHere();
3014 }
3015 #else
3017 switch (code) {
3018 case lir_shl: __ lshl(hi, lo); break;
3019 case lir_shr: __ lshr(hi, lo, true); break;
3020 case lir_ushr: __ lshr(hi, lo, false); break;
3021 default: ShouldNotReachHere();
3022 }
3023 #endif // LP64
3024 } else {
3025 ShouldNotReachHere();
3026 }
3027 }
3030 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
3031 if (dest->is_single_cpu()) {
3032 // first move left into dest so that left is not destroyed by the shift
3033 Register value = dest->as_register();
3034 count = count & 0x1F; // Java spec
3036 move_regs(left->as_register(), value);
3037 switch (code) {
3038 case lir_shl: __ shll(value, count); break;
3039 case lir_shr: __ sarl(value, count); break;
3040 case lir_ushr: __ shrl(value, count); break;
3041 default: ShouldNotReachHere();
3042 }
3043 } else if (dest->is_double_cpu()) {
3044 #ifndef _LP64
3045 Unimplemented();
3046 #else
3047 // first move left into dest so that left is not destroyed by the shift
3048 Register value = dest->as_register_lo();
3049 count = count & 0x1F; // Java spec
3051 move_regs(left->as_register_lo(), value);
3052 switch (code) {
3053 case lir_shl: __ shlptr(value, count); break;
3054 case lir_shr: __ sarptr(value, count); break;
3055 case lir_ushr: __ shrptr(value, count); break;
3056 default: ShouldNotReachHere();
3057 }
3058 #endif // _LP64
3059 } else {
3060 ShouldNotReachHere();
3061 }
3062 }
3065 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
3066 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3067 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3068 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3069 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
3070 }
3073 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
3074 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3075 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3076 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3077 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3078 }
3081 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3082 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3083 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3084 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3085 __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3086 }
3089 // This code replaces a call to arraycopy; no exceptions may
3090 // be thrown in this code, they must be thrown in the System.arraycopy
3091 // activation frame; we could save some checks if this were not the case
3092 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3093 ciArrayKlass* default_type = op->expected_type();
3094 Register src = op->src()->as_register();
3095 Register dst = op->dst()->as_register();
3096 Register src_pos = op->src_pos()->as_register();
3097 Register dst_pos = op->dst_pos()->as_register();
3098 Register length = op->length()->as_register();
3099 Register tmp = op->tmp()->as_register();
3101 CodeStub* stub = op->stub();
3102 int flags = op->flags();
3103 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3104 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
3106 // if we don't know anything, just go through the generic arraycopy
3107 if (default_type == NULL) {
3108 Label done;
3109 // save outgoing arguments on stack in case call to System.arraycopy is needed
3110     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3111     // for interpreter calling conventions. Now we have to do it in new-style conventions.
3112     // For the moment, until C1 gets the new register allocator, I just force all the
3113     // args to the right place (except the register args) and then on the back side
3114     // reload the register args properly if we go slow path. Yuck
3116 // These are proper for the calling convention
3117 store_parameter(length, 2);
3118 store_parameter(dst_pos, 1);
3119 store_parameter(dst, 0);
3121 // these are just temporary placements until we need to reload
3122 store_parameter(src_pos, 3);
3123 store_parameter(src, 4);
3124 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3126 address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
3128 address copyfunc_addr = StubRoutines::generic_arraycopy();
3130     // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
3131 #ifdef _LP64
3132 // The arguments are in java calling convention so we can trivially shift them to C
3133 // convention
3134 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3135 __ mov(c_rarg0, j_rarg0);
3136 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3137 __ mov(c_rarg1, j_rarg1);
3138 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3139 __ mov(c_rarg2, j_rarg2);
3140 assert_different_registers(c_rarg3, j_rarg4);
3141 __ mov(c_rarg3, j_rarg3);
3142 #ifdef _WIN64
3143 // Allocate abi space for args but be sure to keep stack aligned
3144 __ subptr(rsp, 6*wordSize);
3145 store_parameter(j_rarg4, 4);
3146 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3147 __ call(RuntimeAddress(C_entry));
3148 } else {
3149 #ifndef PRODUCT
3150 if (PrintC1Statistics) {
3151 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3152 }
3153 #endif
3154 __ call(RuntimeAddress(copyfunc_addr));
3155 }
3156 __ addptr(rsp, 6*wordSize);
3157 #else
3158 __ mov(c_rarg4, j_rarg4);
3159 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3160 __ call(RuntimeAddress(C_entry));
3161 } else {
3162 #ifndef PRODUCT
3163 if (PrintC1Statistics) {
3164 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3165 }
3166 #endif
3167 __ call(RuntimeAddress(copyfunc_addr));
3168 }
3169 #endif // _WIN64
3170 #else
3171 __ push(length);
3172 __ push(dst_pos);
3173 __ push(dst);
3174 __ push(src_pos);
3175 __ push(src);
3177 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3178 __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
3179 } else {
3180 #ifndef PRODUCT
3181 if (PrintC1Statistics) {
3182 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3183 }
3184 #endif
3185 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
3186 }
3188 #endif // _LP64
3190 __ cmpl(rax, 0);
3191 __ jcc(Assembler::equal, *stub->continuation());
3193 if (copyfunc_addr != NULL) {
3194 __ mov(tmp, rax);
3195 __ xorl(tmp, -1);
3196 }
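    // The generic stub returns 0 on success; any other value in rax is the
    // bitwise complement of the number of elements already copied (so -1
    // means none, e.g. for bad arguments).  The xorl above recovers that
    // count before the arguments are adjusted and the slow path is taken.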
3198 // Reload values from the stack so they are where the stub
3199 // expects them.
3200 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3201 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3202 __ movptr (length, Address(rsp, 2*BytesPerWord));
3203 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3204 __ movptr (src, Address(rsp, 4*BytesPerWord));
3206 if (copyfunc_addr != NULL) {
3207 __ subl(length, tmp);
3208 __ addl(src_pos, tmp);
3209 __ addl(dst_pos, tmp);
3210 }
3211 __ jmp(*stub->entry());
3213 __ bind(*stub->continuation());
3214 return;
3215 }
3217 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3219 int elem_size = type2aelembytes(basic_type);
3220 int shift_amount;
3221 Address::ScaleFactor scale;
3223 switch (elem_size) {
3224 case 1 :
3225 shift_amount = 0;
3226 scale = Address::times_1;
3227 break;
3228 case 2 :
3229 shift_amount = 1;
3230 scale = Address::times_2;
3231 break;
3232 case 4 :
3233 shift_amount = 2;
3234 scale = Address::times_4;
3235 break;
3236 case 8 :
3237 shift_amount = 3;
3238 scale = Address::times_8;
3239 break;
3240 default:
3241 ShouldNotReachHere();
3242 }
3244 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
3245 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
3246 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
3247 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
3249   // length and positions are all sign-extended at this point on 64 bit
3251 // test for NULL
3252 if (flags & LIR_OpArrayCopy::src_null_check) {
3253 __ testptr(src, src);
3254 __ jcc(Assembler::zero, *stub->entry());
3255 }
3256 if (flags & LIR_OpArrayCopy::dst_null_check) {
3257 __ testptr(dst, dst);
3258 __ jcc(Assembler::zero, *stub->entry());
3259 }
3261 // check if negative
3262 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
3263 __ testl(src_pos, src_pos);
3264 __ jcc(Assembler::less, *stub->entry());
3265 }
3266 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
3267 __ testl(dst_pos, dst_pos);
3268 __ jcc(Assembler::less, *stub->entry());
3269 }
3271 if (flags & LIR_OpArrayCopy::src_range_check) {
3272 __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3273 __ cmpl(tmp, src_length_addr);
3274 __ jcc(Assembler::above, *stub->entry());
3275 }
3276 if (flags & LIR_OpArrayCopy::dst_range_check) {
3277 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3278 __ cmpl(tmp, dst_length_addr);
3279 __ jcc(Assembler::above, *stub->entry());
3280 }
3282 if (flags & LIR_OpArrayCopy::length_positive_check) {
3283 __ testl(length, length);
3284 __ jcc(Assembler::less, *stub->entry());
3285 __ jcc(Assembler::zero, *stub->continuation());
3286 }
3288 #ifdef _LP64
3289   __ movl2ptr(src_pos, src_pos); //higher 32 bits must be zero
3290   __ movl2ptr(dst_pos, dst_pos); //higher 32 bits must be zero
3291 #endif
3293 if (flags & LIR_OpArrayCopy::type_check) {
3294 // We don't know the array types are compatible
3295 if (basic_type != T_OBJECT) {
3296 // Simple test for basic type arrays
3297 if (UseCompressedKlassPointers) {
3298 __ movl(tmp, src_klass_addr);
3299 __ cmpl(tmp, dst_klass_addr);
3300 } else {
3301 __ movptr(tmp, src_klass_addr);
3302 __ cmpptr(tmp, dst_klass_addr);
3303 }
3304 __ jcc(Assembler::notEqual, *stub->entry());
3305 } else {
3306 // For object arrays, if src is a sub class of dst then we can
3307 // safely do the copy.
3308 Label cont, slow;
3310 __ push(src);
3311 __ push(dst);
3313 __ load_klass(src, src);
3314 __ load_klass(dst, dst);
3316 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3318 __ push(src);
3319 __ push(dst);
3320 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3321 __ pop(dst);
3322 __ pop(src);
3324 __ cmpl(src, 0);
3325 __ jcc(Assembler::notEqual, cont);
3327 __ bind(slow);
3328 __ pop(dst);
3329 __ pop(src);
3331 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
3332 if (copyfunc_addr != NULL) { // use stub if available
3333         // src is not a subclass of dst, so we have to do a
3334         // per-element check.
3336 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
3337 if ((flags & mask) != mask) {
3338           // At least one of the two is known to be an object array; check that the other one is too.
3339 assert(flags & mask, "one of the two should be known to be an object array");
3341 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3342 __ load_klass(tmp, src);
3343 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3344 __ load_klass(tmp, dst);
3345 }
3346 int lh_offset = in_bytes(Klass::layout_helper_offset());
3347 Address klass_lh_addr(tmp, lh_offset);
3348 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3349 __ cmpl(klass_lh_addr, objArray_lh);
3350 __ jcc(Assembler::notEqual, *stub->entry());
3351 }
3353 // Spill because stubs can use any register they like and it's
3354 // easier to restore just those that we care about.
3355 store_parameter(dst, 0);
3356 store_parameter(dst_pos, 1);
3357 store_parameter(length, 2);
3358 store_parameter(src_pos, 3);
3359 store_parameter(src, 4);
3361 #ifndef _LP64
3362 __ movptr(tmp, dst_klass_addr);
3363 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
3364 __ push(tmp);
3365 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
3366 __ push(tmp);
3367 __ push(length);
3368 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3369 __ push(tmp);
3370 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3371 __ push(tmp);
3373 __ call_VM_leaf(copyfunc_addr, 5);
3374 #else
3375         __ movl2ptr(length, length); // upper 32 bits must be zero
3377 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3378 assert_different_registers(c_rarg0, dst, dst_pos, length);
3379 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3380 assert_different_registers(c_rarg1, dst, length);
3382 __ mov(c_rarg2, length);
3383 assert_different_registers(c_rarg2, dst);
3385 #ifdef _WIN64
3386         // Allocate ABI shadow space for the outgoing args, keeping the stack 16-byte aligned
3387 __ subptr(rsp, 6*wordSize);
3388 __ load_klass(c_rarg3, dst);
3389 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
3390 store_parameter(c_rarg3, 4);
3391 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
3392 __ call(RuntimeAddress(copyfunc_addr));
3393 __ addptr(rsp, 6*wordSize);
3394 #else
3395 __ load_klass(c_rarg4, dst);
3396 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
3397 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
3398 __ call(RuntimeAddress(copyfunc_addr));
3399 #endif
3401 #endif
3403 #ifndef PRODUCT
3404 if (PrintC1Statistics) {
3405 Label failed;
3406 __ testl(rax, rax);
3407 __ jcc(Assembler::notZero, failed);
3408 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
3409 __ bind(failed);
3410 }
3411 #endif
3413 __ testl(rax, rax);
3414 __ jcc(Assembler::zero, *stub->continuation());
3416 #ifndef PRODUCT
3417 if (PrintC1Statistics) {
3418 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
3419 }
3420 #endif
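// A note on the return protocol, assuming the usual behavior of the
// x86 checkcast arraycopy stub: rax is 0 when every element was
// copied, and otherwise rax == ~(number of elements copied before the
// failing element). The xor below therefore recovers the copied count,
//   tmp = rax ^ -1;   // == ~rax == elements successfully copied
// so src_pos/dst_pos/length can be advanced past the copied prefix
// before retrying via the slow stub.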
3422 __ mov(tmp, rax);
3424 __ xorl(tmp, -1);
3426 // Restore previously spilled arguments
3427 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3428 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3429 __ movptr (length, Address(rsp, 2*BytesPerWord));
3430 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3431 __ movptr (src, Address(rsp, 4*BytesPerWord));
3434 __ subl(length, tmp);
3435 __ addl(src_pos, tmp);
3436 __ addl(dst_pos, tmp);
3437 }
3439 __ jmp(*stub->entry());
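// Fast-path subtype check succeeded: src and dst were clobbered by
// load_klass above, so restore them from the values pushed earlier
// before falling through to the actual copy.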
3441 __ bind(cont);
3442 __ pop(dst);
3443 __ pop(src);
3444 }
3445 }
3447 #ifdef ASSERT
3448 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3449 // Sanity check the known type with the incoming class. For the
3450 // primitive case the types must match exactly with src.klass and
3451 // dst.klass each exactly matching the default type. For the
3452 // object array case, if no type check is needed then either the
3453 // dst type is exactly the expected type and the src type is a
3454 // subtype which we can't check or src is the same array as dst
3455 // but not necessarily exactly of type default_type.
3456 Label known_ok, halt;
3457 __ mov_metadata(tmp, default_type->constant_encoding());
3458 #ifdef _LP64
3459 if (UseCompressedKlassPointers) {
3460 __ encode_klass_not_null(tmp);
3461 }
3462 #endif
3464 if (basic_type != T_OBJECT) {
3466 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3467 else __ cmpptr(tmp, dst_klass_addr);
3468 __ jcc(Assembler::notEqual, halt);
3469 if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
3470 else __ cmpptr(tmp, src_klass_addr);
3471 __ jcc(Assembler::equal, known_ok);
3472 } else {
3473 if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
3474 else __ cmpptr(tmp, dst_klass_addr);
3475 __ jcc(Assembler::equal, known_ok);
3476 __ cmpptr(src, dst);
3477 __ jcc(Assembler::equal, known_ok);
3478 }
3479 __ bind(halt);
3480 __ stop("incorrect type information in arraycopy");
3481 __ bind(known_ok);
3482 }
3483 #endif
3485 #ifndef PRODUCT
3486 if (PrintC1Statistics) {
3487 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3488 }
3489 #endif
3491 #ifdef _LP64
3492 assert_different_registers(c_rarg0, dst, dst_pos, length);
3493 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3494 assert_different_registers(c_rarg1, length);
3495 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3496 __ mov(c_rarg2, length);
3498 #else
3499 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3500 store_parameter(tmp, 0);
3501 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3502 store_parameter(tmp, 1);
3503 store_parameter(length, 2);
3504 #endif // _LP64
3506 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
3507 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
3508 const char *name;
3509 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
3510 __ call_VM_leaf(entry, 0);
3512 __ bind(*stub->continuation());
3513 }
3515 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3516 assert(op->crc()->is_single_cpu(), "crc must be register");
3517 assert(op->val()->is_single_cpu(), "byte value must be register");
3518 assert(op->result_opr()->is_single_cpu(), "result must be register");
3519 Register crc = op->crc()->as_register();
3520 Register val = op->val()->as_register();
3521 Register res = op->result_opr()->as_register();
3523 assert_different_registers(val, crc, res);
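// What update_byte_crc32 computes, as a hedged C sketch (assuming the
// standard reflected CRC-32 lookup table at crc_table_addr):
//   crc = crc_table[(crc ^ val) & 0xFF] ^ (crc >> 8);
// The surrounding notl() pair applies the one's-complement pre- and
// post-conditioning that java.util.zip.CRC32 semantics require.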
3525 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3526 __ notl(crc); // ~crc
3527 __ update_byte_crc32(crc, val, res);
3528 __ notl(crc); // ~crc
3529 __ mov(res, crc);
3530 }
3532 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3533 Register obj = op->obj_opr()->as_register(); // may not be an oop
3534 Register hdr = op->hdr_opr()->as_register();
3535 Register lock = op->lock_opr()->as_register();
3536 if (!UseFastLocking) {
3537 __ jmp(*op->stub()->entry());
3538 } else if (op->code() == lir_lock) {
3539 Register scratch = noreg;
3540 if (UseBiasedLocking) {
3541 scratch = op->scratch_opr()->as_register();
3542 }
3543 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
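// lock_object implements the stack-locking fast path: it saves the
// object's mark word into the BasicLock's displaced-header slot and
// attempts to CAS a pointer to that BasicLock into the object header,
// branching to the slow-path stub on contention.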
3544 // add debug info for NullPointerException only if one is possible
3545 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3546 if (op->info() != NULL) {
3547 add_debug_info_for_null_check(null_check_offset, op->info());
3548 }
3549 // done
3550 } else if (op->code() == lir_unlock) {
3551 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3552 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3553 } else {
3554 Unimplemented();
3555 }
3556 __ bind(*op->stub()->continuation());
3557 }
3560 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3561 ciMethod* method = op->profiled_method();
3562 int bci = op->profiled_bci();
3563 ciMethod* callee = op->profiled_callee();
3565 // Update counter for all call types
3566 ciMethodData* md = method->method_data_or_null();
3567 assert(md != NULL, "Sanity");
3568 ciProfileData* data = md->bci_to_data(bci);
3569 assert(data->is_CounterData(), "need CounterData for calls");
3570 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3571 Register mdo = op->mdo()->as_register();
3572 __ mov_metadata(mdo, md->constant_encoding());
3573 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
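// In effect the update below is (a sketch with a hypothetical
// accessor, not the real MethodData API):
//   md->data_at(bci)->counter += DataLayout::counter_increment;
// with additional per-receiver-type rows maintained for virtual and
// interface calls.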
3574 Bytecodes::Code bc = method->java_code_at_bci(bci);
3575 const bool callee_is_static = callee->is_loaded() && callee->is_static();
3576 // Perform additional virtual call profiling for invokevirtual and
3577 // invokeinterface bytecodes
3578 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
3579 !callee_is_static && // required for optimized MH invokes
3580 C1ProfileVirtualCalls) {
3581 assert(op->recv()->is_single_cpu(), "recv must be allocated");
3582 Register recv = op->recv()->as_register();
3583 assert_different_registers(mdo, recv);
3584 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3585 ciKlass* known_klass = op->known_holder();
3586 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3587 // We know the type that will be seen at this call site; we can
3588 // statically update the MethodData* rather than needing to do
3589 // dynamic tests on the receiver type
3591       // NOTE: we should probably put a lock around this search to
3592       // avoid collisions between concurrent compilations
3593 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3594 uint i;
3595 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3596 ciKlass* receiver = vc_data->receiver(i);
3597 if (known_klass->equals(receiver)) {
3598 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3599 __ addptr(data_addr, DataLayout::counter_increment);
3600 return;
3601 }
3602 }
3604 // Receiver type not found in profile data; select an empty slot
3606       // Note that this is less efficient than it could be: the
3607       // generated code always writes the receiver slot of the
3608       // VirtualCallData rather than only on the first execution
3609 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3610 ciKlass* receiver = vc_data->receiver(i);
3611 if (receiver == NULL) {
3612 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3613 __ mov_metadata(recv_addr, known_klass->constant_encoding());
3614 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3615 __ addptr(data_addr, DataLayout::counter_increment);
3616 return;
3617 }
3618 }
3619 } else {
3620 __ load_klass(recv, recv);
3621 Label update_done;
3622 type_profile_helper(mdo, md, data, recv, &update_done);
3623 // Receiver did not match any saved receiver and there is no empty row for it.
3624 // Increment total counter to indicate polymorphic case.
3625 __ addptr(counter_addr, DataLayout::counter_increment);
3627 __ bind(update_done);
3628 }
3629 } else {
3630 // Static call
3631 __ addptr(counter_addr, DataLayout::counter_increment);
3632 }
3633 }
3635 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3636 Unimplemented();
3637 }
3640 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3641 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3642 }
3645 void LIR_Assembler::align_backward_branch_target() {
3646 __ align(BytesPerWord);
3647 }
3650 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3651 if (left->is_single_cpu()) {
3652 __ negl(left->as_register());
3653 move_regs(left->as_register(), dest->as_register());
3655 } else if (left->is_double_cpu()) {
3656 Register lo = left->as_register_lo();
3657 #ifdef _LP64
3658 Register dst = dest->as_register_lo();
3659 __ movptr(dst, lo);
3660 __ negptr(dst);
3661 #else
3662 Register hi = left->as_register_hi();
3663 __ lneg(hi, lo);
3664 if (dest->as_register_lo() == hi) {
3665 assert(dest->as_register_hi() != lo, "destroying register");
3666 move_regs(hi, dest->as_register_hi());
3667 move_regs(lo, dest->as_register_lo());
3668 } else {
3669 move_regs(lo, dest->as_register_lo());
3670 move_regs(hi, dest->as_register_hi());
3671 }
3672 #endif // _LP64
3674 } else if (dest->is_single_xmm()) {
3675 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
3676 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
3677 }
3678 __ xorps(dest->as_xmm_float_reg(),
3679 ExternalAddress((address)float_signflip_pool));
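// Negation is done as a bit trick: XORing the IEEE-754 sign bit
// (0x80000000 per float lane here, 0x8000000000000000 per double lane
// below) flips the sign without changing the significand and without
// raising floating-point exceptions.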
3681 } else if (dest->is_double_xmm()) {
3682 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
3683 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
3684 }
3685 __ xorpd(dest->as_xmm_double_reg(),
3686 ExternalAddress((address)double_signflip_pool));
3688 } else if (left->is_single_fpu() || left->is_double_fpu()) {
3689 assert(left->fpu() == 0, "arg must be on TOS");
3690 assert(dest->fpu() == 0, "dest must be TOS");
3691 __ fchs();
3693 } else {
3694 ShouldNotReachHere();
3695 }
3696 }
3699 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
3700 assert(addr->is_address() && dest->is_register(), "check");
3701 Register reg;
3702 reg = dest->as_pointer_register();
3703 __ lea(reg, as_Address(addr->as_address_ptr()));
3704 }
3708 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3709 assert(!tmp->is_valid(), "don't need temporary");
3710 __ call(RuntimeAddress(dest));
3711 if (info != NULL) {
3712 add_call_info_here(info);
3713 }
3714 }
3717 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3718 assert(type == T_LONG, "only for volatile long fields");
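// The Java memory model requires volatile longs to be accessed
// atomically. On 32-bit x86 an ordinary register-pair move would split
// into two 32-bit accesses, so the move is routed through an XMM or
// x87 register, which transfers all 64 bits in one memory access.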
3720 if (info != NULL) {
3721 add_debug_info_for_null_check_here(info);
3722 }
3724 if (src->is_double_xmm()) {
3725 if (dest->is_double_cpu()) {
3726 #ifdef _LP64
3727 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3728 #else
3729 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3730 __ psrlq(src->as_xmm_double_reg(), 32);
3731 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3732 #endif // _LP64
3733 } else if (dest->is_double_stack()) {
3734 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3735 } else if (dest->is_address()) {
3736 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3737 } else {
3738 ShouldNotReachHere();
3739 }
3741 } else if (dest->is_double_xmm()) {
3742 if (src->is_double_stack()) {
3743 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3744 } else if (src->is_address()) {
3745 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3746 } else {
3747 ShouldNotReachHere();
3748 }
3750 } else if (src->is_double_fpu()) {
3751 assert(src->fpu_regnrLo() == 0, "must be TOS");
3752 if (dest->is_double_stack()) {
3753 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
3754 } else if (dest->is_address()) {
3755 __ fistp_d(as_Address(dest->as_address_ptr()));
3756 } else {
3757 ShouldNotReachHere();
3758 }
3760 } else if (dest->is_double_fpu()) {
3761 assert(dest->fpu_regnrLo() == 0, "must be TOS");
3762 if (src->is_double_stack()) {
3763 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
3764 } else if (src->is_address()) {
3765 __ fild_d(as_Address(src->as_address_ptr()));
3766 } else {
3767 ShouldNotReachHere();
3768 }
3769 } else {
3770 ShouldNotReachHere();
3771 }
3772 }
3774 #ifdef ASSERT
3775 // emit run-time assertion
3776 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3777 assert(op->code() == lir_assert, "must be");
3779 if (op->in_opr1()->is_valid()) {
3780 assert(op->in_opr2()->is_valid(), "both operands must be valid");
3781 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3782 } else {
3783 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3784 assert(op->condition() == lir_cond_always, "no other conditions allowed");
3785 }
3787 Label ok;
3788 if (op->condition() != lir_cond_always) {
3789 Assembler::Condition acond = Assembler::zero;
3790 switch (op->condition()) {
3791 case lir_cond_equal: acond = Assembler::equal; break;
3792 case lir_cond_notEqual: acond = Assembler::notEqual; break;
3793 case lir_cond_less: acond = Assembler::less; break;
3794 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
3795 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3796 case lir_cond_greater: acond = Assembler::greater; break;
3797 case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
3798 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
3799 default: ShouldNotReachHere();
3800 }
3801 __ jcc(acond, ok);
3802 }
3803 if (op->halt()) {
3804 const char* str = __ code_string(op->msg());
3805 __ stop(str);
3806 } else {
3807 breakpoint();
3808 }
3809 __ bind(ok);
3810 }
3811 #endif
3813 void LIR_Assembler::membar() {
3814   // x86 is TSO: the only reordering the hardware performs is StoreLoad, so a full membar reduces to a StoreLoad barrier
3815 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
3816 }
3818 void LIR_Assembler::membar_acquire() {
3819 // No x86 machines currently require load fences
3820 // __ load_fence();
3821 }
3823 void LIR_Assembler::membar_release() {
3824 // No x86 machines currently require store fences
3825 // __ store_fence();
3826 }
3828 void LIR_Assembler::membar_loadload() {
3829 // no-op
3830 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3831 }
3833 void LIR_Assembler::membar_storestore() {
3834 // no-op
3835 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3836 }
3838 void LIR_Assembler::membar_loadstore() {
3839 // no-op
3840 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3841 }
3843 void LIR_Assembler::membar_storeload() {
3844 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3845 }
3847 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3848 assert(result_reg->is_register(), "check");
3849 #ifdef _LP64
3850 // __ get_thread(result_reg->as_register_lo());
3851   __ mov(result_reg->as_register(), r15_thread); // r15 is reserved for the current JavaThread on 64-bit
3852 #else
3853 __ get_thread(result_reg->as_register());
3854 #endif // _LP64
3855 }
3858 void LIR_Assembler::peephole(LIR_List*) {
3859 // do nothing for now
3860 }
3862 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3863 assert(data == dest, "xchg/xadd uses only 2 operands");
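// LOCK XADD atomically adds to memory and returns the old value in the
// data register; XCHG with a memory operand is implicitly locked, so
// the xchg paths below need no explicit lock() prefix.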
3865 if (data->type() == T_INT) {
3866 if (code == lir_xadd) {
3867 if (os::is_MP()) {
3868 __ lock();
3869 }
3870 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3871 } else {
3872 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3873 }
3874 } else if (data->is_oop()) {
3875     assert(code == lir_xchg, "only xchg is supported for oops");
3876 Register obj = data->as_register();
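// With compressed oops the field is only 32 bits wide: exchange the
// encoded narrow oop with a 32-bit xchgl, then decode the previous
// value back into a full oop.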
3877 #ifdef _LP64
3878 if (UseCompressedOops) {
3879 __ encode_heap_oop(obj);
3880 __ xchgl(obj, as_Address(src->as_address_ptr()));
3881 __ decode_heap_oop(obj);
3882 } else {
3883 __ xchgptr(obj, as_Address(src->as_address_ptr()));
3884 }
3885 #else
3886 __ xchgl(obj, as_Address(src->as_address_ptr()));
3887 #endif
3888 } else if (data->type() == T_LONG) {
3889 #ifdef _LP64
3890 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3891 if (code == lir_xadd) {
3892 if (os::is_MP()) {
3893 __ lock();
3894 }
3895 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3896 } else {
3897 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3898 }
3899 #else
3900 ShouldNotReachHere();
3901 #endif
3902 } else {
3903 ShouldNotReachHere();
3904 }
3905 }
3907 #undef __