/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
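// Rounding the address down with ~0xF is safe only because fp_signmask_pool
// below reserves one extra 128-bit slot of slack, so the aligned operand
// always stays inside the pool.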

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
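// A sketch of how these pools are typically consumed (illustrative only; the
// actual emission sites appear later in this file):
//   __ andps(reg, ExternalAddress((address)float_signmask_pool));  // AbsF: clear the sign bit
//   __ xorps(reg, ExternalAddress((address)float_signflip_pool));  // NegF: flip the sign bit
// The 16-byte alignment guarantees the SSE memory operands are legal.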



NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
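// For example (hypothetical operands): if tmp1 happens to alias the register
// we must preserve, select_different_registers(obj, rdx, tmp1, tmp2) retargets
// tmp1 to the spare register rdx, so the caller can clobber tmp1/tmp2 freely
// without touching obj.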



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}
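// Loading the 24-bit precision control word makes subsequent x87 operations
// round their results to single precision, approximating IEEE float semantics
// on the 80-bit FPU stack; reset_FPU() below restores the standard control word.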

void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
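// Mapping sketch: a LIR_Address with base=rbx, index=rcx, scale=times_4 and
// disp=16 becomes Address(rbx, rcx, Address::times_4, 16), i.e. the x86
// operand [rbx + rcx*4 + 16]; a base-less address is an absolute literal and
// may need the tmp register on LP64 when it is out of rip-relative range.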


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals are a direct copy of the interpreter frame, so the first slot in
  // the local array of the osr buffer is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}
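// Alignment sketch (illustrative numbers): with CodeEntryAlignment at its
// common default of 32 and ic_cmp_size == 10 on LP64, nops are emitted until
// (offset + 10) % 32 == 0, so the verified entry point that immediately
// follows the inline cache compare lands on a 32-byte boundary.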


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
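// Worked example (hypothetical sizes): on LP64 a stack slot is 4 bytes and
// slots_per_word is 2, so a framesize of 16 slots yields
// (16 - 2*2) * 4 = 48 bytes of rsp decrement, the return address and saved
// link having been pushed already.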


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(rsi));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
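  // A note on the push/jump pair above: pushing our own pc makes the deopt
  // blob observe this handler's address as the return address, which is how
  // it locates the deoptimization point (a sketch of the usual contract; see
  // SharedRuntime::deopt_blob() for the authoritative side).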
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// This is the fast version of java.lang.String.compare; it has no OSR entry
// and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movptr(rbx, rcx); // receiver is in rcx
  __ movptr(rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr(rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl(rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea(rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl(rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea(rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr(rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl(rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ lea(rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl(rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
    __ lea(rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  __ mov(rcx, rbx);
  __ subptr(rbx, rax); // subtract lengths
  __ push(rbx);        // result
  __ cmov(Assembler::lessEqual, rax, rcx);

  // is minimum length 0?
  Label noLoop, haveResult;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_short(rcx, Address(rdi, 0));
  __ load_unsigned_short(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi and rdi to the end of the arrays (arrays have same length)
  // negate the index

  __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negptr(rax);
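  // rax now runs from -min_length up to 0, so indexing [base + rax*2] walks
  // both strings forward while a single increment/jcc pair terminates the
  // loop when the index reaches zero.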

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  __ pop(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // the leave instruction is going to discard the TOS value
  __ mov(rax, rcx); // result of call is in rax
}


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);
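  // The test below is a read from the VM's global polling page: to bring
  // threads to a safepoint the VM protects that page, so the load faults and
  // the signal handler takes over (the data read and the register written
  // are otherwise ignored).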

  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    __ relocate(relocInfo::poll_return_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    __ testl(rax, polling_page);
  }
  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);
  guarantee(info != NULL, "Shouldn't be NULL");
  int offset = __ offset();
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    offset = __ offset();
    add_debug_info_for_branch(info);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    add_debug_info_for_branch(info);
    __ testl(rax, polling_page);
  }
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32-bit mode, so this doesn't produce a useless literal move
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising, but we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

  // special moves from fpu-register to xmm-register
  // necessary for method results
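  // (there is no direct x87<->XMM move instruction, so the value takes a
  // round trip through the scratch stack slot at [rsp + 0])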
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

  // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr(dst, src->as_register());
    } else if (type == T_METADATA) {
      __ movptr(dst, src->as_register());
    } else {
      __ movl(dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dstLO, src->as_register_lo());
    NOT_LP64(__ movptr(dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)  __ fstp_s(dst_addr);
    else                __ fst_s(dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)  __ fstp_d(dst_addr);
    else                __ fst_d(dst_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_s(as_Address(to_addr));
        else                __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_d(as_Address(to_addr));
        else                __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64-bit
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}
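// A note on the 2 * wordSize above (reasoning from the in-line comment): the
// slot addresses here are rsp-relative, and the first pushl moves rsp down a
// word, so the slot that was at src + wordSize must be addressed as
// src + 2 * wordSize for the second push; the pops mirror this in reverse.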


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre-P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
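        // Both destination registers overlap the address registers, so
        // materialize the address once with lea and then load the two halves
        // relative to it (to_hi serves as the pointer before being loaded).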
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
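        // Manual sign extension: load the byte, shift it to the top of the
        // 32-bit register, then shift it back arithmetically, e.g.
        // 0x000000FF -> 0xFF000000 -> 0xFFFFFFFF.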
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (ReadPrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchr(from_addr);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (AllocatePrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      case 3:
        __ prefetchw(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchw(from_addr);
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
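      // ucomiss/ucomisd set the parity flag when either operand is NaN, so
      // this branch routes unordered comparisons to the unordered successor
      // before the ordered condition below is tested.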
|
1490 switch(op->cond()) { |
|
1491 case lir_cond_equal: acond = Assembler::equal; break; |
|
1492 case lir_cond_notEqual: acond = Assembler::notEqual; break; |
|
1493 case lir_cond_less: acond = Assembler::below; break; |
|
1494 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; |
|
1495 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; |
|
1496 case lir_cond_greater: acond = Assembler::above; break; |
|
1497 default: ShouldNotReachHere(); |
|
1498 } |
|
1499 } else { |
|
1500 switch (op->cond()) { |
|
1501 case lir_cond_equal: acond = Assembler::equal; break; |
|
1502 case lir_cond_notEqual: acond = Assembler::notEqual; break; |
|
1503 case lir_cond_less: acond = Assembler::less; break; |
|
1504 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; |
|
1505 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; |
|
1506 case lir_cond_greater: acond = Assembler::greater; break; |
|
1507 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; |
|
1508 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; |
|
1509 default: ShouldNotReachHere(); |
|
1510 } |
|
1511 } |
|
1512 __ jcc(acond,*(op->label())); |
|
1513 } |
|
1514 } |
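// Note on the float branch above: ucomiss/ucomisd set the flags like an
// unsigned compare and raise the parity flag when either operand is NaN,
// which is why the unordered successor is taken on parity and why
// less/greater map onto the below/above conditions.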

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      __ movptr(Address(rsp, 0), src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}
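// In the _f2i/_d2i case above, the truncating conversions return the x86
// "integer indefinite" value 0x80000000 on overflow or NaN, so only results
// equal to that value are routed to the fixup stub; everything in range
// falls through with the correct answer already in the destination register.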

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
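// The init_check path keeps sending allocations to the slow path until the
// klass reaches fully_initialized, so threads racing with <clinit> take the
// runtime route instead of fast-allocating an uninitialized class's instance.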

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
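// The temp shuffle above guarantees the length is also live in one of the
// registers handed to allocate_array: if len already aliases a temp, that
// temp is simply retargeted to tmp3; otherwise len is copied into tmp3.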

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
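// If the receiver matches no row and every row is already taken, control
// falls out of the second loop without recording anything; the row-limited
// receiver-type profile simply saturates.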

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  __ cmpptr(obj, (int32_t)NULL_WORD);
  if (op->should_profile()) {
    Label not_null;
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ orl(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#else
    if (k->is_loaded()) {
      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#endif
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
#ifdef _LP64
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
#ifdef _LP64
        __ cmpptr(klass_RInfo, k_RInfo);
#else
        __ cmpklass(klass_RInfo, k->constant_encoding());
#endif // _LP64
        __ jcc(Assembler::equal, *success_target);

        __ push(klass_RInfo);
#ifdef _LP64
        __ push(k_RInfo);
#else
        __ pushklass(k->constant_encoding());
#endif // _LP64
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
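        // The stub returns its boolean result in the stack slot that held
        // the first-pushed (deepest) word, i.e. the sub-klass; the first pop
        // below merely discards the super-klass word, the second fetches the
        // result.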
        __ pop(klass_RInfo);
        __ pop(klass_RInfo);
        // result is a boolean
        __ cmpl(klass_RInfo, 0);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(klass_RInfo);
      __ pop(k_RInfo);
      // result is a boolean
      __ cmpl(k_RInfo, 0);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ jmp(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ subptr(counter_addr, DataLayout::counter_increment);
    __ jmp(*failure);
  }
  __ jmp(*success);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    __ cmpptr(value, (int32_t)NULL_WORD);
    if (op->should_profile()) {
      Label not_null;
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ orl(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ pop(klass_RInfo);
    __ pop(k_RInfo);
    // result is a boolean
    __ cmpl(k_RInfo, 0);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ jmpb(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ subptr(counter_addr, DataLayout::counter_increment);
      __ jmp(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ xorptr(dst, dst);
    __ jmpb(done);
    __ bind(success);
    __ movptr(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
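// Of the three LIR codes handled above, lir_store_check validates the stored
// value against the array's element klass, lir_checkcast just copies obj to
// dst once the helper succeeds, and lir_instanceof materializes the outcome
// as 0 or 1 in dst.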


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        if (os::is_MP()) {
          __ lock();
        }
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        if (os::is_MP()) {
          __ lock();
        }
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    Unimplemented();
  }
}
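// All variants above follow the hardware CMPXCHG convention: rax (edx:eax
// for the 32-bit cmpxchg8b case) implicitly supplies the expected value and
// receives the old memory value, while the LOCK prefix makes the exchange
// atomic on multiprocessor systems.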

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
    } else {
      ShouldNotReachHere();
    }

  } else {
    Label skip;
    __ jcc(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
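// The scheme above loads opr1 into result unconditionally and then replaces
// it with opr2 under the negated condition: branch-free via CMOV when the
// CPU supports it, otherwise with a short conditional branch around the move.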


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
          break;
        case lir_mul:
#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
#else
          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
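          // 32-bit long multiply, schoolbook style: the two cross products
          // lo*hi are summed into the high registers, then the unsigned mull
          // forms lo*lo in edx:eax and the cross-product sum is folded into
          // the high word, leaving the 64-bit result in rdx:rax.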
          __ imull(lreg_hi, rreg_lo);
          __ imull(rreg_hi, lreg_lo);
          __ addl (rreg_hi, lreg_hi);
          __ mull (rreg_lo);
          __ addl (lreg_hi, rreg_hi);
#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }
#else
      jint c_lo = right->as_constant_ptr()->as_jint_lo();
      jint c_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, c_lo);
          __ adcl(lreg_hi, c_hi);
          break;
        case lir_sub:
          __ subptr(lreg_lo, c_lo);
          __ sbbl(lreg_hi, c_hi);
          break;
        default:
          ShouldNotReachHere();
      }
#endif // _LP64

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg); break;
        case lir_sub: __ subss(lreg, rreg); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr); break;
        case lir_sub: __ subss(lreg, raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg); break;
        case lir_sub: __ subsd(lreg, rreg); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr); break;
        case lir_sub: __ subsd(lreg, raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "fpu stack allocation required");

    if (right->is_single_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnr() == 0, "left must be on TOS");
      assert(dest->fpu_regnr() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        address const_addr = float_constant(right->as_jfloat());
        assert(const_addr != NULL, "incorrect float/double constant maintenance");
        // hack for now
        raddr = __ as_Address(InternalAddress(const_addr));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_s(raddr); break;
        case lir_sub: __ fsub_s(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_s(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_s(raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(dest->is_double_fpu(), "fpu stack allocation required");

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp(left->fpu_regnrLo() + 1);
    }

    if (right->is_double_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_d(raddr); break;
        case lir_sub: __ fsub_d(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_d(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_d(raddr); break;
        default: ShouldNotReachHere();
      }
    }

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp(dest->fpu_regnrLo() + 1);
    }

  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
  assert(left_index == 0 || right_index == 0, "either must be on top of stack");

  bool left_is_tos = (left_index == 0);
  bool dest_is_tos = (dest_index == 0);
  int non_tos_index = (left_is_tos ? right_index : left_index);

  switch (code) {
    case lir_add:
      if (pop_fpu_stack)      __ faddp(non_tos_index);
      else if (dest_is_tos)   __ fadd (non_tos_index);
      else                    __ fadda(non_tos_index);
      break;

    case lir_sub:
      if (left_is_tos) {
        if (pop_fpu_stack)    __ fsubrp(non_tos_index);
        else if (dest_is_tos) __ fsub  (non_tos_index);
        else                  __ fsubra(non_tos_index);
      } else {
        if (pop_fpu_stack)    __ fsubp (non_tos_index);
        else if (dest_is_tos) __ fsubr (non_tos_index);
        else                  __ fsuba (non_tos_index);
      }
      break;

    case lir_mul_strictfp: // fall through
    case lir_mul:
      if (pop_fpu_stack)      __ fmulp(non_tos_index);
      else if (dest_is_tos)   __ fmul (non_tos_index);
      else                    __ fmula(non_tos_index);
      break;

    case lir_div_strictfp: // fall through
    case lir_div:
      if (left_is_tos) {
        if (pop_fpu_stack)    __ fdivrp(non_tos_index);
        else if (dest_is_tos) __ fdiv  (non_tos_index);
        else                  __ fdivra(non_tos_index);
      } else {
        if (pop_fpu_stack)    __ fdivp (non_tos_index);
        else if (dest_is_tos) __ fdivr (non_tos_index);
        else                  __ fdiva (non_tos_index);
      }
      break;

    case lir_rem:
      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
      __ fremr(noreg);
      break;

    default:
      ShouldNotReachHere();
  }
}
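// All x87 arithmetic above is expressed relative to the top of stack: the
// plain forms leave the result on TOS, the 'a' forms write it into the
// non-TOS register, the 'p' forms additionally pop, and the reversed (r)
// variants flip operand order for the non-commutative sub and div, so the
// result lands in dest_index without extra fxch instructions.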


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch (code) {
      case lir_abs:
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress((address)double_signmask_pool));
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default: ShouldNotReachHere();
    }

  } else if (value->is_double_fpu()) {
    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch (code) {
      case lir_log:   __ flog();   break;
      case lir_log10: __ flog10(); break;
      case lir_abs:   __ fabs();   break;
      case lir_sqrt:  __ fsqrt();  break;
      case lir_sin:
        // Should consider not saving rbx, if not necessary
        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
        break;
      case lir_cos:
        // Should consider not saving rbx, if not necessary
        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
        break;
      case lir_tan:
        // Should consider not saving rbx, if not necessary
        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
        break;
      case lir_exp:
        __ exp_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      case lir_pow:
        __ pow_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      default: ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}
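// In the XMM case, abs reduces to clearing the sign bit with the 128-bit
// sign mask and sqrt has a direct SSE2 encoding; log, trig, exp and pow
// have no SSE equivalents and therefore stay on the x87 stack.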

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl(reg, val); break;
        case lir_logic_or:  __ orl (reg, val); break;
        case lir_logic_xor: __ xorl(reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl(reg, raddr); break;
        case lir_logic_or:  __ orl (reg, raddr); break;
        case lir_logic_xor: __ xorl(reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr(reg, rright); break;
        case lir_logic_or:  __ orptr (reg, rright); break;
        case lir_logic_xor: __ xorptr(reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
#ifdef _LP64
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
#else
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_logic_and:
          __ andl(l_lo, r_lo);
          __ andl(l_hi, r_hi);
          break;
        case lir_logic_or:
          __ orl(l_lo, r_lo);
          __ orl(l_hi, r_hi);
          break;
        case lir_logic_xor:
          __ xorl(l_lo, r_lo);
          __ xorl(l_hi, r_hi);
          break;
        default: ShouldNotReachHere();
      }
#endif // _LP64
    } else {
#ifdef _LP64
      Register r_lo;
      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
#else
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();
      assert(l_lo != r_hi, "overwriting registers");
#endif
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

#ifdef _LP64
    move_regs(l_lo, dst_lo);
#else
    if (dst_lo == l_hi) {
      assert(dst_hi != l_lo, "overwriting registers");
      move_regs(l_hi, dst_hi);
      move_regs(l_lo, dst_lo);
    } else {
      assert(dst_lo != l_hi, "overwriting registers");
      move_regs(l_lo, dst_lo);
      move_regs(l_hi, dst_hi);
    }
#endif // _LP64
  }
}


// we assume that rax and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(),   "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  // assert(left->destroys_register(), "check");
  // assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    int divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
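      // Power-of-2 division rounded toward zero: after cdql, rdx is 0 for a
      // non-negative and -1 for a negative dividend. Adding the masked bias
      // divisor - 1 (or subtracting -1 when dividing by 2) before the
      // arithmetic shift makes the shift truncate toward zero as Java requires.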
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2_intptr(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
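      // Power-of-2 remainder without a divide: keep the low bits plus the
      // sign bit; a non-negative result is already correct, while a negative
      // one is folded back by the decrement/or/increment sequence so the
      // remainder carries the sign of the dividend.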
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    int idivl_offset = __ corrected_idivl(rreg);
    add_debug_info_for_div0(idivl_offset, info);
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        __ cmpl(reg1, c->as_jint());
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ cmpptr(reg1, (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, o);
          __ cmpptr(reg1, rscratch1);
#else
          __ cmpoop(reg1, c->as_jobject());
#endif // _LP64
        }
      } else {
        fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
#ifdef _LP64
      __ cmpptr(xlo, opr2->as_register_lo());
#else
      // cpu register - cpu register
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ subl(xlo, ylo);
      __ sbbl(xhi, yhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orl(xhi, xlo);
      }
#endif // _LP64
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
#ifdef _LP64
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
#else
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
      __ orl(xhi, xlo);
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_fpu() || opr1->is_double_fpu()) {
    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
    assert(opr2->is_fpu_register(), "both must be registers");
    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
#endif // LP64
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
#ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpptr(rscratch1, as_Address(addr, noreg));
#else
      __ cmpoop(as_Address(addr), c->as_jobject());
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
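// Note: on 32-bit the long-long compare above is performed destructively
// (subl/sbbl into the left operand), and for equal/notEqual both halves are
// folded together with orl so a single flag test suffices afterwards.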

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");

      assert(left->fpu() == 0, "left must be on TOS");
      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ set_byte_if_not_zero(dest);
    __ movzbl(dest, dest);
    __ bind(done);
#else
    __ lcmp2int(left->as_register_hi(),
                left->as_register_lo(),
                right->as_register_hi(),
                right->as_register_lo());
    move_regs(left->as_register_hi(), dst->as_register());
#endif // _LP64
  }
}
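// The 64-bit lir_cmp_l2i sequence above produces the canonical -1/0/1: dest
// is preset to -1 and kept on 'less'; otherwise set_byte_if_not_zero turns
// the compare's zero flag into 0 (equal) or 1 (greater).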


void LIR_Assembler::align_call(LIR_Code code) {
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
      case lir_dynamic_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
}
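// Word-aligning the call's 32-bit displacement matters on MP systems: the
// displacement gets patched at runtime when the callee is (re)resolved, and
// keeping it within a single word lets that store be atomic with respect to
// other CPUs concurrently executing this code.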


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert(!os::is_MP() ||
         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler; the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must already be in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else

    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}
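
// Illustrative only: the `count & 0x1F` above implements the Java language
// rule (JLS 15.19) that an int shift distance uses only its low five bits,
// so a constant shift like (x << 37) must behave exactly like (x << 5),
// since 37 & 0x1F == 5.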


void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o);
}
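
// Illustrative only: store_parameter(x, n) spills x into the frame's reserved
// out-argument area at [rsp + n*BytesPerWord]. For example, on a 64-bit VM
// store_parameter(length, 2) writes length to [rsp + 16], where the slow-path
// code (such as the arraycopy paths below) can reload it after a stub call
// has clobbered the registers.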


// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code; they must be thrown in the System.arraycopy
// activation frame. We could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
#endif // _WIN64
#else
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(C_entry, 5); // removes pushed parameters from the stack
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameters from the stack
    }

#endif // _LP64

    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    if (copyfunc_addr != NULL) {
      __ mov(tmp, rax);
      __ xorl(tmp, -1);
    }

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr(dst,     Address(rsp, 0*BytesPerWord));
    __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr(length,  Address(rsp, 2*BytesPerWord));
    __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr(src,     Address(rsp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      __ subl(length, tmp);
      __ addl(src_pos, tmp);
      __ addl(dst_pos, tmp);
    }
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }
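
  // A note on the generic path above (descriptive only, based on the stub's
  // observed use here): rax == 0 means the stub copied everything, so we fall
  // through to the continuation. A nonzero rax encodes ~(elements copied), so
  // `xorl(tmp, -1)` recovers the copied count, which is then used to advance
  // src_pos/dst_pos and shrink length before taking the slow path.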

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      shift_amount = 0;
      scale = Address::times_1;
      break;
    case 2 :
      shift_amount = 1;
      scale = Address::times_2;
      break;
    case 4 :
      shift_amount = 2;
      scale = Address::times_4;
      break;
    case 8 :
      shift_amount = 3;
      scale = Address::times_8;
      break;
    default:
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and positions are all sign-extended at this point on 64-bit

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
    __ jcc(Assembler::zero, *stub->continuation());
  }
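
  // Illustrative only: each range check above computes pos + length with a
  // single lea and compares it unsigned ("above") against the array length
  // field. For example, copying 5 elements from src_pos 2 of a 6-element
  // array gives 2 + 5 = 7 > 6, so the slow path is taken. By this point both
  // operands are known to be non-negative, so the unsigned compare is safe.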

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Only one of the two is known to be an object array; check that
          // the other one is an object array as well.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); // higher 32 bits must be zero

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif

        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr(dst,     Address(rsp, 0*BytesPerWord));
        __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr(length,  Address(rsp, 2*BytesPerWord));
        __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr(src,     Address(rsp, 4*BytesPerWord));

        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
      else                            __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}
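
// Illustrative only: the lea calls above compute the first element's address
// as base + arrayOopDesc::base_offset_in_bytes(type) + pos * elem_size. For
// an int array (elem_size == 4, scale == times_4) with a header of B bytes,
// element src_pos starts at src + B + 4*src_pos; the scaled-index addressing
// mode folds the whole computation into a single instruction.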

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}
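
// A minimal sketch (not the VM's code) of the table-driven step that
// update_byte_crc32 performs, assuming the standard reflected CRC-32 byte
// update over the table that res points at:
//
//   uint32_t update_byte(uint32_t crc, uint8_t b, const uint32_t* table) {
//     return table[(crc ^ b) & 0xFF] ^ (crc >> 8);
//   }
//
// The surrounding notl pair implements the standard CRC-32 pre- and
// post-inversion around the table kernel.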

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
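
// A conceptual sketch (names illustrative, not the VM's declarations) of the
// receiver-type table that emit_profile_call maintains in the MethodData:
//
//   struct VirtualCallRow { Klass* receiver; intptr_t count; };
//
// There are row_limit() rows per call site: a monomorphic site fills row 0,
// a second receiver type fills row 1, and once every row is taken only the
// total counter advances, marking the site as polymorphic.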

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jccb(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // Different from before; cannot keep an accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }

    __ bind(next);
  }
}
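
// A conceptual note on the type entry updated above: it is one pointer-sized
// word in the MethodData holding a klass pointer with flag bits OR'ed into
// its low (alignment) bits, TypeEntries::null_seen and
// TypeEntries::type_unknown. XOR'ing a freshly loaded klass against the word
// and masking with type_klass_mask therefore yields zero exactly when the
// same klass was recorded before, regardless of the flag bits.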

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}
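
// Illustrative only: the xorps/xorpd paths negate a float or double by
// flipping its IEEE-754 sign bit with the float_signflip_pool and
// double_signflip_pool masks. For example, 1.0f is 0x3F800000; XOR with
// 0x80000000 yields 0xBF800000, i.e. -1.0f, and +0.0f correctly becomes
// -0.0f.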


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
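
// Context note (descriptive, not emitted code): a Java volatile long must be
// read and written atomically. On 32-bit x86 an ordinary pair of 32-bit
// moves could tear, so the code above goes through a single 64-bit XMM or
// x87 access (movdbl / fild_d / fistp_d) instead; on 64-bit a plain 64-bit
// move is already atomic.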

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
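
// Context note: under x86's TSO memory model the only reordering the
// hardware performs is letting a later load pass an earlier store, so only
// the StoreLoad barrier must emit a real instruction; acquire, release,
// load-load, store-store and load-store orderings hold for free, which is
// why those hooks above are compile-time no-ops.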

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}
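
// Context note: xadd needs an explicit lock prefix on MP systems to make the
// read-modify-write atomic, while xchg with a memory operand is implicitly
// locked by the processor, so no prefix is emitted on the xchg paths above.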

#undef __