Thu, 20 Sep 2012 16:49:17 +0200
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
Summary: use shorter instruction sequences for atomic add and atomic exchange when possible.
Reviewed-by: kvn, jrose
1 /*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "c1/c1_ValueStack.hpp"
31 #include "ci/ciArrayKlass.hpp"
32 #include "ci/ciInstance.hpp"
33 #include "gc_interface/collectedHeap.hpp"
34 #include "memory/barrierSet.hpp"
35 #include "memory/cardTableModRefBS.hpp"
36 #include "nativeInst_sparc.hpp"
37 #include "oops/objArrayKlass.hpp"
38 #include "runtime/sharedRuntime.hpp"
40 #define __ _masm->
43 //------------------------------------------------------------
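// A "small" constant here is one that fits in SPARC's 13-bit signed
// immediate field (simm13, range -4096..4095) and can therefore be
// encoded directly in a single arithmetic instruction.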
46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
47 if (opr->is_constant()) {
48 LIR_Const* constant = opr->as_constant_ptr();
49 switch (constant->type()) {
50 case T_INT: {
51 jint value = constant->as_jint();
52 return Assembler::is_simm13(value);
53 }
55 default:
56 return false;
57 }
58 }
59 return false;
60 }
63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
64 switch (op->code()) {
65 case lir_null_check:
66 return true;
69 case lir_add:
70 case lir_ushr:
71 case lir_shr:
72 case lir_shl:
73 // integer shifts and adds are always one instruction
74 return op->result_opr()->is_single_cpu();
77 case lir_move: {
78 LIR_Op1* op1 = op->as_Op1();
79 LIR_Opr src = op1->in_opr();
80 LIR_Opr dst = op1->result_opr();
82 if (src == dst) {
83 NEEDS_CLEANUP;
84 // this works around a problem where moves with the same src and dst
85 // end up in the delay slot and then the assembler swallows the mov
86 // since it has no effect and then it complains because the delay slot
87 // is empty. returning false stops the optimizer from putting this in
88 // the delay slot
89 return false;
90 }
92 // don't put moves involving oops into the delay slot since the VerifyOops code
93 // will make it much larger than a single instruction.
94 if (VerifyOops) {
95 return false;
96 }
98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
100 return false;
101 }
103 if (UseCompressedOops) {
104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
106 }
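// (with compressed oops, an oop store/load expands to an encode/decode
// plus the memory access, so it can never be a single instruction)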
108 if (dst->is_register()) {
109 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
110 return !PatchALot;
111 } else if (src->is_single_stack()) {
112 return true;
113 }
114 }
116 if (src->is_register()) {
117 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
118 return !PatchALot;
119 } else if (dst->is_single_stack()) {
120 return true;
121 }
122 }
124 if (dst->is_register() &&
125 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
126 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
127 return true;
128 }
130 return false;
131 }
133 default:
134 return false;
135 }
136 ShouldNotReachHere();
137 }
140 LIR_Opr LIR_Assembler::receiverOpr() {
141 return FrameMap::O0_oop_opr;
142 }
145 LIR_Opr LIR_Assembler::osrBufferPointer() {
146 return FrameMap::I0_opr;
147 }
150 int LIR_Assembler::initial_frame_size_in_bytes() {
151 return in_bytes(frame_map()->framesize_in_bytes());
152 }
155 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
156 // we fetch the class of the receiver (O0) and compare it with the cached class.
157 // If they do not match we jump to slow case.
158 int LIR_Assembler::check_icache() {
159 int offset = __ offset();
160 __ inline_cache_check(O0, G5_inline_cache_reg);
161 return offset;
162 }
165 void LIR_Assembler::osr_entry() {
166 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
167 //
168 // 1. Create a new compiled activation.
169 // 2. Initialize local variables in the compiled activation. The expression stack must be empty
170 // at the osr_bci; it is not initialized.
171 // 3. Jump to the continuation address in compiled code to resume execution.
173 // OSR entry point
174 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
175 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
176 ValueStack* entry_state = osr_entry->end()->state();
177 int number_of_locks = entry_state->locks_size();
179 // Create a frame for the compiled activation.
180 __ build_frame(initial_frame_size_in_bytes());
182 // OSR buffer is
183 //
184 // locals[nlocals-1..0]
185 // monitors[number_of_locks-1..0]
186 //
187 // locals is a direct copy of the interpreter frame, so the first slot
188 // in the local array is the last local from the interpreter
189 // and the last slot is local[0] (the receiver) from the interpreter
190 //
191 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
192 // from the interpreter frame, and the nth lock slot in the osr buffer is the
193 // 0th lock in the interpreter frame (the method lock if a synchronized method)
195 // Initialize monitors in the compiled activation.
196 // I0: pointer to osr buffer
197 //
198 // All other registers are dead at this point and the locals will be
199 // copied into place by code emitted in the IR.
201 Register OSR_buf = osrBufferPointer()->as_register();
202 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
203 int monitor_offset = BytesPerWord * method()->max_locals() +
204 (2 * BytesPerWord) * (number_of_locks - 1);
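// e.g. assuming BytesPerWord == 8, max_locals == 2 and number_of_locks == 2:
// monitor_offset = 8*2 + 16*1 = 32, so the loop below reads lock 0 at
// slot_offset 32 and lock 1 at slot_offset 16 (lock word first, its oop
// one word above it).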
205 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
206 // the OSR buffer using 2 word entries: first the lock and then
207 // the oop.
208 for (int i = 0; i < number_of_locks; i++) {
209 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
210 #ifdef ASSERT
211 // verify the interpreter's monitor has a non-null object
212 {
213 Label L;
214 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
215 __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
216 __ stop("locked object is NULL");
217 __ bind(L);
218 }
219 #endif // ASSERT
220 // Copy the lock field into the compiled activation.
221 __ ld_ptr(OSR_buf, slot_offset + 0, O7);
222 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
223 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
224 __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
225 }
226 }
227 }
230 // Optimized Library calls
231 // This is the fast version of java.lang.String.compare; it has no
232 // OSR entry and therefore we generate a slow version for OSRs
233 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
234 Register str0 = left->as_register();
235 Register str1 = right->as_register();
237 Label Ldone;
239 Register result = dst->as_register();
240 {
241 // Get a pointer to the first character of string0 in tmp0
242 // and get string0.length() in str0
243 // Get a pointer to the first character of string1 in tmp1
244 // and get string1.length() in str1
245 // Also, get string0.length() - string1.length() in
246 // O7 and get the condition code set
247 // Note: some instructions have been hoisted for better instruction scheduling
249 Register tmp0 = L0;
250 Register tmp1 = L1;
251 Register tmp2 = L2;
253 int value_offset = java_lang_String::value_offset_in_bytes(); // char array
254 if (java_lang_String::has_offset_field()) {
255 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
256 int count_offset = java_lang_String::count_offset_in_bytes();
257 __ load_heap_oop(str0, value_offset, tmp0);
258 __ ld(str0, offset_offset, tmp2);
259 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
260 __ ld(str0, count_offset, str0);
261 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
262 } else {
263 __ load_heap_oop(str0, value_offset, tmp1);
264 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
265 __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
266 }
268 // str1 may be null
269 add_debug_info_for_null_check_here(info);
271 if (java_lang_String::has_offset_field()) {
272 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
273 int count_offset = java_lang_String::count_offset_in_bytes();
274 __ load_heap_oop(str1, value_offset, tmp1);
275 __ add(tmp0, tmp2, tmp0);
277 __ ld(str1, offset_offset, tmp2);
278 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
279 __ ld(str1, count_offset, str1);
280 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
281 __ add(tmp1, tmp2, tmp1);
282 } else {
283 __ load_heap_oop(str1, value_offset, tmp2);
284 __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
285 __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
286 }
287 __ subcc(str0, str1, O7);
288 }
290 {
291 // Compute the minimum of the string lengths, scale it and store it in limit
292 Register count0 = I0;
293 Register count1 = I1;
294 Register limit = L3;
296 Label Lskip;
297 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
298 __ br(Assembler::greater, true, Assembler::pt, Lskip);
299 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
300 __ bind(Lskip);
302 // If either string is empty (or both of them) the result is the difference in lengths
303 __ cmp(limit, 0);
304 __ br(Assembler::equal, true, Assembler::pn, Ldone);
305 __ delayed()->mov(O7, result); // result is difference in lengths
306 }
308 {
309 // Neither string is empty
310 Label Lloop;
312 Register base0 = L0;
313 Register base1 = L1;
314 Register chr0 = I0;
315 Register chr1 = I1;
316 Register limit = L3;
318 // Shift base0 and base1 to the end of the arrays, negate limit
319 __ add(base0, limit, base0);
320 __ add(base1, limit, base1);
321 __ neg(limit); // limit = -min{string0.length(), string1.length()}
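// The loop below indexes off the array ends with this negative offset and
// counts it up toward zero (inccc), so one condition-code check serves as
// both the loop bound test and the fall-through when the common prefix
// matched completely.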
323 __ lduh(base0, limit, chr0);
324 __ bind(Lloop);
325 __ lduh(base1, limit, chr1);
326 __ subcc(chr0, chr1, chr0);
327 __ br(Assembler::notZero, false, Assembler::pn, Ldone);
328 assert(chr0 == result, "result must be pre-placed");
329 __ delayed()->inccc(limit, sizeof(jchar));
330 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
331 __ delayed()->lduh(base0, limit, chr0);
332 }
334 // If strings are equal up to min length, return the length difference.
335 __ mov(O7, result);
337 // Otherwise, return the difference between the first mismatched chars.
338 __ bind(Ldone);
339 }
342 // --------------------------------------------------------------------------------------------
344 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
345 if (!GenerateSynchronizationCode) return;
347 Register obj_reg = obj_opr->as_register();
348 Register lock_reg = lock_opr->as_register();
350 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
351 Register reg = mon_addr.base();
352 int offset = mon_addr.disp();
353 // compute pointer to BasicLock
354 if (mon_addr.is_simm13()) {
355 __ add(reg, offset, lock_reg);
356 }
357 else {
358 __ set(offset, lock_reg);
359 __ add(reg, lock_reg, lock_reg);
360 }
361 // unlock object
362 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
363 // note: the stub must be created after the exception handler, therefore it
364 // is appended here as a call stub
365 _slow_case_stubs->append(slow_case);
366 if (UseFastLocking) {
367 // try inlined fast unlocking first, revert to slow locking if it fails
368 // note: lock_reg points to the displaced header since the displaced header offset is 0!
369 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
370 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
371 } else {
372 // always do slow unlocking
373 // note: the slow unlocking code could be inlined here, however if we use
374 // slow unlocking, speed doesn't matter anyway and this solution is
375 // simpler and requires less duplicated code - additionally, the
376 // slow unlocking code is the same in either case which simplifies
377 // debugging
378 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
379 __ delayed()->nop();
380 }
381 // done
382 __ bind(*slow_case->continuation());
383 }
386 int LIR_Assembler::emit_exception_handler() {
387 // if the last instruction is a call (typically to do a throw which
388 // is coming at the end after block reordering) the return address
389 // must still point into the code area in order to avoid assertion
390 // failures when searching for the corresponding bci => add a nop
391 // (was bug 5/14/1999 - gri)
392 __ nop();
394 // generate code for exception handler
395 ciMethod* method = compilation()->method();
397 address handler_base = __ start_a_stub(exception_handler_size);
399 if (handler_base == NULL) {
400 // not enough space left for the handler
401 bailout("exception handler overflow");
402 return -1;
403 }
405 int offset = code_offset();
407 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
408 __ delayed()->nop();
409 __ should_not_reach_here();
410 guarantee(code_offset() - offset <= exception_handler_size, "overflow");
411 __ end_a_stub();
413 return offset;
414 }
417 // Emit the code to remove the frame from the stack in the exception
418 // unwind path.
419 int LIR_Assembler::emit_unwind_handler() {
420 #ifndef PRODUCT
421 if (CommentedAssembly) {
422 _masm->block_comment("Unwind handler");
423 }
424 #endif
426 int offset = code_offset();
428 // Fetch the exception from TLS and clear out exception related thread state
429 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
430 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
431 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
433 __ bind(_unwind_handler_entry);
434 __ verify_not_null_oop(O0);
435 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
436 __ mov(O0, I0); // Preserve the exception
437 }
439 // Perform needed unlocking
440 MonitorExitStub* stub = NULL;
441 if (method()->is_synchronized()) {
442 monitor_address(0, FrameMap::I1_opr);
443 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
444 __ unlock_object(I3, I2, I1, *stub->entry());
445 __ bind(*stub->continuation());
446 }
448 if (compilation()->env()->dtrace_method_probes()) {
449 __ mov(G2_thread, O0);
450 __ save_thread(I1); // need to preserve thread in G2 across
451 // runtime call
452 metadata2reg(method()->constant_encoding(), O1);
453 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
454 __ delayed()->nop();
455 __ restore_thread(I1);
456 }
458 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
459 __ mov(I0, O0); // Restore the exception
460 }
462 // dispatch to the unwind logic
463 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
464 __ delayed()->nop();
466 // Emit the slow path assembly
467 if (stub != NULL) {
468 stub->emit_code(this);
469 }
471 return offset;
472 }
475 int LIR_Assembler::emit_deopt_handler() {
476 // if the last instruction is a call (typically to do a throw which
477 // is coming at the end after block reordering) the return address
478 // must still point into the code area in order to avoid assertion
479 // failures when searching for the corresponding bci => add a nop
480 // (was bug 5/14/1999 - gri)
481 __ nop();
483 // generate code for deopt handler
484 ciMethod* method = compilation()->method();
485 address handler_base = __ start_a_stub(deopt_handler_size);
486 if (handler_base == NULL) {
487 // not enough space left for the handler
488 bailout("deopt handler overflow");
489 return -1;
490 }
492 int offset = code_offset();
493 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
494 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
495 __ delayed()->nop();
496 guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
497 __ end_a_stub();
499 return offset;
500 }
503 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
504 if (o == NULL) {
505 __ set(NULL_WORD, reg);
506 } else {
507 int oop_index = __ oop_recorder()->find_index(o);
508 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
509 RelocationHolder rspec = oop_Relocation::spec(oop_index);
510 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
511 }
512 }
515 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
516 // Allocate a new index in table to hold the object once it's been patched
517 int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
518 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
520 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
521 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
522 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
523 // NULL will be dynamically patched later and the patched value may be large. We must
524 // therefore generate the sethi/add as a placeholder
525 __ patchable_set(addrlit, reg);
527 patching_epilog(patch, lir_patch_normal, reg, info);
528 }
531 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
532 __ set_metadata_constant(o, reg);
533 }
535 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
536 // Allocate a new index in table to hold the klass once it's been patched
537 int index = __ oop_recorder()->allocate_metadata_index(NULL);
538 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
539 AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
540 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
541 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
542 // NULL will be dynamically patched later and the patched value may be large. We must
543 // therefore generate the sethi/add as a placeholder
544 __ patchable_set(addrlit, reg);
546 patching_epilog(patch, lir_patch_normal, reg, info);
547 }
549 void LIR_Assembler::emit_op3(LIR_Op3* op) {
550 Register Rdividend = op->in_opr1()->as_register();
551 Register Rdivisor = noreg;
552 Register Rscratch = op->in_opr3()->as_register();
553 Register Rresult = op->result_opr()->as_register();
554 int divisor = -1;
556 if (op->in_opr2()->is_register()) {
557 Rdivisor = op->in_opr2()->as_register();
558 } else {
559 divisor = op->in_opr2()->as_constant_ptr()->as_jint();
560 assert(Assembler::is_simm13(divisor), "can only handle simm13");
561 }
563 assert(Rdividend != Rscratch, "");
564 assert(Rdivisor != Rscratch, "");
565 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
567 if (Rdivisor == noreg && is_power_of_2(divisor)) {
568 // convert division by a power of two into some shifts and logical operations
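// Signed division must round toward zero, so for a negative dividend we
// add (divisor - 1) before shifting, e.g. -7 / 4: (-7 + 3) >> 2 == -1,
// whereas a plain arithmetic shift would yield -2.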
569 if (op->code() == lir_idiv) {
570 if (divisor == 2) {
571 __ srl(Rdividend, 31, Rscratch);
572 } else {
573 __ sra(Rdividend, 31, Rscratch);
574 __ and3(Rscratch, divisor - 1, Rscratch);
575 }
576 __ add(Rdividend, Rscratch, Rscratch);
577 __ sra(Rscratch, log2_intptr(divisor), Rresult);
578 return;
579 } else {
580 if (divisor == 2) {
581 __ srl(Rdividend, 31, Rscratch);
582 } else {
583 __ sra(Rdividend, 31, Rscratch);
584 __ and3(Rscratch, divisor - 1, Rscratch);
585 }
586 __ add(Rdividend, Rscratch, Rscratch);
587 __ andn(Rscratch, divisor - 1, Rscratch);
588 __ sub(Rdividend, Rscratch, Rresult);
589 return;
590 }
591 }
593 __ sra(Rdividend, 31, Rscratch);
594 __ wry(Rscratch);
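// sdivcc divides the 64-bit value formed by Y:Rdividend, so Y must first
// hold the sign extension of the dividend (all zeros or all ones).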
595 if (!VM_Version::v9_instructions_work()) {
596 // v9 doesn't require these nops
597 __ nop();
598 __ nop();
599 __ nop();
600 __ nop();
601 }
603 add_debug_info_for_div0_here(op->info());
605 if (Rdivisor != noreg) {
606 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
607 } else {
608 assert(Assembler::is_simm13(divisor), "can only handle simm13");
609 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
610 }
612 Label skip;
613 __ br(Assembler::overflowSet, true, Assembler::pn, skip);
614 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
615 __ bind(skip);
617 if (op->code() == lir_irem) {
618 if (Rdivisor != noreg) {
619 __ smul(Rscratch, Rdivisor, Rscratch);
620 } else {
621 __ smul(Rscratch, divisor, Rscratch);
622 }
623 __ sub(Rdividend, Rscratch, Rresult);
624 }
625 }
628 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
629 #ifdef ASSERT
630 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
631 if (op->block() != NULL) _branch_target_blocks.append(op->block());
632 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
633 #endif
634 assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
636 if (op->cond() == lir_cond_always) {
637 __ br(Assembler::always, false, Assembler::pt, *(op->label()));
638 } else if (op->code() == lir_cond_float_branch) {
639 assert(op->ublock() != NULL, "must have unordered successor");
640 bool is_unordered = (op->ublock() == op->block());
641 Assembler::Condition acond;
642 switch (op->cond()) {
643 case lir_cond_equal: acond = Assembler::f_equal; break;
644 case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
645 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
646 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
647 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
648 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
649 default : ShouldNotReachHere();
650 };
652 if (!VM_Version::v9_instructions_work()) {
653 __ nop();
654 }
655 __ fb( acond, false, Assembler::pn, *(op->label()));
656 } else {
657 assert (op->code() == lir_branch, "just checking");
659 Assembler::Condition acond;
660 switch (op->cond()) {
661 case lir_cond_equal: acond = Assembler::equal; break;
662 case lir_cond_notEqual: acond = Assembler::notEqual; break;
663 case lir_cond_less: acond = Assembler::less; break;
664 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
665 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
666 case lir_cond_greater: acond = Assembler::greater; break;
667 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
668 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
669 default: ShouldNotReachHere();
670 };
672 // sparc has different condition codes for testing 32-bit
673 // vs. 64-bit values. We could always test xcc if we could
674 // guarantee that 32-bit loads were always sign extended, but that
675 // isn't true, and since sign extension isn't free it would impose
676 // a slight cost.
677 #ifdef _LP64
678 if (op->type() == T_INT) {
679 __ br(acond, false, Assembler::pn, *(op->label()));
680 } else
681 #endif
682 __ brx(acond, false, Assembler::pn, *(op->label()));
683 }
684 // The peephole pass fills the delay slot
685 }
688 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
689 Bytecodes::Code code = op->bytecode();
690 LIR_Opr dst = op->result_opr();
692 switch(code) {
693 case Bytecodes::_i2l: {
694 Register rlo = dst->as_register_lo();
695 Register rhi = dst->as_register_hi();
696 Register rval = op->in_opr()->as_register();
697 #ifdef _LP64
698 __ sra(rval, 0, rlo);
699 #else
700 __ mov(rval, rlo);
701 __ sra(rval, BitsPerInt-1, rhi);
702 #endif
703 break;
704 }
705 case Bytecodes::_i2d:
706 case Bytecodes::_i2f: {
707 bool is_double = (code == Bytecodes::_i2d);
708 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
709 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
710 FloatRegister rsrc = op->in_opr()->as_float_reg();
711 if (rsrc != rdst) {
712 __ fmov(FloatRegisterImpl::S, rsrc, rdst);
713 }
714 __ fitof(w, rdst, rdst);
715 break;
716 }
717 case Bytecodes::_f2i:{
718 FloatRegister rsrc = op->in_opr()->as_float_reg();
719 Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
720 Label L;
721 // result must be 0 if value is NaN; test by comparing value to itself
722 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
723 if (!VM_Version::v9_instructions_work()) {
724 __ nop();
725 }
726 __ fb(Assembler::f_unordered, true, Assembler::pn, L);
727 __ delayed()->st(G0, addr); // annulled if the contents of rsrc are not NaN
728 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
729 // move integer result from float register to int register
730 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
731 __ bind (L);
732 break;
733 }
734 case Bytecodes::_l2i: {
735 Register rlo = op->in_opr()->as_register_lo();
736 Register rhi = op->in_opr()->as_register_hi();
737 Register rdst = dst->as_register();
738 #ifdef _LP64
739 __ sra(rlo, 0, rdst);
740 #else
741 __ mov(rlo, rdst);
742 #endif
743 break;
744 }
745 case Bytecodes::_d2f:
746 case Bytecodes::_f2d: {
747 bool is_double = (code == Bytecodes::_f2d);
748 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
749 LIR_Opr val = op->in_opr();
750 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
751 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
752 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
753 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
754 __ ftof(vw, dw, rval, rdst);
755 break;
756 }
757 case Bytecodes::_i2s:
758 case Bytecodes::_i2b: {
759 Register rval = op->in_opr()->as_register();
760 Register rdst = dst->as_register();
761 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
762 __ sll (rval, shift, rdst);
763 __ sra (rdst, shift, rdst);
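// e.g. for _i2b shift == 24: the sll/sra pair sign-extends the low byte,
// while _i2c below uses sll/srl to zero-extend the low 16 bits.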
764 break;
765 }
766 case Bytecodes::_i2c: {
767 Register rval = op->in_opr()->as_register();
768 Register rdst = dst->as_register();
769 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
770 __ sll (rval, shift, rdst);
771 __ srl (rdst, shift, rdst);
772 break;
773 }
775 default: ShouldNotReachHere();
776 }
777 }
780 void LIR_Assembler::align_call(LIR_Code) {
781 // do nothing since all instructions are word aligned on sparc
782 }
785 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
786 __ call(op->addr(), rtype);
787 // The peephole pass fills the delay slot, add_call_info is done in
788 // LIR_Assembler::emit_delay.
789 }
792 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
793 __ ic_call(op->addr(), false);
794 // The peephole pass fills the delay slot, add_call_info is done in
795 // LIR_Assembler::emit_delay.
796 }
799 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
800 add_debug_info_for_null_check_here(op->info());
801 __ load_klass(O0, G3_scratch);
802 if (Assembler::is_simm13(op->vtable_offset())) {
803 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
804 } else {
805 // This will generate 2 instructions
806 __ set(op->vtable_offset(), G5_method);
807 // ld_ptr, set_hi, set
808 __ ld_ptr(G3_scratch, G5_method, G5_method);
809 }
810 __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
811 __ callr(G3_scratch, G0);
812 // the peephole pass fills the delay slot
813 }
815 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
816 int store_offset;
817 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
818 assert(!unaligned, "can't handle this");
819 // for offsets larger than a simm13 we setup the offset in O7
820 __ set(offset, O7);
821 store_offset = store(from_reg, base, O7, type, wide);
822 } else {
823 if (type == T_ARRAY || type == T_OBJECT) {
824 __ verify_oop(from_reg->as_register());
825 }
826 store_offset = code_offset();
827 switch (type) {
828 case T_BOOLEAN: // fall through
829 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
830 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
831 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
832 case T_INT : __ stw(from_reg->as_register(), base, offset); break;
833 case T_LONG :
834 #ifdef _LP64
835 if (unaligned || PatchALot) {
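// split the 64-bit value into two 32-bit word stores; the high word
// goes through O7 via srax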
836 __ srax(from_reg->as_register_lo(), 32, O7);
837 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
838 __ stw(O7, base, offset + hi_word_offset_in_bytes);
839 } else {
840 __ stx(from_reg->as_register_lo(), base, offset);
841 }
842 #else
843 assert(Assembler::is_simm13(offset + 4), "must be");
844 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
845 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
846 #endif
847 break;
848 case T_ADDRESS:
849 case T_METADATA:
850 __ st_ptr(from_reg->as_register(), base, offset);
851 break;
852 case T_ARRAY : // fall through
853 case T_OBJECT:
854 {
855 if (UseCompressedOops && !wide) {
856 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
857 store_offset = code_offset();
858 __ stw(G3_scratch, base, offset);
859 } else {
860 __ st_ptr(from_reg->as_register(), base, offset);
861 }
862 break;
863 }
865 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
866 case T_DOUBLE:
867 {
868 FloatRegister reg = from_reg->as_double_reg();
869 // split unaligned stores
870 if (unaligned || PatchALot) {
871 assert(Assembler::is_simm13(offset + 4), "must be");
872 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
873 __ stf(FloatRegisterImpl::S, reg, base, offset);
874 } else {
875 __ stf(FloatRegisterImpl::D, reg, base, offset);
876 }
877 break;
878 }
879 default : ShouldNotReachHere();
880 }
881 }
882 return store_offset;
883 }
886 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
887 if (type == T_ARRAY || type == T_OBJECT) {
888 __ verify_oop(from_reg->as_register());
889 }
890 int store_offset = code_offset();
891 switch (type) {
892 case T_BOOLEAN: // fall through
893 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
894 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
895 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
896 case T_INT : __ stw(from_reg->as_register(), base, disp); break;
897 case T_LONG :
898 #ifdef _LP64
899 __ stx(from_reg->as_register_lo(), base, disp);
900 #else
901 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
902 __ std(from_reg->as_register_hi(), base, disp);
903 #endif
904 break;
905 case T_ADDRESS:
906 __ st_ptr(from_reg->as_register(), base, disp);
907 break;
908 case T_ARRAY : // fall through
909 case T_OBJECT:
910 {
911 if (UseCompressedOops && !wide) {
912 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
913 store_offset = code_offset();
914 __ stw(G3_scratch, base, disp);
915 } else {
916 __ st_ptr(from_reg->as_register(), base, disp);
917 }
918 break;
919 }
920 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
921 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
922 default : ShouldNotReachHere();
923 }
924 return store_offset;
925 }
928 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
929 int load_offset;
930 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
931 assert(base != O7, "destroying register");
932 assert(!unaligned, "can't handle this");
933 // for offsets larger than a simm13 we setup the offset in O7
934 __ set(offset, O7);
935 load_offset = load(base, O7, to_reg, type, wide);
936 } else {
937 load_offset = code_offset();
938 switch(type) {
939 case T_BOOLEAN: // fall through
940 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
941 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break;
942 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
943 case T_INT : __ ld(base, offset, to_reg->as_register()); break;
944 case T_LONG :
945 if (!unaligned) {
946 #ifdef _LP64
947 __ ldx(base, offset, to_reg->as_register_lo());
948 #else
949 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
950 "must be sequential");
951 __ ldd(base, offset, to_reg->as_register_hi());
952 #endif
953 } else {
954 #ifdef _LP64
955 assert(base != to_reg->as_register_lo(), "can't handle this");
956 assert(O7 != to_reg->as_register_lo(), "can't handle this");
957 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
958 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
959 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
960 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
961 #else
962 if (base == to_reg->as_register_lo()) {
963 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
964 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
965 } else {
966 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
967 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
968 }
969 #endif
970 }
971 break;
972 case T_METADATA:
973 case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
974 case T_ARRAY : // fall through
975 case T_OBJECT:
976 {
977 if (UseCompressedOops && !wide) {
978 __ lduw(base, offset, to_reg->as_register());
979 __ decode_heap_oop(to_reg->as_register());
980 } else {
981 __ ld_ptr(base, offset, to_reg->as_register());
982 }
983 break;
984 }
985 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
986 case T_DOUBLE:
987 {
988 FloatRegister reg = to_reg->as_double_reg();
989 // split unaligned loads
990 if (unaligned || PatchALot) {
991 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
992 __ ldf(FloatRegisterImpl::S, base, offset, reg);
993 } else {
994 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
995 }
996 break;
997 }
998 default : ShouldNotReachHere();
999 }
1000 if (type == T_ARRAY || type == T_OBJECT) {
1001 __ verify_oop(to_reg->as_register());
1002 }
1003 }
1004 return load_offset;
1005 }
1008 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
1009 int load_offset = code_offset();
1010 switch(type) {
1011 case T_BOOLEAN: // fall through
1012 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
1013 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
1014 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
1015 case T_INT : __ ld(base, disp, to_reg->as_register()); break;
1016 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
1017 case T_ARRAY : // fall through
1018 case T_OBJECT:
1019 {
1020 if (UseCompressedOops && !wide) {
1021 __ lduw(base, disp, to_reg->as_register());
1022 __ decode_heap_oop(to_reg->as_register());
1023 } else {
1024 __ ld_ptr(base, disp, to_reg->as_register());
1025 }
1026 break;
1027 }
1028 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
1029 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
1030 case T_LONG :
1031 #ifdef _LP64
1032 __ ldx(base, disp, to_reg->as_register_lo());
1033 #else
1034 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
1035 "must be sequential");
1036 __ ldd(base, disp, to_reg->as_register_hi());
1037 #endif
1038 break;
1039 default : ShouldNotReachHere();
1040 }
1041 if (type == T_ARRAY || type == T_OBJECT) {
1042 __ verify_oop(to_reg->as_register());
1043 }
1044 return load_offset;
1045 }
1047 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
1048 LIR_Const* c = src->as_constant_ptr();
1049 switch (c->type()) {
1050 case T_INT:
1051 case T_FLOAT: {
1052 Register src_reg = O7;
1053 int value = c->as_jint_bits();
1054 if (value == 0) {
1055 src_reg = G0;
1056 } else {
1057 __ set(value, O7);
1058 }
1059 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1060 __ stw(src_reg, addr.base(), addr.disp());
1061 break;
1062 }
1063 case T_ADDRESS: {
1064 Register src_reg = O7;
1065 int value = c->as_jint_bits();
1066 if (value == 0) {
1067 src_reg = G0;
1068 } else {
1069 __ set(value, O7);
1070 }
1071 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1072 __ st_ptr(src_reg, addr.base(), addr.disp());
1073 break;
1074 }
1075 case T_OBJECT: {
1076 Register src_reg = O7;
1077 jobject2reg(c->as_jobject(), src_reg);
1078 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1079 __ st_ptr(src_reg, addr.base(), addr.disp());
1080 break;
1081 }
1082 case T_LONG:
1083 case T_DOUBLE: {
1084 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
1086 Register tmp = O7;
1087 int value_lo = c->as_jint_lo_bits();
1088 if (value_lo == 0) {
1089 tmp = G0;
1090 } else {
1091 __ set(value_lo, O7);
1092 }
1093 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
1094 int value_hi = c->as_jint_hi_bits();
1095 if (value_hi == 0) {
1096 tmp = G0;
1097 } else {
1098 __ set(value_hi, O7);
1099 }
1100 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
1101 break;
1102 }
1103 default:
1104 Unimplemented();
1105 }
1106 }
1109 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
1110 LIR_Const* c = src->as_constant_ptr();
1111 LIR_Address* addr = dest->as_address_ptr();
1112 Register base = addr->base()->as_pointer_register();
1113 int offset = -1;
1115 switch (c->type()) {
1116 case T_INT:
1117 case T_FLOAT:
1118 case T_ADDRESS: {
1119 LIR_Opr tmp = FrameMap::O7_opr;
1120 int value = c->as_jint_bits();
1121 if (value == 0) {
1122 tmp = FrameMap::G0_opr;
1123 } else {
1124 __ set(value, O7);
1125 }
1126 if (addr->index()->is_valid()) {
1127 assert(addr->disp() == 0, "must be zero");
1128 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1129 } else {
1130 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1131 offset = store(tmp, base, addr->disp(), type, wide, false);
1132 }
1133 break;
1134 }
1135 case T_LONG:
1136 case T_DOUBLE: {
1137 assert(!addr->index()->is_valid(), "can't handle reg reg address here");
1138 assert(Assembler::is_simm13(addr->disp()) &&
1139 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
1141 LIR_Opr tmp = FrameMap::O7_opr;
1142 int value_lo = c->as_jint_lo_bits();
1143 if (value_lo == 0) {
1144 tmp = FrameMap::G0_opr;
1145 } else {
1146 __ set(value_lo, O7);
1147 }
1148 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
1149 int value_hi = c->as_jint_hi_bits();
1150 if (value_hi == 0) {
1151 tmp = FrameMap::G0_opr;
1152 } else {
1153 __ set(value_hi, O7);
1154 }
1155 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
1156 break;
1157 }
1158 case T_OBJECT: {
1159 jobject obj = c->as_jobject();
1160 LIR_Opr tmp;
1161 if (obj == NULL) {
1162 tmp = FrameMap::G0_opr;
1163 } else {
1164 tmp = FrameMap::O7_opr;
1165 jobject2reg(c->as_jobject(), O7);
1166 }
1167 // handle either reg+reg or reg+disp address
1168 if (addr->index()->is_valid()) {
1169 assert(addr->disp() == 0, "must be zero");
1170 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1171 } else {
1172 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1173 offset = store(tmp, base, addr->disp(), type, wide, false);
1174 }
1176 break;
1177 }
1178 default:
1179 Unimplemented();
1180 }
1181 if (info != NULL) {
1182 assert(offset != -1, "offset should've been set");
1183 add_debug_info_for_null_check(offset, info);
1184 }
1185 }
1188 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
1189 LIR_Const* c = src->as_constant_ptr();
1190 LIR_Opr to_reg = dest;
1192 switch (c->type()) {
1193 case T_INT:
1194 case T_ADDRESS:
1195 {
1196 jint con = c->as_jint();
1197 if (to_reg->is_single_cpu()) {
1198 assert(patch_code == lir_patch_none, "no patching handled here");
1199 __ set(con, to_reg->as_register());
1200 } else {
1201 ShouldNotReachHere();
1202 assert(to_reg->is_single_fpu(), "wrong register kind");
1204 __ set(con, O7);
1205 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
1206 __ st(O7, temp_slot);
1207 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
1208 }
1209 }
1210 break;
1212 case T_LONG:
1213 {
1214 jlong con = c->as_jlong();
1216 if (to_reg->is_double_cpu()) {
1217 #ifdef _LP64
1218 __ set(con, to_reg->as_register_lo());
1219 #else
1220 __ set(low(con), to_reg->as_register_lo());
1221 __ set(high(con), to_reg->as_register_hi());
1222 #endif
1223 #ifdef _LP64
1224 } else if (to_reg->is_single_cpu()) {
1225 __ set(con, to_reg->as_register());
1226 #endif
1227 } else {
1228 ShouldNotReachHere();
1229 assert(to_reg->is_double_fpu(), "wrong register kind");
1230 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
1231 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
1232 __ set(low(con), O7);
1233 __ st(O7, temp_slot_lo);
1234 __ set(high(con), O7);
1235 __ st(O7, temp_slot_hi);
1236 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
1237 }
1238 }
1239 break;
1241 case T_OBJECT:
1242 {
1243 if (patch_code == lir_patch_none) {
1244 jobject2reg(c->as_jobject(), to_reg->as_register());
1245 } else {
1246 jobject2reg_with_patching(to_reg->as_register(), info);
1247 }
1248 }
1249 break;
1251 case T_METADATA:
1252 {
1253 if (patch_code == lir_patch_none) {
1254 metadata2reg(c->as_metadata(), to_reg->as_register());
1255 } else {
1256 klass2reg_with_patching(to_reg->as_register(), info);
1257 }
1258 }
1259 break;
1261 case T_FLOAT:
1262 {
1263 address const_addr = __ float_constant(c->as_jfloat());
1264 if (const_addr == NULL) {
1265 bailout("const section overflow");
1266 break;
1267 }
1268 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1269 AddressLiteral const_addrlit(const_addr, rspec);
1270 if (to_reg->is_single_fpu()) {
1271 __ patchable_sethi(const_addrlit, O7);
1272 __ relocate(rspec);
1273 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
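// (sethi materializes the upper 22 bits of the constant's address in O7;
// low10() supplies the remaining 10 bits as the load displacement)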
1275 } else {
1276 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1278 __ set(const_addrlit, O7);
1279 __ ld(O7, 0, to_reg->as_register());
1280 }
1281 }
1282 break;
1284 case T_DOUBLE:
1285 {
1286 address const_addr = __ double_constant(c->as_jdouble());
1287 if (const_addr == NULL) {
1288 bailout("const section overflow");
1289 break;
1290 }
1291 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1293 if (to_reg->is_double_fpu()) {
1294 AddressLiteral const_addrlit(const_addr, rspec);
1295 __ patchable_sethi(const_addrlit, O7);
1296 __ relocate(rspec);
1297 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
1298 } else {
1299 assert(to_reg->is_double_cpu(), "Must be a long register.");
1300 #ifdef _LP64
1301 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
1302 #else
1303 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
1304 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
1305 #endif
1306 }
1308 }
1309 break;
1311 default:
1312 ShouldNotReachHere();
1313 }
1314 }
1316 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1317 Register reg = addr->base()->as_register();
1318 LIR_Opr index = addr->index();
1319 if (index->is_illegal()) {
1320 return Address(reg, addr->disp());
1321 } else {
1322 assert (addr->disp() == 0, "unsupported address mode");
1323 return Address(reg, index->as_pointer_register());
1324 }
1325 }
1328 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1329 switch (type) {
1330 case T_INT:
1331 case T_FLOAT: {
1332 Register tmp = O7;
1333 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1334 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1335 __ lduw(from.base(), from.disp(), tmp);
1336 __ stw(tmp, to.base(), to.disp());
1337 break;
1338 }
1339 case T_OBJECT: {
1340 Register tmp = O7;
1341 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1342 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1343 __ ld_ptr(from.base(), from.disp(), tmp);
1344 __ st_ptr(tmp, to.base(), to.disp());
1345 break;
1346 }
1347 case T_LONG:
1348 case T_DOUBLE: {
1349 Register tmp = O7;
1350 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1351 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
1352 __ lduw(from.base(), from.disp(), tmp);
1353 __ stw(tmp, to.base(), to.disp());
1354 __ lduw(from.base(), from.disp() + 4, tmp);
1355 __ stw(tmp, to.base(), to.disp() + 4);
1356 break;
1357 }
1359 default:
1360 ShouldNotReachHere();
1361 }
1362 }
1365 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1366 Address base = as_Address(addr);
1367 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
1368 }
1371 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1372 Address base = as_Address(addr);
1373 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
1374 }
1377 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1378 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
1380 assert(type != T_METADATA, "load of metadata ptr not supported");
1381 LIR_Address* addr = src_opr->as_address_ptr();
1382 LIR_Opr to_reg = dest;
1384 Register src = addr->base()->as_pointer_register();
1385 Register disp_reg = noreg;
1386 int disp_value = addr->disp();
1387 bool needs_patching = (patch_code != lir_patch_none);
1389 if (addr->base()->type() == T_OBJECT) {
1390 __ verify_oop(src);
1391 }
1393 PatchingStub* patch = NULL;
1394 if (needs_patching) {
1395 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1396 assert(!to_reg->is_double_cpu() ||
1397 patch_code == lir_patch_none ||
1398 patch_code == lir_patch_normal, "patching doesn't match register");
1399 }
1401 if (addr->index()->is_illegal()) {
1402 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1403 if (needs_patching) {
1404 __ patchable_set(0, O7);
1405 } else {
1406 __ set(disp_value, O7);
1407 }
1408 disp_reg = O7;
1409 }
1410 } else if (unaligned || PatchALot) {
1411 __ add(src, addr->index()->as_register(), O7);
1412 src = O7;
1413 } else {
1414 disp_reg = addr->index()->as_pointer_register();
1415 assert(disp_value == 0, "can't handle 3 operand addresses");
1416 }
1418 // remember the offset of the load. The patching_epilog must be done
1419 // before the call to add_debug_info, otherwise the PcDescs don't get
1420 // entered in increasing order.
1421 int offset = code_offset();
1423 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1424 if (disp_reg == noreg) {
1425 offset = load(src, disp_value, to_reg, type, wide, unaligned);
1426 } else {
1427 assert(!unaligned, "can't handle this");
1428 offset = load(src, disp_reg, to_reg, type, wide);
1429 }
1431 if (patch != NULL) {
1432 patching_epilog(patch, patch_code, src, info);
1433 }
1434 if (info != NULL) add_debug_info_for_null_check(offset, info);
1435 }
1438 void LIR_Assembler::prefetchr(LIR_Opr src) {
1439 LIR_Address* addr = src->as_address_ptr();
1440 Address from_addr = as_Address(addr);
1442 if (VM_Version::has_v9()) {
1443 __ prefetch(from_addr, Assembler::severalReads);
1444 }
1445 }
1448 void LIR_Assembler::prefetchw(LIR_Opr src) {
1449 LIR_Address* addr = src->as_address_ptr();
1450 Address from_addr = as_Address(addr);
1452 if (VM_Version::has_v9()) {
1453 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
1454 }
1455 }
1458 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1459 Address addr;
1460 if (src->is_single_word()) {
1461 addr = frame_map()->address_for_slot(src->single_stack_ix());
1462 } else if (src->is_double_word()) {
1463 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1464 }
1466 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
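// (on 64-bit SPARC the saved SP is biased by STACK_BIAS == 2047, 0 on
// 32-bit, so the bias must be removed before testing the real address's
// 8-byte alignment)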
1467 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
1468 }
1471 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1472 Address addr;
1473 if (dest->is_single_word()) {
1474 addr = frame_map()->address_for_slot(dest->single_stack_ix());
1475 } else if (dest->is_double_word()) {
1476 addr = frame_map()->address_for_slot(dest->double_stack_ix());
1477 }
1478 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1479 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
1480 }
1483 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1484 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1485 if (from_reg->is_double_fpu()) {
1486 // double to double moves
1487 assert(to_reg->is_double_fpu(), "should match");
1488 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1489 } else {
1490 // float to float moves
1491 assert(to_reg->is_single_fpu(), "should match");
1492 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1493 }
1494 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1495 if (from_reg->is_double_cpu()) {
1496 #ifdef _LP64
1497 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1498 #else
1499 assert(to_reg->is_double_cpu() &&
1500 from_reg->as_register_hi() != to_reg->as_register_lo() &&
1501 from_reg->as_register_lo() != to_reg->as_register_hi(),
1502 "should both be long and not overlap");
1503 // long to long moves
1504 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
1505 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
1506 #endif
1507 #ifdef _LP64
1508 } else if (to_reg->is_double_cpu()) {
1509 // int to int moves
1510 __ mov(from_reg->as_register(), to_reg->as_register_lo());
1511 #endif
1512 } else {
1513 // int to int moves
1514 __ mov(from_reg->as_register(), to_reg->as_register());
1515 }
1516 } else {
1517 ShouldNotReachHere();
1518 }
1519 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1520 __ verify_oop(to_reg->as_register());
1521 }
1522 }
1525 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1526 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1527 bool wide, bool unaligned) {
1528 assert(type != T_METADATA, "store of metadata ptr not supported");
1529 LIR_Address* addr = dest->as_address_ptr();
1531 Register src = addr->base()->as_pointer_register();
1532 Register disp_reg = noreg;
1533 int disp_value = addr->disp();
1534 bool needs_patching = (patch_code != lir_patch_none);
1536 if (addr->base()->is_oop_register()) {
1537 __ verify_oop(src);
1538 }
1540 PatchingStub* patch = NULL;
1541 if (needs_patching) {
1542 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1543 assert(!from_reg->is_double_cpu() ||
1544 patch_code == lir_patch_none ||
1545 patch_code == lir_patch_normal, "patching doesn't match register");
1546 }
1548 if (addr->index()->is_illegal()) {
1549 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1550 if (needs_patching) {
1551 __ patchable_set(0, O7);
1552 } else {
1553 __ set(disp_value, O7);
1554 }
1555 disp_reg = O7;
1556 }
1557 } else if (unaligned || PatchALot) {
1558 __ add(src, addr->index()->as_register(), O7);
1559 src = O7;
1560 } else {
1561 disp_reg = addr->index()->as_pointer_register();
1562 assert(disp_value == 0, "can't handle 3 operand addresses");
1563 }
1565 // remember the offset of the store. The patching_epilog must be done
1566 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1567 // entered in increasing order.
1568 int offset;
1570 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1571 if (disp_reg == noreg) {
1572 offset = store(from_reg, src, disp_value, type, wide, unaligned);
1573 } else {
1574 assert(!unaligned, "can't handle this");
1575 offset = store(from_reg, src, disp_reg, type, wide);
1576 }
1578 if (patch != NULL) {
1579 patching_epilog(patch, patch_code, src, info);
1580 }
1582 if (info != NULL) add_debug_info_for_null_check(offset, info);
1583 }
1586 void LIR_Assembler::return_op(LIR_Opr result) {
1587 // the poll may need a register so just pick one that isn't the return register
1588 #if defined(TIERED) && !defined(_LP64)
1589 if (result->type_field() == LIR_OprDesc::long_type) {
1590 // Must move the result to G1
1591 // Must leave proper result in O0,O1 and G1 (TIERED only)
1592 __ sllx(I0, 32, G1); // Shift bits into high G1
1593 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
1594 __ or3 (I1, G1, G1); // OR 64 bits into G1
1595 #ifdef ASSERT
1596 // mangle it so any problems will show up
1597 __ set(0xdeadbeef, I0);
1598 __ set(0xdeadbeef, I1);
1599 #endif
1600 }
1601 #endif // TIERED
1602 __ set((intptr_t)os::get_polling_page(), L0);
1603 __ relocate(relocInfo::poll_return_type);
1604 __ ld_ptr(L0, 0, G0);
1605 __ ret();
1606 __ delayed()->restore();
1607 }
1610 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1611 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
1612 if (info != NULL) {
1613 add_debug_info_for_branch(info);
1614 } else {
1615 __ relocate(relocInfo::poll_type);
1616 }
1618 int offset = __ offset();
1619 __ ld_ptr(tmp->as_register(), 0, G0);
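// loading into G0 discards the value; the load exists only to trap when
// the VM has protected the polling page at a safepoint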
1621 return offset;
1622 }
1625 void LIR_Assembler::emit_static_call_stub() {
1626 address call_pc = __ pc();
1627 address stub = __ start_a_stub(call_stub_size);
1628 if (stub == NULL) {
1629 bailout("static call stub overflow");
1630 return;
1631 }
1633 int start = __ offset();
1634 __ relocate(static_stub_Relocation::spec(call_pc));
1636 __ set_metadata(NULL, G5);
1637 // must be set to -1 at code generation time
1638 AddressLiteral addrlit(-1);
1639 __ jump_to(addrlit, G3);
1640 __ delayed()->nop();
1642 assert(__ offset() - start <= call_stub_size, "stub too big");
1643 __ end_a_stub();
1644 }
1647 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1648 if (opr1->is_single_fpu()) {
1649 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
1650 } else if (opr1->is_double_fpu()) {
1651 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
1652 } else if (opr1->is_single_cpu()) {
1653 if (opr2->is_constant()) {
1654 switch (opr2->as_constant_ptr()->type()) {
1655 case T_INT:
1656 { jint con = opr2->as_constant_ptr()->as_jint();
1657 if (Assembler::is_simm13(con)) {
1658 __ cmp(opr1->as_register(), con);
1659 } else {
1660 __ set(con, O7);
1661 __ cmp(opr1->as_register(), O7);
1662 }
1663 }
1664 break;
1666 case T_OBJECT:
1667 // there are only equal/not-equal comparisons on objects
1668 { jobject con = opr2->as_constant_ptr()->as_jobject();
1669 if (con == NULL) {
1670 __ cmp(opr1->as_register(), 0);
1671 } else {
1672 jobject2reg(con, O7);
1673 __ cmp(opr1->as_register(), O7);
1674 }
1675 }
1676 break;
1678 default:
1679 ShouldNotReachHere();
1680 break;
1681 }
1682 } else {
1683 if (opr2->is_address()) {
1684 LIR_Address * addr = opr2->as_address_ptr();
1685 BasicType type = addr->type();
1686 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1687 else __ ld(as_Address(addr), O7);
1688 __ cmp(opr1->as_register(), O7);
1689 } else {
1690 __ cmp(opr1->as_register(), opr2->as_register());
1691 }
1692 }
1693 } else if (opr1->is_double_cpu()) {
1694 Register xlo = opr1->as_register_lo();
1695 Register xhi = opr1->as_register_hi();
1696 if (opr2->is_constant() && opr2->as_jlong() == 0) {
1697 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
1698 #ifdef _LP64
1699 __ orcc(xhi, G0, G0);
1700 #else
1701 __ orcc(xhi, xlo, G0);
1702 #endif
1703 } else if (opr2->is_register()) {
1704 Register ylo = opr2->as_register_lo();
1705 Register yhi = opr2->as_register_hi();
1706 #ifdef _LP64
1707 __ cmp(xlo, ylo);
1708 #else
1709 __ subcc(xlo, ylo, xlo);
1710 __ subccc(xhi, yhi, xhi);
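      // subcc/subccc perform the 64-bit subtract with borrow propagation:
      // the flags from the high-word subtract give the signed ordering,
      // while equality needs both result words checked (the orcc below).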
1711 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1712 __ orcc(xhi, xlo, G0);
1713 }
1714 #endif
1715 } else {
1716 ShouldNotReachHere();
1717 }
1718 } else if (opr1->is_address()) {
1719 LIR_Address * addr = opr1->as_address_ptr();
1720 BasicType type = addr->type();
1721 assert (opr2->is_constant(), "Checking");
1722 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1723 else __ ld(as_Address(addr), O7);
1724 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
1725 } else {
1726 ShouldNotReachHere();
1727 }
1728 }
1731 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1732 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1733 bool is_unordered_less = (code == lir_ucmp_fd2i);
1734 if (left->is_single_fpu()) {
1735 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1736 } else if (left->is_double_fpu()) {
1737 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1738 } else {
1739 ShouldNotReachHere();
1740 }
1741 } else if (code == lir_cmp_l2i) {
1742 #ifdef _LP64
1743 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
1744 #else
1745 __ lcmp(left->as_register_hi(), left->as_register_lo(),
1746 right->as_register_hi(), right->as_register_lo(),
1747 dst->as_register());
1748 #endif
1749 } else {
1750 ShouldNotReachHere();
1751 }
1752 }
1755 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1756 Assembler::Condition acond;
1757 switch (condition) {
1758 case lir_cond_equal: acond = Assembler::equal; break;
1759 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1760 case lir_cond_less: acond = Assembler::less; break;
1761 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1762 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1763 case lir_cond_greater: acond = Assembler::greater; break;
1764 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
1765 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
1766 default: ShouldNotReachHere();
1767 };
1769 if (opr1->is_constant() && opr1->type() == T_INT) {
1770 Register dest = result->as_register();
1771 // load up first part of constant before branch
1772 // and do the rest in the delay slot.
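    // e.g. 0x12345678 does not fit in simm13, so we emit
    //   sethi %hi(0x12345678), dest    ! bits 31..10
    // here and fill in the low 10 bits (0x278) with or3 in the delay slot.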
1773 if (!Assembler::is_simm13(opr1->as_jint())) {
1774 __ sethi(opr1->as_jint(), dest);
1775 }
1776 } else if (opr1->is_constant()) {
1777 const2reg(opr1, result, lir_patch_none, NULL);
1778 } else if (opr1->is_register()) {
1779 reg2reg(opr1, result);
1780 } else if (opr1->is_stack()) {
1781 stack2reg(opr1, result, result->type());
1782 } else {
1783 ShouldNotReachHere();
1784 }
1785 Label skip;
1786 #ifdef _LP64
1787 if (type == T_INT) {
1788 __ br(acond, false, Assembler::pt, skip);
1789 } else
1790 #endif
1791 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
1792 if (opr1->is_constant() && opr1->type() == T_INT) {
1793 Register dest = result->as_register();
1794 if (Assembler::is_simm13(opr1->as_jint())) {
1795 __ delayed()->or3(G0, opr1->as_jint(), dest);
1796 } else {
1797 // the sethi has been done above, so just put in the low 10 bits
1798 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1799 }
1800 } else {
1801 // can't do anything useful in the delay slot
1802 __ delayed()->nop();
1803 }
1804 if (opr2->is_constant()) {
1805 const2reg(opr2, result, lir_patch_none, NULL);
1806 } else if (opr2->is_register()) {
1807 reg2reg(opr2, result);
1808 } else if (opr2->is_stack()) {
1809 stack2reg(opr2, result, result->type());
1810 } else {
1811 ShouldNotReachHere();
1812 }
1813 __ bind(skip);
1814 }
1817 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1818 assert(info == NULL, "unused on this code path");
1819 assert(left->is_register(), "wrong items state");
1820 assert(dest->is_register(), "wrong items state");
1822 if (right->is_register()) {
1823 if (dest->is_float_kind()) {
1825 FloatRegister lreg, rreg, res;
1826 FloatRegisterImpl::Width w;
1827 if (right->is_single_fpu()) {
1828 w = FloatRegisterImpl::S;
1829 lreg = left->as_float_reg();
1830 rreg = right->as_float_reg();
1831 res = dest->as_float_reg();
1832 } else {
1833 w = FloatRegisterImpl::D;
1834 lreg = left->as_double_reg();
1835 rreg = right->as_double_reg();
1836 res = dest->as_double_reg();
1837 }
1839 switch (code) {
1840 case lir_add: __ fadd(w, lreg, rreg, res); break;
1841 case lir_sub: __ fsub(w, lreg, rreg, res); break;
1842 case lir_mul: // fall through
1843 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1844 case lir_div: // fall through
1845 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1846 default: ShouldNotReachHere();
1847 }
1849 } else if (dest->is_double_cpu()) {
1850 #ifdef _LP64
1851 Register dst_lo = dest->as_register_lo();
1852 Register op1_lo = left->as_pointer_register();
1853 Register op2_lo = right->as_pointer_register();
1855 switch (code) {
1856 case lir_add:
1857 __ add(op1_lo, op2_lo, dst_lo);
1858 break;
1860 case lir_sub:
1861 __ sub(op1_lo, op2_lo, dst_lo);
1862 break;
1864 default: ShouldNotReachHere();
1865 }
1866 #else
1867 Register op1_lo = left->as_register_lo();
1868 Register op1_hi = left->as_register_hi();
1869 Register op2_lo = right->as_register_lo();
1870 Register op2_hi = right->as_register_hi();
1871 Register dst_lo = dest->as_register_lo();
1872 Register dst_hi = dest->as_register_hi();
1874 switch (code) {
1875 case lir_add:
1876 __ addcc(op1_lo, op2_lo, dst_lo);
1877 __ addc (op1_hi, op2_hi, dst_hi);
1878 break;
1880 case lir_sub:
1881 __ subcc(op1_lo, op2_lo, dst_lo);
1882 __ subc (op1_hi, op2_hi, dst_hi);
1883 break;
1885 default: ShouldNotReachHere();
1886 }
1887 #endif
1888 } else {
1889 assert (right->is_single_cpu(), "Just Checking");
1891 Register lreg = left->as_register();
1892 Register res = dest->as_register();
1893 Register rreg = right->as_register();
1894 switch (code) {
1895 case lir_add: __ add (lreg, rreg, res); break;
1896 case lir_sub: __ sub (lreg, rreg, res); break;
1897 case lir_mul: __ mult (lreg, rreg, res); break;
1898 default: ShouldNotReachHere();
1899 }
1900 }
1901 } else {
1902 assert (right->is_constant(), "must be constant");
1904 if (dest->is_single_cpu()) {
1905 Register lreg = left->as_register();
1906 Register res = dest->as_register();
1907 int simm13 = right->as_constant_ptr()->as_jint();
1909 switch (code) {
1910 case lir_add: __ add (lreg, simm13, res); break;
1911 case lir_sub: __ sub (lreg, simm13, res); break;
1912 case lir_mul: __ mult (lreg, simm13, res); break;
1913 default: ShouldNotReachHere();
1914 }
1915 } else {
1916 Register lreg = left->as_pointer_register();
1917 Register res = dest->as_register_lo();
1918 long con = right->as_constant_ptr()->as_jlong();
1919 assert(Assembler::is_simm13(con), "must be simm13");
1921 switch (code) {
1922 case lir_add: __ add (lreg, (int)con, res); break;
1923 case lir_sub: __ sub (lreg, (int)con, res); break;
1924 case lir_mul: __ mult (lreg, (int)con, res); break;
1925 default: ShouldNotReachHere();
1926 }
1927 }
1928 }
1929 }
1932 void LIR_Assembler::fpop() {
1933 // do nothing
1934 }
1937 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1938 switch (code) {
1939 case lir_sin:
1940 case lir_tan:
1941 case lir_cos: {
1942 assert(thread->is_valid(), "preserve the thread object for performance reasons");
1943 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1944 break;
1945 }
1946 case lir_sqrt: {
1947 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1948 FloatRegister src_reg = value->as_double_reg();
1949 FloatRegister dst_reg = dest->as_double_reg();
1950 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1951 break;
1952 }
1953 case lir_abs: {
1954 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1955 FloatRegister src_reg = value->as_double_reg();
1956 FloatRegister dst_reg = dest->as_double_reg();
1957 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1958 break;
1959 }
1960 default: {
1961 ShouldNotReachHere();
1962 break;
1963 }
1964 }
1965 }
1968 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1969 if (right->is_constant()) {
1970 if (dest->is_single_cpu()) {
1971 int simm13 = right->as_constant_ptr()->as_jint();
1972 switch (code) {
1973 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
1974 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
1975 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
1976 default: ShouldNotReachHere();
1977 }
1978 } else {
1979 long c = right->as_constant_ptr()->as_jlong();
1980 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
1981 int simm13 = (int)c;
1982 switch (code) {
1983 case lir_logic_and:
1984 #ifndef _LP64
1985 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
1986 #endif
1987 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
1988 break;
1990 case lir_logic_or:
1991 #ifndef _LP64
1992 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
1993 #endif
1994 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
1995 break;
1997 case lir_logic_xor:
1998 #ifndef _LP64
1999 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
2000 #endif
2001 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
2002 break;
2004 default: ShouldNotReachHere();
2005 }
2006 }
2007 } else {
2008 assert(right->is_register(), "right should be in register");
2010 if (dest->is_single_cpu()) {
2011 switch (code) {
2012 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
2013 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
2014 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
2015 default: ShouldNotReachHere();
2016 }
2017 } else {
2018 #ifdef _LP64
2019 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
2020 left->as_register_lo();
2021 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
2022 right->as_register_lo();
2024 switch (code) {
2025 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
2026 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
2027 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
2028 default: ShouldNotReachHere();
2029 }
2030 #else
2031 switch (code) {
2032 case lir_logic_and:
2033 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2034 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2035 break;
2037 case lir_logic_or:
2038 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2039 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2040 break;
2042 case lir_logic_xor:
2043 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2044 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2045 break;
2047 default: ShouldNotReachHere();
2048 }
2049 #endif
2050 }
2051 }
2052 }
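// Return log2 of the array element size for the given type, i.e. the shift
// used to scale an element index into a byte offset:
// 1 byte -> 0, 2 -> 1, 4 -> 2, 8 -> 3.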
2055 int LIR_Assembler::shift_amount(BasicType t) {
2056 int elem_size = type2aelembytes(t);
2057 switch (elem_size) {
2058 case 1 : return 0;
2059 case 2 : return 1;
2060 case 4 : return 2;
2061 case 8 : return 3;
2062 }
2063 ShouldNotReachHere();
2064 return -1;
2065 }
2068 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2069 assert(exceptionOop->as_register() == Oexception, "should match");
2070 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
2072 info->add_register_oop(exceptionOop);
2074 // reuse the debug info from the safepoint poll for the throw op itself
2075 address pc_for_athrow = __ pc();
2076 int pc_for_athrow_offset = __ offset();
2077 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2078 __ set(pc_for_athrow, Oissuing_pc, rspec);
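  // the internal_word relocation keeps the embedded pc value correct if
  // this code is ever relocated.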
2079 add_call_info(pc_for_athrow_offset, info); // for exception handler
2081 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2082 __ delayed()->nop();
2083 }
2086 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2087 assert(exceptionOop->as_register() == Oexception, "should match");
2089 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
2090 __ delayed()->nop();
2091 }
2093 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2094 Register src = op->src()->as_register();
2095 Register dst = op->dst()->as_register();
2096 Register src_pos = op->src_pos()->as_register();
2097 Register dst_pos = op->dst_pos()->as_register();
2098 Register length = op->length()->as_register();
2099 Register tmp = op->tmp()->as_register();
2100 Register tmp2 = O7;
2102 int flags = op->flags();
2103 ciArrayKlass* default_type = op->expected_type();
2104 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2105 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2107 #ifdef _LP64
2108 // the positions and length are 32-bit ints; sign-extend them to 64 bits
2109 __ sra(dst_pos, 0, dst_pos);
2110 __ sra(src_pos, 0, src_pos);
2111 __ sra(length, 0, length);
2112 #endif
2114 // set up the arraycopy stub information
2115 ArrayCopyStub* stub = op->stub();
2117 // always call the stub if no type information is available; it's ok if
2118 // the known type isn't loaded, since the code sanity-checks it in debug
2119 // mode, and the type isn't required when we know the exact type.
2120 // also check that the type is an array type.
2121 if (op->expected_type() == NULL) {
2122 __ mov(src, O0);
2123 __ mov(src_pos, O1);
2124 __ mov(dst, O2);
2125 __ mov(dst_pos, O3);
2126 __ mov(length, O4);
2127 address copyfunc_addr = StubRoutines::generic_arraycopy();
2129 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2130 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2131 } else {
2132 #ifndef PRODUCT
2133 if (PrintC1Statistics) {
2134 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
2135 __ inc_counter(counter, G1, G3);
2136 }
2137 #endif
2138 __ call_VM_leaf(tmp, copyfunc_addr);
2139 }
2141 if (copyfunc_addr != NULL) {
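      // By convention the generic stub returns 0 on success, or the bitwise
      // complement of the number of elements copied on partial failure, so
      // tmp = ~O0 recovers the copied count; adjust length and the positions
      // before branching to the slow path.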
2142 __ xor3(O0, -1, tmp);
2143 __ sub(length, tmp, length);
2144 __ add(src_pos, tmp, src_pos);
2145 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2146 __ delayed()->add(dst_pos, tmp, dst_pos);
2147 } else {
2148 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2149 __ delayed()->nop();
2150 }
2151 __ bind(*stub->continuation());
2152 return;
2153 }
2155 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2157 // make sure src and dst are non-null and load array length
2158 if (flags & LIR_OpArrayCopy::src_null_check) {
2159 __ tst(src);
2160 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2161 __ delayed()->nop();
2162 }
2164 if (flags & LIR_OpArrayCopy::dst_null_check) {
2165 __ tst(dst);
2166 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2167 __ delayed()->nop();
2168 }
2170 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2171 // test src_pos register
2172 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
2173 __ delayed()->nop();
2174 }
2176 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2177 // test dst_pos register
2178 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
2179 __ delayed()->nop();
2180 }
2182 if (flags & LIR_OpArrayCopy::length_positive_check) {
2183 // make sure length isn't negative
2184 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
2185 __ delayed()->nop();
2186 }
2188 if (flags & LIR_OpArrayCopy::src_range_check) {
2189 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2190 __ add(length, src_pos, tmp);
2191 __ cmp(tmp2, tmp);
2192 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2193 __ delayed()->nop();
2194 }
2196 if (flags & LIR_OpArrayCopy::dst_range_check) {
2197 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2198 __ add(length, dst_pos, tmp);
2199 __ cmp(tmp2, tmp);
2200 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2201 __ delayed()->nop();
2202 }
2204 int shift = shift_amount(basic_type);
2206 if (flags & LIR_OpArrayCopy::type_check) {
2207 // We don't know whether the array types are compatible
2208 if (basic_type != T_OBJECT) {
2209 // Simple test for basic type arrays
2210 if (UseCompressedKlassPointers) {
2211 // no decode needed; we only compare the compressed klass words
2212 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
2213 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2214 __ cmp(tmp, tmp2);
2215 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2216 } else {
2217 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2218 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2219 __ cmp(tmp, tmp2);
2220 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2221 }
2222 __ delayed()->nop();
2223 } else {
2224 // For object arrays, if src is a sub class of dst then we can
2225 // safely do the copy.
2226 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2228 Label cont, slow;
2229 assert_different_registers(tmp, tmp2, G3, G1);
2231 __ load_klass(src, G3);
2232 __ load_klass(dst, G1);
2234 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2236 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2237 __ delayed()->nop();
2239 __ cmp(G3, 0);
2240 if (copyfunc_addr != NULL) { // use stub if available
2241 // src is not a sub class of dst so we have to do a
2242 // per-element check.
2243 __ br(Assembler::notEqual, false, Assembler::pt, cont);
2244 __ delayed()->nop();
2246 __ bind(slow);
2248 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2249 if ((flags & mask) != mask) {
2250 // One of the two types is known to be an object array; check that the other one is too.
2251 assert(flags & mask, "one of the two should be known to be an object array");
2253 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2254 __ load_klass(src, tmp);
2255 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2256 __ load_klass(dst, tmp);
2257 }
2258 int lh_offset = in_bytes(Klass::layout_helper_offset());
2260 __ lduw(tmp, lh_offset, tmp2);
2262 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2263 __ set(objArray_lh, tmp);
2264 __ cmp(tmp, tmp2);
2265 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2266 __ delayed()->nop();
2267 }
2269 Register src_ptr = O0;
2270 Register dst_ptr = O1;
2271 Register len = O2;
2272 Register chk_off = O3;
2273 Register super_k = O4;
2275 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2276 if (shift == 0) {
2277 __ add(src_ptr, src_pos, src_ptr);
2278 } else {
2279 __ sll(src_pos, shift, tmp);
2280 __ add(src_ptr, tmp, src_ptr);
2281 }
2283 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2284 if (shift == 0) {
2285 __ add(dst_ptr, dst_pos, dst_ptr);
2286 } else {
2287 __ sll(dst_pos, shift, tmp);
2288 __ add(dst_ptr, tmp, dst_ptr);
2289 }
2290 __ mov(length, len);
2291 __ load_klass(dst, tmp);
2293 int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
2294 __ ld_ptr(tmp, ek_offset, super_k);
2296 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2297 __ lduw(super_k, sco_offset, chk_off);
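      // arguments expected by checkcast_arraycopy: O0 = src element address,
      // O1 = dst element address, O2 = element count, O3 = super check
      // offset, O4 = destination element klass.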
2299 __ call_VM_leaf(tmp, copyfunc_addr);
2301 #ifndef PRODUCT
2302 if (PrintC1Statistics) {
2303 Label failed;
2304 __ br_notnull_short(O0, Assembler::pn, failed);
2305 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
2306 __ bind(failed);
2307 }
2308 #endif
2310 __ br_null(O0, false, Assembler::pt, *stub->continuation());
2311 __ delayed()->xor3(O0, -1, tmp);
2313 #ifndef PRODUCT
2314 if (PrintC1Statistics) {
2315 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
2316 }
2317 #endif
2319 __ sub(length, tmp, length);
2320 __ add(src_pos, tmp, src_pos);
2321 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
2322 __ delayed()->add(dst_pos, tmp, dst_pos);
2324 __ bind(cont);
2325 } else {
2326 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2327 __ delayed()->nop();
2328 __ bind(cont);
2329 }
2330 }
2331 }
2333 #ifdef ASSERT
2334 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2335 // Sanity check the known type with the incoming class. For the
2336 // primitive case the types must match exactly with src.klass and
2337 // dst.klass each exactly matching the default type. For the
2338 // object array case, if no type check is needed then either the
2339 // dst type is exactly the expected type and the src type is a
2340 // subtype which we can't check, or src is the same array as dst
2341 // but not necessarily exactly of type default_type.
2342 Label known_ok, halt;
2343 metadata2reg(op->expected_type()->constant_encoding(), tmp);
2344 if (UseCompressedKlassPointers) {
2345 // tmp holds the default type. It currently comes uncompressed after the
2346 // load of a constant, so encode it.
2347 __ encode_heap_oop(tmp);
2348 // load the raw (still compressed) value of the dst klass, since we
2349 // compare the compressed values directly.
2350 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2351 if (basic_type != T_OBJECT) {
2352 __ cmp(tmp, tmp2);
2353 __ br(Assembler::notEqual, false, Assembler::pn, halt);
2354 // load the raw value of the src klass.
2355 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2356 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2357 } else {
2358 __ cmp(tmp, tmp2);
2359 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2360 __ delayed()->cmp(src, dst);
2361 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2362 __ delayed()->nop();
2363 }
2364 } else {
2365 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2366 if (basic_type != T_OBJECT) {
2367 __ cmp(tmp, tmp2);
2368 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2369 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2370 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2371 } else {
2372 __ cmp(tmp, tmp2);
2373 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2374 __ delayed()->cmp(src, dst);
2375 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2376 __ delayed()->nop();
2377 }
2378 }
2379 __ bind(halt);
2380 __ stop("incorrect type information in arraycopy");
2381 __ bind(known_ok);
2382 }
2383 #endif
2385 #ifndef PRODUCT
2386 if (PrintC1Statistics) {
2387 address counter = Runtime1::arraycopy_count_address(basic_type);
2388 __ inc_counter(counter, G1, G3);
2389 }
2390 #endif
2392 Register src_ptr = O0;
2393 Register dst_ptr = O1;
2394 Register len = O2;
2396 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2397 if (shift == 0) {
2398 __ add(src_ptr, src_pos, src_ptr);
2399 } else {
2400 __ sll(src_pos, shift, tmp);
2401 __ add(src_ptr, tmp, src_ptr);
2402 }
2404 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2405 if (shift == 0) {
2406 __ add(dst_ptr, dst_pos, dst_ptr);
2407 } else {
2408 __ sll(dst_pos, shift, tmp);
2409 __ add(dst_ptr, tmp, dst_ptr);
2410 }
2412 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2413 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2414 const char *name;
2415 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2417 // arraycopy stubs take a length in number of elements, so don't scale it.
2418 __ mov(length, len);
2419 __ call_VM_leaf(tmp, entry);
2421 __ bind(*stub->continuation());
2422 }
2425 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2426 if (dest->is_single_cpu()) {
2427 #ifdef _LP64
2428 if (left->type() == T_OBJECT) {
2429 switch (code) {
2430 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
2431 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
2432 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2433 default: ShouldNotReachHere();
2434 }
2435 } else
2436 #endif
2437 switch (code) {
2438 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
2439 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
2440 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2441 default: ShouldNotReachHere();
2442 }
2443 } else {
2444 #ifdef _LP64
2445 switch (code) {
2446 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2447 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2448 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2449 default: ShouldNotReachHere();
2450 }
2451 #else
2452 switch (code) {
2453 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2454 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2455 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2456 default: ShouldNotReachHere();
2457 }
2458 #endif
2459 }
2460 }
2463 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2464 #ifdef _LP64
2465 if (left->type() == T_OBJECT) {
2466 count = count & 63; // shift count is masked to the 64-bit register width
2467 Register l = left->as_register();
2468 Register d = dest->as_register_lo();
2469 switch (code) {
2470 case lir_shl: __ sllx (l, count, d); break;
2471 case lir_shr: __ srax (l, count, d); break;
2472 case lir_ushr: __ srlx (l, count, d); break;
2473 default: ShouldNotReachHere();
2474 }
2475 return;
2476 }
2477 #endif
2479 if (dest->is_single_cpu()) {
2480 count = count & 0x1F; // Java spec
2481 switch (code) {
2482 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
2483 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
2484 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
2485 default: ShouldNotReachHere();
2486 }
2487 } else if (dest->is_double_cpu()) {
2488 count = count & 63; // Java spec
2489 switch (code) {
2490 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2491 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2492 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2493 default: ShouldNotReachHere();
2494 }
2495 } else {
2496 ShouldNotReachHere();
2497 }
2498 }
2501 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2502 assert(op->tmp1()->as_register() == G1 &&
2503 op->tmp2()->as_register() == G3 &&
2504 op->tmp3()->as_register() == G4 &&
2505 op->obj()->as_register() == O0 &&
2506 op->klass()->as_register() == G5, "must be");
2507 if (op->init_check()) {
2508 __ ldub(op->klass()->as_register(),
2509 in_bytes(InstanceKlass::init_state_offset()),
2510 op->tmp1()->as_register());
2511 add_debug_info_for_null_check_here(op->stub()->info());
2512 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2513 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2514 __ delayed()->nop();
2515 }
2516 __ allocate_object(op->obj()->as_register(),
2517 op->tmp1()->as_register(),
2518 op->tmp2()->as_register(),
2519 op->tmp3()->as_register(),
2520 op->header_size(),
2521 op->object_size(),
2522 op->klass()->as_register(),
2523 *op->stub()->entry());
2524 __ bind(*op->stub()->continuation());
2525 __ verify_oop(op->obj()->as_register());
2526 }
2529 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2530 assert(op->tmp1()->as_register() == G1 &&
2531 op->tmp2()->as_register() == G3 &&
2532 op->tmp3()->as_register() == G4 &&
2533 op->tmp4()->as_register() == O1 &&
2534 op->klass()->as_register() == G5, "must be");
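  // the array length arrives as a 32-bit int; sign-extend it on 64-bit VMs
  // before it is used to size the allocation.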
2536 LP64_ONLY( __ signx(op->len()->as_register()); )
2537 if (UseSlowPath ||
2538 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2539 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2540 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2541 __ delayed()->nop();
2542 } else {
2543 __ allocate_array(op->obj()->as_register(),
2544 op->len()->as_register(),
2545 op->tmp1()->as_register(),
2546 op->tmp2()->as_register(),
2547 op->tmp3()->as_register(),
2548 arrayOopDesc::header_size(op->type()),
2549 type2aelembytes(op->type()),
2550 op->klass()->as_register(),
2551 *op->stub()->entry());
2552 }
2553 __ bind(*op->stub()->continuation());
2554 }
2557 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2558 ciMethodData *md, ciProfileData *data,
2559 Register recv, Register tmp1, Label* update_done) {
2560 uint i;
2561 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2562 Label next_test;
2563 // See if the receiver is receiver[i].
2564 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2565 mdo_offset_bias);
2566 __ ld_ptr(receiver_addr, tmp1);
2567 __ verify_oop(tmp1);
2568 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
2569 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2570 mdo_offset_bias);
2571 __ ld_ptr(data_addr, tmp1);
2572 __ add(tmp1, DataLayout::counter_increment, tmp1);
2573 __ st_ptr(tmp1, data_addr);
2574 __ ba(*update_done);
2575 __ delayed()->nop();
2576 __ bind(next_test);
2577 }
2579 // Didn't find receiver; find next empty slot and fill it in
2580 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2581 Label next_test;
2582 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2583 mdo_offset_bias);
2584 __ ld_ptr(recv_addr, tmp1);
2585 __ br_notnull_short(tmp1, Assembler::pt, next_test);
2586 __ st_ptr(recv, recv_addr);
2587 __ set(DataLayout::counter_increment, tmp1);
2588 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2589 mdo_offset_bias);
2590 __ ba(*update_done);
2591 __ delayed()->nop();
2592 __ bind(next_test);
2593 }
2594 }
2597 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2598 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2599 md = method->method_data_or_null();
2600 assert(md != NULL, "Sanity");
2601 data = md->bci_to_data(bci);
2602 assert(data != NULL, "need data for checkcast");
2603 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2604 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2605 // The offset is large, so bias the mdo by the base of the slot so
2606 // that the ld can use a simm13 offset to reference the slots of the data
2607 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2608 }
2609 }
2611 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2612 // we always need a stub for the failure case.
2613 CodeStub* stub = op->stub();
2614 Register obj = op->object()->as_register();
2615 Register k_RInfo = op->tmp1()->as_register();
2616 Register klass_RInfo = op->tmp2()->as_register();
2617 Register dst = op->result_opr()->as_register();
2618 Register Rtmp1 = op->tmp3()->as_register();
2619 ciKlass* k = op->klass();
2622 if (obj == k_RInfo) {
2623 k_RInfo = klass_RInfo;
2624 klass_RInfo = obj;
2625 }
2627 ciMethodData* md;
2628 ciProfileData* data;
2629 int mdo_offset_bias = 0;
2630 if (op->should_profile()) {
2631 ciMethod* method = op->profiled_method();
2632 assert(method != NULL, "Should have method");
2633 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2635 Label not_null;
2636 __ br_notnull_short(obj, Assembler::pn, not_null);
2637 Register mdo = k_RInfo;
2638 Register data_val = Rtmp1;
2639 metadata2reg(md->constant_encoding(), mdo);
2640 if (mdo_offset_bias > 0) {
2641 __ set(mdo_offset_bias, data_val);
2642 __ add(mdo, data_val, mdo);
2643 }
2644 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2645 __ ldub(flags_addr, data_val);
2646 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2647 __ stb(data_val, flags_addr);
2648 __ ba(*obj_is_null);
2649 __ delayed()->nop();
2650 __ bind(not_null);
2651 } else {
2652 __ br_null(obj, false, Assembler::pn, *obj_is_null);
2653 __ delayed()->nop();
2654 }
2656 Label profile_cast_failure, profile_cast_success;
2657 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
2658 Label *success_target = op->should_profile() ? &profile_cast_success : success;
2660 // patching may screw with our temporaries on SPARC,
2661 // so let's do it before loading the class
2662 if (k->is_loaded()) {
2663 metadata2reg(k->constant_encoding(), k_RInfo);
2664 } else {
2665 klass2reg_with_patching(k_RInfo, op->info_for_patch());
2666 }
2667 assert(obj != k_RInfo, "must be different");
2669 // get object class
2670 // not a safepoint as obj null check happens earlier
2671 __ load_klass(obj, klass_RInfo);
2672 if (op->fast_check()) {
2673 assert_different_registers(klass_RInfo, k_RInfo);
2674 __ cmp(k_RInfo, klass_RInfo);
2675 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
2676 __ delayed()->nop();
2677 } else {
2678 bool need_slow_path = true;
2679 if (k->is_loaded()) {
2680 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
2681 need_slow_path = false;
2682 // perform the fast part of the checking logic
2683 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2684 (need_slow_path ? success_target : NULL),
2685 failure_target, NULL,
2686 RegisterOrConstant(k->super_check_offset()));
2687 } else {
2688 // perform the fast part of the checking logic
2689 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
2690 failure_target, NULL);
2691 }
2692 if (need_slow_path) {
2693 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2694 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2695 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2696 __ delayed()->nop();
2697 __ cmp(G3, 0);
2698 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2699 __ delayed()->nop();
2700 // Fall through to success case
2701 }
2702 }
2704 if (op->should_profile()) {
2705 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2706 assert_different_registers(obj, mdo, recv, tmp1);
2707 __ bind(profile_cast_success);
2708 metadata2reg(md->constant_encoding(), mdo);
2709 if (mdo_offset_bias > 0) {
2710 __ set(mdo_offset_bias, tmp1);
2711 __ add(mdo, tmp1, mdo);
2712 }
2713 __ load_klass(obj, recv);
2714 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
2715 // Jump over the failure case
2716 __ ba(*success);
2717 __ delayed()->nop();
2718 // Cast failure case
2719 __ bind(profile_cast_failure);
2720 metadata2reg(md->constant_encoding(), mdo);
2721 if (mdo_offset_bias > 0) {
2722 __ set(mdo_offset_bias, tmp1);
2723 __ add(mdo, tmp1, mdo);
2724 }
2725 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2726 __ ld_ptr(data_addr, tmp1);
2727 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2728 __ st_ptr(tmp1, data_addr);
2729 __ ba(*failure);
2730 __ delayed()->nop();
2731 }
2732 __ ba(*success);
2733 __ delayed()->nop();
2734 }
2736 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2737 LIR_Code code = op->code();
2738 if (code == lir_store_check) {
2739 Register value = op->object()->as_register();
2740 Register array = op->array()->as_register();
2741 Register k_RInfo = op->tmp1()->as_register();
2742 Register klass_RInfo = op->tmp2()->as_register();
2743 Register Rtmp1 = op->tmp3()->as_register();
2745 __ verify_oop(value);
2746 CodeStub* stub = op->stub();
2747 // check if it needs to be profiled
2748 ciMethodData* md;
2749 ciProfileData* data;
2750 int mdo_offset_bias = 0;
2751 if (op->should_profile()) {
2752 ciMethod* method = op->profiled_method();
2753 assert(method != NULL, "Should have method");
2754 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2755 }
2756 Label profile_cast_success, profile_cast_failure, done;
2757 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
2758 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
2760 if (op->should_profile()) {
2761 Label not_null;
2762 __ br_notnull_short(value, Assembler::pn, not_null);
2763 Register mdo = k_RInfo;
2764 Register data_val = Rtmp1;
2765 metadata2reg(md->constant_encoding(), mdo);
2766 if (mdo_offset_bias > 0) {
2767 __ set(mdo_offset_bias, data_val);
2768 __ add(mdo, data_val, mdo);
2769 }
2770 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2771 __ ldub(flags_addr, data_val);
2772 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2773 __ stb(data_val, flags_addr);
2774 __ ba_short(done);
2775 __ bind(not_null);
2776 } else {
2777 __ br_null_short(value, Assembler::pn, done);
2778 }
2779 add_debug_info_for_null_check_here(op->info_for_exception());
2780 __ load_klass(array, k_RInfo);
2781 __ load_klass(value, klass_RInfo);
2783 // load the element klass of the array
2784 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
2785 // perform the fast part of the checking logic
2786 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
2788 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2789 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2790 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2791 __ delayed()->nop();
2792 __ cmp(G3, 0);
2793 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2794 __ delayed()->nop();
2795 // fall through to the success case
2797 if (op->should_profile()) {
2798 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2799 assert_different_registers(value, mdo, recv, tmp1);
2800 __ bind(profile_cast_success);
2801 metadata2reg(md->constant_encoding(), mdo);
2802 if (mdo_offset_bias > 0) {
2803 __ set(mdo_offset_bias, tmp1);
2804 __ add(mdo, tmp1, mdo);
2805 }
2806 __ load_klass(value, recv);
2807 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2808 __ ba_short(done);
2809 // Cast failure case
2810 __ bind(profile_cast_failure);
2811 metadata2reg(md->constant_encoding(), mdo);
2812 if (mdo_offset_bias > 0) {
2813 __ set(mdo_offset_bias, tmp1);
2814 __ add(mdo, tmp1, mdo);
2815 }
2816 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2817 __ ld_ptr(data_addr, tmp1);
2818 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2819 __ st_ptr(tmp1, data_addr);
2820 __ ba(*stub->entry());
2821 __ delayed()->nop();
2822 }
2823 __ bind(done);
2824 } else if (code == lir_checkcast) {
2825 Register obj = op->object()->as_register();
2826 Register dst = op->result_opr()->as_register();
2827 Label success;
2828 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
2829 __ bind(success);
2830 __ mov(obj, dst);
2831 } else if (code == lir_instanceof) {
2832 Register obj = op->object()->as_register();
2833 Register dst = op->result_opr()->as_register();
2834 Label success, failure, done;
2835 emit_typecheck_helper(op, &success, &failure, &failure);
2836 __ bind(failure);
2837 __ set(0, dst);
2838 __ ba_short(done);
2839 __ bind(success);
2840 __ set(1, dst);
2841 __ bind(done);
2842 } else {
2843 ShouldNotReachHere();
2844 }
2846 }
2849 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2850 if (op->code() == lir_cas_long) {
2851 assert(VM_Version::supports_cx8(), "wrong machine");
2852 Register addr = op->addr()->as_pointer_register();
2853 Register cmp_value_lo = op->cmp_value()->as_register_lo();
2854 Register cmp_value_hi = op->cmp_value()->as_register_hi();
2855 Register new_value_lo = op->new_value()->as_register_lo();
2856 Register new_value_hi = op->new_value()->as_register_hi();
2857 Register t1 = op->tmp1()->as_register();
2858 Register t2 = op->tmp2()->as_register();
2859 #ifdef _LP64
2860 __ mov(cmp_value_lo, t1);
2861 __ mov(new_value_lo, t2);
2862 // perform the compare and swap operation
2863 __ casx(addr, t1, t2);
2864 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2865 // overwritten with the original value in "addr" and will be equal to t1.
2866 __ cmp(t1, t2);
2867 #else
2868 // move high and low halves of long values into single registers
2869 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
2870 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2871 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
2872 __ sllx(new_value_hi, 32, t2);
2873 __ srl(new_value_lo, 0, new_value_lo);
2874 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
2875 // perform the compare and swap operation
2876 __ casx(addr, t1, t2);
2877 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2878 // overwritten with the original value in "addr" and will be equal to t1.
2879 // Produce icc flag for 32bit.
2880 __ sub(t1, t2, t2);
2881 __ srlx(t2, 32, t1);
2882 __ orcc(t2, t1, G0);
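    // t2 now holds the 64-bit difference and t1 its high half; OR-ing them
    // sets icc.Z exactly when the full 64-bit values were equal.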
2883 #endif
2884 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2885 Register addr = op->addr()->as_pointer_register();
2886 Register cmp_value = op->cmp_value()->as_register();
2887 Register new_value = op->new_value()->as_register();
2888 Register t1 = op->tmp1()->as_register();
2889 Register t2 = op->tmp2()->as_register();
2890 __ mov(cmp_value, t1);
2891 __ mov(new_value, t2);
2892 if (op->code() == lir_cas_obj) {
2893 if (UseCompressedOops) {
2894 __ encode_heap_oop(t1);
2895 __ encode_heap_oop(t2);
2896 __ cas(addr, t1, t2);
2897 } else {
2898 __ cas_ptr(addr, t1, t2);
2899 }
2900 } else {
2901 __ cas(addr, t1, t2);
2902 }
2903 __ cmp(t1, t2);
2904 } else {
2905 Unimplemented();
2906 }
2907 }
2909 void LIR_Assembler::set_24bit_FPU() {
2910 Unimplemented();
2911 }
2914 void LIR_Assembler::reset_FPU() {
2915 Unimplemented();
2916 }
2919 void LIR_Assembler::breakpoint() {
2920 __ breakpoint_trap();
2921 }
2924 void LIR_Assembler::push(LIR_Opr opr) {
2925 Unimplemented();
2926 }
2929 void LIR_Assembler::pop(LIR_Opr opr) {
2930 Unimplemented();
2931 }
2934 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2935 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2936 Register dst = dst_opr->as_register();
2937 Register reg = mon_addr.base();
2938 int offset = mon_addr.disp();
2939 // compute pointer to BasicLock
2940 if (mon_addr.is_simm13()) {
2941 __ add(reg, offset, dst);
2942 } else {
2943 __ set(offset, dst);
2944 __ add(dst, reg, dst);
2945 }
2946 }
2949 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2950 Register obj = op->obj_opr()->as_register();
2951 Register hdr = op->hdr_opr()->as_register();
2952 Register lock = op->lock_opr()->as_register();
2954 // obj may not be an oop
2955 if (op->code() == lir_lock) {
2956 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2957 if (UseFastLocking) {
2958 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2959 // add debug info for NullPointerException only if one is possible
2960 if (op->info() != NULL) {
2961 add_debug_info_for_null_check_here(op->info());
2962 }
2963 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2964 } else {
2965 // always do slow locking
2966 // note: the slow locking code could be inlined here, but if we use
2967 // slow locking, speed doesn't matter anyway and this solution is
2968 // simpler and requires less duplicated code; additionally, the
2969 // slow locking code is the same in either case, which simplifies
2970 // debugging
2971 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2972 __ delayed()->nop();
2973 }
2974 } else {
2975 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2976 if (UseFastLocking) {
2977 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2978 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2979 } else {
2980 // always do slow unlocking
2981 // note: the slow unlocking code could be inlined here, but if we use
2982 // slow unlocking, speed doesn't matter anyway and this solution is
2983 // simpler and requires less duplicated code; additionally, the
2984 // slow unlocking code is the same in either case, which simplifies
2985 // debugging
2986 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2987 __ delayed()->nop();
2988 }
2989 }
2990 __ bind(*op->stub()->continuation());
2991 }
2994 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2995 ciMethod* method = op->profiled_method();
2996 int bci = op->profiled_bci();
2997 ciMethod* callee = op->profiled_callee();
2999 // Update counter for all call types
3000 ciMethodData* md = method->method_data_or_null();
3001 assert(md != NULL, "Sanity");
3002 ciProfileData* data = md->bci_to_data(bci);
3003 assert(data->is_CounterData(), "need CounterData for calls");
3004 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3005 Register mdo = op->mdo()->as_register();
3006 #ifdef _LP64
3007 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
3008 Register tmp1 = op->tmp1()->as_register_lo();
3009 #else
3010 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
3011 Register tmp1 = op->tmp1()->as_register();
3012 #endif
3013 metadata2reg(md->constant_encoding(), mdo);
3014 int mdo_offset_bias = 0;
3015 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
3016 data->size_in_bytes())) {
3017 // The offset is large, so bias the mdo by the base of the slot so
3018 // that the ld can use a simm13 offset to reference the slots of the data
3019 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
3020 __ set(mdo_offset_bias, O7);
3021 __ add(mdo, O7, mdo);
3022 }
3024 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
3025 Bytecodes::Code bc = method->java_code_at_bci(bci);
3026 const bool callee_is_static = callee->is_loaded() && callee->is_static();
3027 // Perform additional virtual call profiling for invokevirtual and
3028 // invokeinterface bytecodes
3029 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
3030 !callee_is_static && // required for optimized MH invokes
3031 C1ProfileVirtualCalls) {
3032 assert(op->recv()->is_single_cpu(), "recv must be allocated");
3033 Register recv = op->recv()->as_register();
3034 assert_different_registers(mdo, tmp1, recv);
3035 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3036 ciKlass* known_klass = op->known_holder();
3037 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3038 // We know the type that will be seen at this call site; we can
3039 // statically update the MethodData* rather than needing to do
3040 // dynamic tests on the receiver type
3042 // NOTE: we should probably put a lock around this search to
3043 // avoid collisions with concurrent compilations
3044 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3045 uint i;
3046 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3047 ciKlass* receiver = vc_data->receiver(i);
3048 if (known_klass->equals(receiver)) {
3049 Address data_addr(mdo, md->byte_offset_of_slot(data,
3050 VirtualCallData::receiver_count_offset(i)) -
3051 mdo_offset_bias);
3052 __ ld_ptr(data_addr, tmp1);
3053 __ add(tmp1, DataLayout::counter_increment, tmp1);
3054 __ st_ptr(tmp1, data_addr);
3055 return;
3056 }
3057 }
3059 // Receiver type not found in profile data; select an empty slot
3061 // Note that this is less efficient than it should be, because it
3062 // always writes the receiver part of the VirtualCallData rather
3063 // than doing so only the first time
3064 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3065 ciKlass* receiver = vc_data->receiver(i);
3066 if (receiver == NULL) {
3067 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
3068 mdo_offset_bias);
3069 metadata2reg(known_klass->constant_encoding(), tmp1);
3070 __ st_ptr(tmp1, recv_addr);
3071 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
3072 mdo_offset_bias);
3073 __ ld_ptr(data_addr, tmp1);
3074 __ add(tmp1, DataLayout::counter_increment, tmp1);
3075 __ st_ptr(tmp1, data_addr);
3076 return;
3077 }
3078 }
3079 } else {
3080 __ load_klass(recv, recv);
3081 Label update_done;
3082 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
3083 // Receiver did not match any saved receiver and there is no empty row for it.
3084 // Increment total counter to indicate polymorphic case.
3085 __ ld_ptr(counter_addr, tmp1);
3086 __ add(tmp1, DataLayout::counter_increment, tmp1);
3087 __ st_ptr(tmp1, counter_addr);
3089 __ bind(update_done);
3090 }
3091 } else {
3092 // Static call
3093 __ ld_ptr(counter_addr, tmp1);
3094 __ add(tmp1, DataLayout::counter_increment, tmp1);
3095 __ st_ptr(tmp1, counter_addr);
3096 }
3097 }
3099 void LIR_Assembler::align_backward_branch_target() {
3100 __ align(OptoLoopAlignment);
3101 }
3104 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
3105 // make sure we are expecting a delay
3106 // this has the side effect of clearing the delay state
3107 // so we can use _masm instead of _masm->delayed() to do the
3108 // code generation.
3109 __ delayed();
3111 // make sure we only emit one instruction
3112 int offset = code_offset();
3113 op->delay_op()->emit_code(this);
3114 #ifdef ASSERT
3115 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
3116 op->delay_op()->print();
3117 }
3118 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
3119 "only one instruction can go in a delay slot");
3120 #endif
3122 // we may also be emitting the call info for the instruction
3123 // whose delay slot we occupy.
3124 CodeEmitInfo* call_info = op->call_info();
3125 if (call_info) {
3126 add_call_info(code_offset(), call_info);
3127 }
3129 if (VerifyStackAtCalls) {
3130 _masm->sub(FP, SP, O7);
3131 _masm->cmp(O7, initial_frame_size_in_bytes());
3132 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
3133 }
3134 }
3137 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3138 assert(left->is_register(), "can only handle registers");
3140 if (left->is_single_cpu()) {
3141 __ neg(left->as_register(), dest->as_register());
3142 } else if (left->is_single_fpu()) {
3143 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
3144 } else if (left->is_double_fpu()) {
3145 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
3146 } else {
3147 assert (left->is_double_cpu(), "Must be a long");
3148 Register Rlow = left->as_register_lo();
3149 Register Rhi = left->as_register_hi();
3150 #ifdef _LP64
3151 __ sub(G0, Rlow, dest->as_register_lo());
3152 #else
3153 __ subcc(G0, Rlow, dest->as_register_lo());
3154 __ subc (G0, Rhi, dest->as_register_hi());
3155 #endif
3156 }
3157 }
3160 void LIR_Assembler::fxch(int i) {
3161 Unimplemented();
3162 }
3164 void LIR_Assembler::fld(int i) {
3165 Unimplemented();
3166 }
3168 void LIR_Assembler::ffree(int i) {
3169 Unimplemented();
3170 }
3172 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
3173 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3175 // if tmp is invalid, then the function being called doesn't destroy the thread
3176 if (tmp->is_valid()) {
3177 __ save_thread(tmp->as_register());
3178 }
3179 __ call(dest, relocInfo::runtime_call_type);
3180 __ delayed()->nop();
3181 if (info != NULL) {
3182 add_call_info_here(info);
3183 }
3184 if (tmp->is_valid()) {
3185 __ restore_thread(tmp->as_register());
3186 }
3188 #ifdef ASSERT
3189 __ verify_thread();
3190 #endif // ASSERT
3191 }
3194 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3195 #ifdef _LP64
3196 ShouldNotReachHere();
3197 #endif
3199 NEEDS_CLEANUP;
3200 if (type == T_LONG) {
3201 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
3203 // (extended to allow indexed as well as constant-displacement addressing for JSR-166)
3204 Register idx = noreg; // contains either constant offset or index
3206 int disp = mem_addr->disp();
3207 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3208 if (!Assembler::is_simm13(disp)) {
3209 idx = O7;
3210 __ set(disp, idx);
3211 }
3212 } else {
3213 assert(disp == 0, "not both indexed and disp");
3214 idx = mem_addr->index()->as_register();
3215 }
3217 int null_check_offset = -1;
3219 Register base = mem_addr->base()->as_register();
3220 if (src->is_register() && dest->is_address()) {
3221 // G4 is high half, G5 is low half
3222 if (VM_Version::v9_instructions_work()) {
3223 // clear the top bits of G5, and scale up G4
3224 __ srl (src->as_register_lo(), 0, G5);
3225 __ sllx(src->as_register_hi(), 32, G4);
3226 // combine the two halves into the 64 bits of G4
3227 __ or3(G4, G5, G4);
3228 null_check_offset = __ offset();
3229 if (idx == noreg) {
3230 __ stx(G4, base, disp);
3231 } else {
3232 __ stx(G4, base, idx);
3233 }
3234 } else {
3235 __ mov (src->as_register_hi(), G4);
3236 __ mov (src->as_register_lo(), G5);
3237 null_check_offset = __ offset();
3238 if (idx == noreg) {
3239 __ std(G4, base, disp);
3240 } else {
3241 __ std(G4, base, idx);
3242 }
3243 }
3244 } else if (src->is_address() && dest->is_register()) {
3245 null_check_offset = __ offset();
3246 if (VM_Version::v9_instructions_work()) {
3247 if (idx == noreg) {
3248 __ ldx(base, disp, G5);
3249 } else {
3250 __ ldx(base, idx, G5);
3251 }
3252 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3253 __ mov (G5, dest->as_register_lo()); // copy low half into lo
3254 } else {
3255 if (idx == noreg) {
3256 __ ldd(base, disp, G4);
3257 } else {
3258 __ ldd(base, idx, G4);
3259 }
3260 // G4 is high half, G5 is low half
3261 __ mov (G4, dest->as_register_hi());
3262 __ mov (G5, dest->as_register_lo());
3263 }
3264 } else {
3265 Unimplemented();
3266 }
3267 if (info != NULL) {
3268 add_debug_info_for_null_check(null_check_offset, info);
3269 }
3271 } else {
3272 // use normal move for all other volatiles since they don't need
3273 // special handling to remain atomic.
3274 move_op(src, dest, type, lir_patch_none, info, false, false, false);
3275 }
3276 }
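
// Memory barriers.  SPARC TSO already orders load-load, load-store and
// store-store accesses, so only a store followed by a load needs an
// explicit membar; the remaining barrier kinds below are no-ops.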
void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreStore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadStore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
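
// pack64/unpack64 convert between a sequential 32-bit register pair and a
// single 64-bit register.  Worked example (a sketch): with 0x00000001 in
// src and 0x00000002 in src->successor(), pack64 leaves 0x0000000100000002
// in dst; unpack64 performs the inverse split.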

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs, 0, rd->successor());
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 &&
         Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");

  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
}
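
// G2 is the dedicated thread register on SPARC, so reading the current
// thread is a single register-to-register move.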
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}
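
// Peephole over the LIR: fill branch and call delay slots.  A preceding
// single-instruction op with no debug info can be swapped into the slot;
// otherwise an explicit nop delay op is inserted.  Illustrative
// transformation (a sketch in LIR terms):
//   before:  move [t1] -> [t2]        after:  branch [L]
//            branch [L]                       delay (move [t1] -> [t2])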
void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (delay_op == NULL) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

#if defined(TIERED) && !defined(_LP64)
        // fixup the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's
        // such a mismatch between the single reg and double reg
        // calling convention.
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          // copy the call's argument operands into the new list
          for (int a = 0; a < callop->arguments()->length(); a++) {
            arguments->append(callop->arguments()->at(a));
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}
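
// Atomic exchange is implemented with the SPARC swap instruction, which
// operates on 32 bits only: plain ints swap in place, while on _LP64 an
// oop is compressed into tmp, swapped, and decompressed back (a sketch
// of the sequence emitted below):
//   encode_heap_oop  obj -> narrow
//   swap             [addr], narrow
//   decode_heap_oop  narrow -> obj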
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  LIR_Address* addr = src->as_address_ptr();

  assert(data == dest, "swap uses only 2 operands");
  assert(code == lir_xchg, "no xadd on sparc");

  if (data->type() == T_INT) {
    __ swap(as_Address(addr), data->as_register());
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
#ifdef _LP64
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
#else
    __ swap(as_Address(addr), obj);
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __