Fri, 13 Jul 2018 14:14:12 +0800
#7277 Implement LIR_Assembler::emit_profile_type for MIPS
Reviewed-by: fujie, aoqi
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "gc_interface/collectedHeap.hpp"
37 #include "memory/barrierSet.hpp"
38 #include "memory/cardTableModRefBS.hpp"
39 #include "nativeInst_mips.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #define __ _masm->
// Ensure that neither tmp1 nor tmp2 aliases 'preserve'.  Whichever one does
// is redirected to 'extra'; the asserts check that 'extra' itself does not
// alias any of the remaining temps before the substitution.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  // Postcondition: preserve, tmp1 and tmp2 are pairwise distinct.
  assert_different_registers(preserve, tmp1, tmp2);
}
60 static void select_different_registers(Register preserve,
61 Register extra,
62 Register &tmp1,
63 Register &tmp2,
64 Register &tmp3) {
65 if (tmp1 == preserve) {
66 assert_different_registers(tmp1, tmp2, tmp3, extra);
67 tmp1 = extra;
68 } else if (tmp2 == preserve) {
69 tmp2 = extra;
70 } else if (tmp3 == preserve) {
71 assert_different_registers(tmp1, tmp2, tmp3, extra);
72 tmp3 = extra;
73 }
74 assert_different_registers(preserve, tmp1, tmp2, tmp3);
75 }
77 // need add method Assembler::is_simm16 in assembler_gs2.hpp
78 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
79 if (opr->is_constant()) {
80 LIR_Const* constant = opr->as_constant_ptr();
81 switch (constant->type()) {
82 case T_INT: {
83 jint value = constant->as_jint();
84 return Assembler::is_simm16(value);
85 }
86 default:
87 return false;
88 }
89 }
90 return false;
91 }
//FIXME, which register should be used?
// The incoming receiver is passed in T0 on this port.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::_t0_oop_opr;
}
// The OSR buffer pointer arrives in the same register as the receiver;
// on 64-bit it is exposed as a long operand (both halves the same
// register), on 32-bit as a single-word operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
#ifdef _LP64
  Register r = receiverOpr()->as_register();
  return FrameMap::as_long_opr(r, r);
#else
  return FrameMap::as_opr(receiverOpr()->as_register());
#endif
}
111 //--------------fpu register translations-----------------------
112 // FIXME:I do not know what's to do for mips fpu
114 address LIR_Assembler::float_constant(float f) {
115 address const_addr = __ float_constant(f);
116 if (const_addr == NULL) {
117 bailout("const section overflow");
118 return __ code()->consts()->start();
119 } else {
120 return const_addr;
121 }
122 }
125 address LIR_Assembler::double_constant(double d) {
126 address const_addr = __ double_constant(d);
127 if (const_addr == NULL) {
128 bailout("const section overflow");
129 return __ code()->consts()->start();
130 } else {
131 return const_addr;
132 }
133 }
// x87-style FPU state reset; not implemented for MIPS.
void LIR_Assembler::reset_FPU() {
  Unimplemented();
}
// x87-specific precision-control mode; not implemented for MIPS.
void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}
//FIXME.
// x87 FPU-stack pop; MIPS has a flat FPU register file, so nothing to do.
void LIR_Assembler::fpop() {
  // do nothing
}
// x87 FPU-stack exchange; no-op on MIPS (flat FPU register file).
void LIR_Assembler::fxch(int i) {
  // do nothing
}
// x87 FPU-stack load; no-op on MIPS (flat FPU register file).
void LIR_Assembler::fld(int i) {
  // do nothing
}
// x87 FPU-stack free; no-op on MIPS (flat FPU register file).
void LIR_Assembler::ffree(int i) {
  // do nothing
}
// Emit a breakpoint trap: MIPS 'break' instruction with code 17.
void LIR_Assembler::breakpoint() {
  __ brk(17);
}
//FIXME, opr can not be float?
// Push an operand onto the machine stack.  Single/double cpu registers,
// stack slots and T_OBJECT/T_INT constants are supported; floating-point
// operands are not (see FIXME above).
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // Push the high word first so the low word ends up at the lower address.
    __ push_reg(opr->as_register_hi());
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Pop the top of the machine stack into a single cpu register; other
// operand kinds are not supported on this port.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu() ) {
    __ pop(opr->as_register());
  } else {
    assert(false, "Must be single word register or floating-point register");
  }
}
// Convert a LIR_Address into a machine Address (base register plus
// displacement).  Index/scale components are not handled here.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
#ifndef _LP64
  Register reg = addr->base()->as_register();
#else
  //FIXME aoqi
  // On 64-bit the base may be a double-cpu (long) operand; use its low half.
  Register reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
#endif
  // now we need this for parameter pass
  return Address(reg, addr->disp());
}
// Address of the low word of a two-word value: identical to the base address.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
// Address of the high word of a two-word value: base address plus half a long.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp()+longSize/2);
}
//void LIR_Assembler::osr_entry(IRScope* scope, int number_of_locks, Label* continuation, int osr_bci) {
// Emit the OSR (on-stack replacement) entry point: record its code offset,
// build the compiled frame, and copy the monitors out of the OSR buffer
// into the compiled activation's monitor slots.
void LIR_Assembler::osr_entry() {
  // assert(scope->is_top_scope(), "inlined OSR not yet implemented");
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // S7: interpreter locals pointer
  // V1: interpreter locks pointer
  // RA: return address
  // T0: OSR buffer
  // build frame
  // ciMethod* m = scope->method();
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // T0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  // note: we do osr only if the expression stack at the loop beginning is empty,
  // in which case the spill area is empty too and we don't have to setup
  // spilled locals
  //
  // copy monitors
  // V1: pointer to locks
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the first (i.e. last-allocated) monitor in the OSR buffer;
    // monitors follow the locals area.
    int monitor_offset = BytesPerWord * method()->max_locals()+
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset =monitor_offset - (i * BasicObjectLock::size())*BytesPerWord;
#ifdef ASSERT
      // Verify the locked object in the buffer is non-NULL.
      {
        Label L;
        //__ lw(AT, V1, slot_offset * BytesPerWord + BasicObjectLock::obj_offset_in_bytes());
        __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock record and the object reference into this frame's
      // monitor slot i.
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_object(i));
    }
  }
}
296 int LIR_Assembler::check_icache() {
297 Register receiver = FrameMap::receiver_opr->as_register();
298 Register ic_klass = IC_Klass;
300 int offset = __ offset();
301 __ inline_cache_check(receiver, IC_Klass);
302 __ align(CodeEntryAlignment);
303 return offset;
306 }
// Load a not-yet-resolved oop constant into 'reg' and register a
// PatchingStub so the real value can be patched in after resolution.
// The emitted sequence must have the fixed shape that
// NativeMovConstReg::verify / PatchingStub::install expect.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  int oop_index = __ oop_recorder()->allocate_oop_index(o);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  __ lui(reg, Assembler::split_high((int)o));
  __ addiu(reg, reg, Assembler::split_low((int)o));
#else
  // li may not pass NativeMovConstReg::verify (see nativeMovConstReg_at(pc_start())
  // in PatchingStub::install); li48 emits the expected fixed-length sequence. by aoqi
  __ li48(reg, (long)o);
#endif
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// Load a not-yet-resolved Klass* constant into 'reg' with a PatchingStub,
// analogous to jobject2reg_with_patching but for metadata.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  // li48 emits the fixed-length sequence the patching machinery expects.
  __ li48(reg, (long)o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// This specifies the stack-pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
  // Subtract two words to account for the return address and link,
  // which are pushed separately from this decrement.
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
// Emit the out-of-line exception handler stub.  Returns the code offset
// of the handler, or -1 on stub-section overflow (compilation bails out).
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  // Lazy deopt bug 4932387. If last instruction is a call then we
  // need an area to patch where we won't overwrite the exception
  // handler. This means we need 5 bytes. Could use a fat_nop
  // but since this never gets executed it doesn't really make
  // much difference.
  //
  for (int i = 0; i < (NativeCall::instruction_size/BytesPerInstWord + 1) ; i++ ) {
    __ nop();
  }

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space in the stub section
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in V0, and V1
  // no other registers need to be preserved, so invalidate them
  //__ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(V0);

  // search an exception handler (V0: exception oop, V1: throwing pc)
  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = TREG;
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ ld_ptr(V0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(V0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ move(S0, V0);  // Preserve the exception (S0 is callee-saved on MIPS)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::_v0_opr);
    stub = new MonitorExitStub(FrameMap::_v0_opr, true, 0);
    __ unlock_object(A0, A1, V0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ move(A0, thread);
    __ mov_metadata(A1, method()->constant_encoding());
    __ patchable_call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ move(V0, S0);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  // leave activation of nmethod
  __ remove_frame(initial_frame_size_in_bytes());

  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
// Emit the out-of-line deoptimization handler stub (a call into the
// deopt blob's unpack entry).  Returns the code offset of the handler,
// or -1 on stub-section overflow.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)

  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }
  int offset = code_offset();

  // compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  __ call(SharedRuntime::deopt_blob()->unpack());
  __ delayed()->nop();

  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR-entry and therefore, we generate a slow version for OSR's
//void LIR_Assembler::emit_string_compare(IRScope* scope) {
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  // get two string objects: receiver already in T0, second string into T1
  __ ld_ptr(T1, arg1->as_register());
  // Compute the address of the first char of string 1:
  //   value array + 2*offset + char-array base offset.
  //__ ld_ptr(T2, T0, java_lang_String::value_offset_in_bytes()); //value, T_CHAR array
  __ load_heap_oop(T2, Address(T0, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T0, java_lang_String::offset_offset_in_bytes()); //offset
  __ shl(AT, 1);  // offset * sizeof(jchar)
  __ add(T2, T2, AT);
  __ addi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T2 is the address of the first char in first string(T0)

  add_debug_info_for_null_check_here(info);
  // Same computation for string 2 (address of first char into T3).
  //__ ld_ptr(T3, T1, java_lang_String::value_offset_in_bytes());
  __ load_heap_oop(T3, Address(T1, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T1, java_lang_String::offset_offset_in_bytes());
  __ shl(AT, 1);
  __ add(T3, T3, AT);
  __ addi(T3, T3, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T3 is the address of the first char in second string(T1)

#ifndef _LP64
  //by_css
  // compute minimum length (in T4) and difference of lengths (V0)
  Label L;
  __ lw (T4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw (T5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ subu(V0, T4, T5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move (T4, T5);
  __ bind (L);

  // Walk both strings; exit at the first differing character or after
  // min-length chars.  The bare delayed() calls let the following
  // instruction occupy the branch delay slot.
  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(T4, R0, LoopEnd);
  __ delayed();

  __ addi(T2, T2, 2);

  // compare current character
  __ lhu(T5, T2, -2);
  __ lhu(T6, T3, 0);
  __ bne(T5, T6, haveResult);
  __ delayed();

  __ addi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(T4, T4, -1);

  __ bind(haveResult);
  __ subu(V0, T5, T6);

  __ bind(LoopEnd);
#else
  // compute minimum length (in A4) and difference of lengths (V0)
  Label L;
  __ lw (A4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw (A5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ dsubu(V0, A4, A5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move (A4, A5);
  __ bind (L);

  // Same loop as the 32-bit variant, using A-registers and 64-bit ops.
  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(A4, R0, LoopEnd);
  __ delayed();

  __ daddi(T2, T2, 2);

  // compare current character
  __ lhu(A5, T2, -2);
  __ lhu(A6, T3, 0);
  __ bne(A5, A6, haveResult);
  __ delayed();

  __ daddi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(A4, A4, -1);

  __ bind(haveResult);
  __ dsubu(V0, A5, A6);

  __ bind(LoopEnd);
#endif
  // Result (difference) is in V0; emit the method return.
  return_op(FrameMap::_v0_opr);
}
// Emit the method epilogue: pop the frame, touch the safepoint polling
// page (so a pending safepoint can stop us at return), and return.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
#ifndef _LP64
  //by aoqi
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()
      + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()
      + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  __ li48(AT, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, 0);
#else
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif

  __ pop(RA);
  __ jr(RA);
  __ delayed()->nop();
}
// Reading the protected polling page into R0 does not raise the exception
// on godson-2e, so AT is used as the load destination instead. @jerome, 11/25/2006
// Emit a safepoint poll (a load from the polling page); returns the code
// offset of the poll so debug info can be attached to it.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  assert(info != NULL, "info must not be null for safepoint poll");
  int offset = __ offset();
  Register r = tmp->as_register();
#ifndef _LP64
  //by aoqi
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  //__ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ li48(r, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  //__ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ lw(AT, r, 0);
#else
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif
  return offset;
}
640 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
641 if (from_reg != to_reg) __ move(to_reg, from_reg);
642 }
// Exchange the contents of two registers via the XOR trick (no scratch
// register needed).
// NOTE(review): if a and b are the same register this zeroes it; callers
// are presumed to pass distinct registers — verify at call sites.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xorr(a, a, b);
  __ xorr(b, a, b);
  __ xorr(a, a, b);
}
// Materialize a constant into a register.  Patching is only supported for
// T_OBJECT and T_METADATA constants; all other types require
// patch_code == lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      Unimplemented();
      __ move(dest->as_register(), c->as_jint()); // FIXME
      break;
    }

    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ move(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
#ifndef _LP64
      // 32-bit: split the jlong into low/high words.
      jlong con = c->as_jlong();
      jint* conhi = (jint*)&con + 1;
      jint* conlow = (jint*)&con;

      if (dest->is_double_cpu()) {
        __ move(dest->as_register_lo(), *conlow);
        __ move(dest->as_register_hi(), *conhi);
      } else {
        // assert(dest->is_double(), "wrong register kind");
        // Move the two halves through AT into an FPU register pair.
        __ move(AT, *conlow);
        __ mtc1(AT, dest->as_double_reg());
        __ move(AT, *conhi);
        __ mtc1(AT, dest->as_double_reg()+1);
      }
#else
      if (dest->is_double_cpu()) {
        __ li(dest->as_register_lo(), c->as_jlong());
      } else {
        __ li(dest->as_register(), c->as_jlong());
      }
#endif
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      // Load the float from the constant table through AT.
      address const_addr = float_constant(c->as_jfloat());
      assert (const_addr != NULL, "must create float constant in the constant table");

      if (dest->is_single_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lwc1(dest->as_float_reg(), AT, 0);

      } else {
        // Float constant requested in a cpu register: load its raw bits.
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        assert(dest->as_register() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lw(dest->as_register(), AT, 0);
      }
      break;
    }

    case T_DOUBLE: {
      // Load the double from the constant table through AT.
      address const_addr = double_constant(c->as_jdouble());
      assert (const_addr != NULL, "must create double constant in the constant table");

      if (dest->is_double_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        // Double constant requested in cpu register(s): load raw bits.
        assert(dest->as_register_lo() != AT, "AT can not be allocated.");
        assert(dest->as_register_hi() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lw(dest->as_register_lo(), AT, 0);
        __ lw(dest->as_register_hi(), AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ld(dest->as_register_lo(), AT, 0);
#endif
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
// Store a constant into a stack slot, staging the value through AT.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
      __ move(AT, c->as_jint_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_FLOAT:
      Unimplemented();
      break;

    case T_ADDRESS:
      Unimplemented();
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      if (c->as_jobject() == NULL) {
        // NULL oop: store the zero register directly.
        __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
#else
        __ li48(AT, (long)c->as_jobject());
#endif
        __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
      break;
    case T_LONG: // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store low and high words separately.
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes));
#else
      __ move(AT, c->as_jlong_bits());
      __ sd(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
#endif
      break;
    default:
      ShouldNotReachHere();
  }
}
// Store a constant into memory.  If 'info' is non-NULL, debug info for an
// implicit null check is attached at the offset of the faulting store.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_LONG: // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store high then low word.
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, as_Address_hi(addr));
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, as_Address_lo(addr));
#else
      if(c->as_jlong_bits() != 0) {
        /* DoublePrint: -0.0
         * (gdb) print /x -9223372036854775808
         * $1 = 0x8000000000000000
         */
        __ li64(AT, c->as_jlong_bits());
        __ sd(AT, as_Address_lo(addr));
      } else
        // All-zero bit pattern: store the zero register directly.
        __ sd(R0, as_Address(addr));
#endif
      break;
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL){
        // NULL oop: store zero (narrow or full width depending on
        // compressed-oops mode).
        if (UseCompressedOops && !wide) {
          __ sw(R0, as_Address(addr));
        } else {
          __ st_ptr(R0, as_Address(addr));
        }
      } else {
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
        __ st_ptr(AT, as_Address(addr));
        null_check_here = code_offset();
#else
        //by_css
        __ li64(AT, (long)c->as_jobject());
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(AT);
          null_check_here = code_offset();
          __ sw(AT, as_Address(addr));
        } else {
          __ st_ptr(AT, as_Address(addr));
        }
#endif
      }
      break;
    case T_INT: // fall through
    case T_FLOAT:
      if(c->as_jint_bits() != 0) {
        __ move(AT, c->as_jint_bits());
        __ sw(AT, as_Address(addr));
      } else
        __ sw(R0, as_Address(addr));
      break;
    case T_ADDRESS:
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, as_Address(addr));
      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sb(AT, as_Address(addr));
      }
      else
        __ sb(R0, as_Address(addr));
      break;
    case T_CHAR: // fall through
    case T_SHORT:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sh(AT, as_Address(addr));
      }
      else
        __ sh(R0, as_Address(addr));
      break;
    default: ShouldNotReachHere();
  };
  if (info != NULL) add_debug_info_for_null_check(null_check_here, info);
}
// Register-to-register move, covering fpu<->fpu, cpu<->cpu and the mixed
// cpu<->fpu cases (raw bit moves via mtc1/mfc1, no conversion).
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  if (dest->is_float_kind() && src->is_float_kind()) {
    // float to float moves
    if (dest->is_single_fpu()) {
      assert(src->is_single_fpu(), "must both be float");
      __ mov_s(dest->as_float_reg(), src->as_float_reg());
    } else {
      assert(src->is_double_fpu(), "must both be double");
      __ mov_d( dest->as_double_reg(),src->as_double_reg());
    }
  } else if (!dest->is_float_kind() && !src->is_float_kind()) {
    // int to int moves
    if (dest->is_single_cpu()) {
#ifdef _LP64
      //FIXME aoqi: copy from x86
      if (src->type() == T_LONG) {
        // Can do LONG -> OBJECT
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
#endif
      assert(src->is_single_cpu(), "must match");
      if (dest->type() == T_INT) {
        // Keep T_INT values properly sign-extended to 32 bits.
        __ move_u32(dest->as_register(), src->as_register());
      } else
        move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
#endif
      Register f_lo;
      Register f_hi;
      Register t_lo;
      Register t_hi;

      if (src->is_single_cpu()) {
        f_lo = src->as_register();
        t_lo = dest->as_register_lo();
      } else {
        f_lo = src->as_register_lo();
        f_hi = src->as_register_hi();
        t_lo = dest->as_register_lo();
        t_hi = dest->as_register_hi();
        assert(f_hi == f_lo, "must be same");
        assert(t_hi == t_lo, "must be same");
      }
#ifdef _LP64
      move_regs(f_lo, t_lo);
#else
      /*
      if (src->as_register_hi() != dest->as_register_lo()) {
        move_regs(src->as_register_lo(), dest->as_register_lo());
        move_regs(src->as_register_hi(), dest->as_register_hi());
      } else if (src->as_register_lo() != dest->as_register_hi()) {
        move_regs(src->as_register_hi(), dest->as_register_hi());
        move_regs(src->as_register_lo(), dest->as_register_lo());
      } else {
        swap_reg(src->as_register_lo(), src->as_register_hi());
      }
      */
      assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");

      // Order the two word moves so that neither overwrites a source
      // word before it has been read; fully-crossed pairs are swapped.
      if (f_lo == t_hi && f_hi == t_lo) {
        swap_reg(f_lo, f_hi);
      } else if (f_hi == t_lo) {
        assert(f_lo != t_hi, "overwriting register");
        move_regs(f_hi, t_hi);
        move_regs(f_lo, t_lo);
      } else {
        assert(f_hi != t_lo, "overwriting register");
        move_regs(f_lo, t_lo);
        move_regs(f_hi, t_hi);
      }
#endif // LP64
    }
  } else {
    // float to int or int to float moves (raw bit transfers)
    if (dest->is_double_cpu()) {
      assert(src->is_double_fpu(), "must match");
      __ mfc1(dest->as_register_lo(), src->as_double_reg());
#ifndef _LP64
      __ mfc1(dest->as_register_hi(), src->as_double_reg() + 1);
#endif
    } else if (dest->is_single_cpu()) {
      assert(src->is_single_fpu(), "must match");
      __ mfc1(dest->as_register(), src->as_float_reg());
    } else if (dest->is_double_fpu()) {
      assert(src->is_double_cpu(), "must match");
      __ mtc1(src->as_register_lo(), dest->as_double_reg());
#ifndef _LP64
      __ mtc1(src->as_register_hi(), dest->as_double_reg() + 1);
#endif
    } else if (dest->is_single_fpu()) {
      assert(src->is_single_cpu(), "must match");
      __ mtc1(src->as_register(), dest->as_float_reg());
    }
  }
}
// Store a register operand into a stack slot of the current frame.
// pop_fpu_stack is unused on MIPS (no x87-style FPU stack here).
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
      __ st_ptr(src->as_register(), dst);
    } else if (type == T_METADATA || type == T_DOUBLE) {
      // Pointer-width store for metadata (and T_DOUBLE carried in a GPR).
      __ st_ptr(src->as_register(), dst);
    } else {
      // All remaining (sub-)word types are stored as a 32-bit word.
      __ sw(src->as_register(), dst);
    }
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ st_ptr(src->as_register_lo(), dstLO);
    // Only 32-bit builds keep a separate high-word register/slot.
    NOT_LP64(__ st_ptr(src->as_register_hi(), dstHI));
  } else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ swc1(src->as_float_reg(), dst_addr);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
#ifndef _LP64
    // 32-bit: store the FPR pair as two word stores (lo word, then hi at +4).
    __ swc1(src->as_double_reg(), dst_addr);
    __ swc1(src->as_double_reg() + 1, dst_addr.base(), dst_addr.disp() + 4);
#else
    __ sdc1(src->as_double_reg(), dst_addr);
#endif
  } else {
    ShouldNotReachHere();
  }
}
//FIXME
// Store a register operand to memory (dest is a LIR_Address).
// For every type the store is emitted in one of three addressing shapes:
//   disp_reg == noreg : displacement fits a signed 16-bit immediate off base
//   needs_patching    : displacement fully materialized in AT (lui + addiu),
//                       so the patch-site instruction sequence has fixed shape
//   otherwise         : lui(AT, high half) emitted above; add base+AT and use
//                       the low 16 bits as the immediate offset
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info,bool pop_fpu_stack, bool wide, bool/*unaliged*/) {
  LIR_Address* to_addr = dest->as_address_ptr();
  //Register dest_reg = to_addr->base()->as_register();
  // FIXME aoqi
  Register dest_reg = to_addr->base()->is_single_cpu()? to_addr->base()->as_register() : to_addr->base()->as_register_lo();
  PatchingStub* patch = NULL;
  bool needs_patching = (patch_code != lir_patch_none);
  Register disp_reg = NOREG;
  int disp_value = to_addr->disp();
  /*
  the start position of patch template is labeled by "new PatchingStub(...)"
  during patch, T9 will be changed and not restore
  that's why we use S7 but not T9 as compressed_src here
  */
  Register compressed_src = S7;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Compress the oop into S7 (see note above) before the narrow store.
      __ move(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!src->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal,
           "patching doesn't match register");
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  if (needs_patching) {
    // Patched access: always emit the full lui/addiu pair regardless of the
    // displacement size, so the template can be rewritten in place.
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }
  int offset = code_offset();

  switch(type) {
    case T_DOUBLE:
      assert(src->is_double_fpu(), "just check");
      if (disp_reg == noreg) {
#ifndef _LP64
        __ swc1(src->as_double_reg(), dest_reg, disp_value);
        __ swc1(src->as_double_reg()+1, dest_reg, disp_value+4);
#else
        __ sdc1(src->as_double_reg(), dest_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, 0);
        __ swc1(src->as_double_reg()+1, AT, 4);
#else
        __ sdc1(src->as_double_reg(), AT, 0);
#endif
      } else {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ swc1(src->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ sdc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
      break;

    case T_FLOAT:
      if (disp_reg == noreg) {
        __ swc1(src->as_float_reg(), dest_reg, disp_value);
      } else if(needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(from_lo, AT, 0);
      } else {
        __ st_ptr(from_lo, as_Address_lo(to_addr));
      }
#else
      // 32-bit: two word stores; ordering is chosen so the first store never
      // clobbers a base/index register the second store still needs.
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          NOT_LP64(__ st_ptr(from_hi, AT, longSize/2);)
          __ st_ptr(from_lo, AT, 0);
        } else {
          __ st_ptr(from_hi, as_Address_hi(to_addr));
          __ st_ptr(from_lo, as_Address_lo(to_addr));
        }
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(from_lo, AT, 0);
          __ st_ptr(from_hi, AT, longSize/2);
        } else {
          __ st_ptr(from_lo, as_Address_lo(to_addr));
          __ st_ptr(from_hi, as_Address_hi(to_addr));
        }
      }
#endif
      break;
    }

    case T_ARRAY:
    case T_OBJECT:
#ifdef _LP64
      if (UseCompressedOops && !wide) {
        // Narrow (32-bit) store of the oop compressed into S7 above.
        if (disp_reg == noreg) {
          __ sw(compressed_src, dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ st_ptr(src->as_register(), dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
#endif
      // NOTE: on !_LP64 the T_ARRAY/T_OBJECT and T_ADDRESS bodies (and their
      // break statements) are compiled out, so those types intentionally fall
      // through to the 32-bit word store under T_INT.
    case T_ADDRESS:
#ifdef _LP64
      if (disp_reg == noreg) {
        __ st_ptr(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;
#endif
    case T_INT:
      if (disp_reg == noreg) {
        __ sw(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_CHAR:
    case T_SHORT:
      if (disp_reg == noreg) {
        __ sh(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_BYTE:
    case T_BOOLEAN:
      assert(src->is_single_cpu(), "just check");

      if (disp_reg == noreg) {
        __ sb(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
// Load a stack slot of the current frame into a register operand.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
#ifdef _LP64
    // T_INT slots hold 32-bit values; everything else is pointer-width.
    if (type == T_INT)
      __ lw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    else
#endif
    __ ld_ptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    // Workaround (original investigation notes kept): the source may arrive as
    // a single-stack operand for a long move, e.g.:
    // java.util.concurrent.locks.ReentrantReadWriteLock$Sync::tryAcquire
    //   88 move [stack:2|L] [a5a5|J]
    // OpenJDK 64-Bit Client VM warning: .../c1_LIR.hpp, 397 ,
    //   assert(is_double_stack() && !is_virtual(),"type check")
    //   0x000000556197af8c: ld a5, 0x50(sp)
    Address src_addr_LO;
    if (src->is_single_stack())
      src_addr_LO = frame_map()->address_for_slot(src->single_stack_ix(),lo_word_offset_in_bytes);
    else if (src->is_double_stack())
      src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    else
      ShouldNotReachHere();
#else
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
#endif
#ifdef _LP64
    if (src->type() == T_INT)
      __ lw(dest->as_register_lo(), src_addr_LO);
    else
#endif
    __ ld_ptr(dest->as_register_lo(), src_addr_LO);
    // 32-bit builds load the separate high word as well.
    NOT_LP64(__ ld_ptr(dest->as_register_hi(), src_addr_HI));
  } else if (dest->is_single_fpu()) {
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lwc1(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
#ifndef _LP64
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ lwc1(dest->as_double_reg(), src_addr_LO);
    __ lwc1(dest->as_double_reg()+1, src_addr_HI);
#else
    __ ldc1(dest->as_double_reg(), src_addr_LO);
#endif
  } else {
    ShouldNotReachHere();
    /*
    assert(dest->is_single_cpu(), "cannot be anything else but a single cpu");
    assert(type!= T_ILLEGAL, "Bad type in stack2reg")
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lw(dest->as_register(), addr);
    */
  }
}
// Copy one stack slot to another through the scratch register AT.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    /*
     * 2012/5/23 Jin: YozoOffice(-Xcomp) corrupts in "New File -> word"
     *
     * [b.q.e.a.z::bw()]
     *  move [stack:15|L] [stack:17|L]
     *  0x00000055584e7cf4: lw at, 0x78(sp)   <--- error!
     *  0x00000055584e7cf8: sw at, 0x88(sp)
     * (i.e. object slots must be copied with full 64-bit loads/stores)
     */
    if (type == T_OBJECT )
    {
      __ ld(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ sd(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
    }
    else
    {
      __ lw(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
    }
  } else if (src->is_double_stack()) {
#ifndef _LP64
    // 32-bit: copy the two words of the double slot separately.
    __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
    __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
    __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix(),4));
    __ sw(AT, frame_map()->address_for_slot(dest ->double_stack_ix(),4));
#else
    __ ld_ptr(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
    __ st_ptr(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
#endif
  } else {
    ShouldNotReachHere();
  }
}
// Load from memory (src is a LIR_Address) into a register operand.
// Mirrors reg2mem: each type's load comes in three addressing shapes
// (simm16 displacement / fully-materialized patched displacement in AT /
// lui(high)+low split). `offset` records the pc of the actual load so the
// null-check debug info emitted at the end points at the faulting
// instruction.
// if patching needed, be sure the instruction at offset is a MoveMemReg
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();
  //Address from_addr = as_Address(addr);
  //Register src_reg = addr->base()->as_register();
  // FIXME aoqi
  Register src_reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  // we must use lui&addiu, so the patch template has a fixed shape
  if (needs_patching) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  switch(type) {
  case T_BOOLEAN:
  case T_BYTE: {
    //assert(to_reg.is_word(), "just check");
    if (disp_reg == noreg) {
      __ lb(dest->as_register(), src_reg, disp_value);
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lb(dest->as_register(), AT, 0);
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lb(dest->as_register(), AT, Assembler::split_low(disp_value));
    }
  }
  break;

  case T_CHAR: {
    //assert(to_reg.is_word(), "just check");
    // unsigned halfword load (Java char is unsigned 16-bit)
    if (disp_reg == noreg) {
      __ lhu(dest->as_register(), src_reg, disp_value);
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lhu(dest->as_register(), AT, 0);
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lhu(dest->as_register(), AT, Assembler::split_low(disp_value));
    }
  }
  break;

  case T_SHORT: {
    // assert(to_reg.is_word(), "just check");
    // signed halfword load
    if (disp_reg == noreg) {
      __ lh(dest->as_register(), src_reg, disp_value);
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lh(dest->as_register(), AT, 0);
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lh(dest->as_register(), AT, Assembler::split_low(disp_value));
    }
  }
  break;

  case T_OBJECT:
  case T_ARRAY:
    // Narrow oops are loaded with lwu and decoded after the switch below.
    if (UseCompressedOops && !wide) {
      if (disp_reg == noreg) {
        __ lwu(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwu(dest->as_register(), AT, 0);
      } else {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    } else {
      if (disp_reg == noreg) {
        __ ld_ptr(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, 0);
      } else {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;
  case T_ADDRESS:
    // A klass-field load with compressed class pointers is a narrow load,
    // decoded with decode_klass_not_null after the switch.
    if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      if (disp_reg == noreg) {
        __ lwu(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwu(dest->as_register(), AT, 0);
      } else {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    } else {
      if (disp_reg == noreg) {
        __ ld_ptr(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, 0);
      } else {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;
  case T_INT: {
    //assert(to_reg.is_word(), "just check");
    if (disp_reg == noreg) {
      __ lw(dest->as_register(), src_reg, disp_value);
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lw(dest->as_register(), AT, 0);
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
    }
  }
  break;

  case T_LONG: {
    Register to_lo = dest->as_register_lo();
    Register to_hi = dest->as_register_hi();
#ifdef _LP64
    if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      __ ld_ptr(to_lo, AT, 0);
    } else {
      __ ld_ptr(to_lo, as_Address_lo(addr));
    }
#else
    // 32-bit: two word loads; order them so the first load never clobbers a
    // base/index register the second one still needs.
    Register base = addr->base()->as_register();
    Register index = noreg;
    if (addr->index()->is_register()) {
      index = addr->index()->as_register();
    }
    if ((base == to_lo && index == to_hi) ||(base == to_hi && index == to_lo)) {
      // addresses with 2 registers are only formed as a result of
      // array access so this code will never have to deal with
      // patches or null checks.
      assert(info == NULL && patch == NULL, "must be");
      __ lea(to_hi, as_Address(addr));
      __ lw(to_lo, Address(to_hi));
      __ lw(to_hi, Address(to_hi, BytesPerWord));
    } else if (base == to_lo || index == to_lo) {
      assert(base != to_hi, "can't be");
      assert(index == noreg || (index != base && index != to_hi), "can't handle this");
      if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(to_hi, AT, longSize/2);
        __ lw(to_lo, AT, 0);
      } else {
        __ lw(to_hi, as_Address_hi(addr));
        __ lw(to_lo, as_Address_lo(addr));
      }
    } else {
      assert(index == noreg || (index != base && index != to_lo), "can't handle this");
      if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(to_lo, AT, 0);
        __ lw(to_hi, AT, longSize/2);
      } else {
        __ lw(to_lo, as_Address_lo(addr));
        __ lw(to_hi, as_Address_hi(addr));
      }
    }
#endif
  }
  break;

  case T_FLOAT: {
    //assert(to_reg.is_float(), "just check");
    if (disp_reg == noreg) {
      __ lwc1(dest->as_float_reg(), src_reg, disp_value);
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lwc1(dest->as_float_reg(), AT, 0);
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
      __ lwc1(dest->as_float_reg(), AT, Assembler::split_low(disp_value));
    }
  }
  break;

  case T_DOUBLE: {
    //assert(to_reg.is_double(), "just check");

    if (disp_reg == noreg) {
#ifndef _LP64
      __ lwc1(dest->as_double_reg(), src_reg, disp_value);
      __ lwc1(dest->as_double_reg()+1, src_reg, disp_value+4);
#else
      __ ldc1(dest->as_double_reg(), src_reg, disp_value);
#endif
    } else if (needs_patching) {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
#ifndef _LP64
      __ lwc1(dest->as_double_reg(), AT, 0);
      __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
      __ ldc1(dest->as_double_reg(), AT, 0);
#endif
    } else {
      __ add(AT, src_reg, disp_reg);
      offset = code_offset();
#ifndef _LP64
      __ lwc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
      __ lwc1(dest->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
      __ ldc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
    }
  }
  break;

  default:
    ShouldNotReachHere();
  }

  if (needs_patching) {
    patching_epilog(patch, patch_code, src_reg, info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Decode the narrow oop loaded above into a full pointer.
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}
1679 void LIR_Assembler::prefetchr(LIR_Opr src) {
1680 LIR_Address* addr = src->as_address_ptr();
1681 Address from_addr = as_Address(addr);
1682 }
// Write-prefetch hint: intentionally a no-op on MIPS.
void LIR_Assembler::prefetchw(LIR_Opr src) {
}
1688 NEEDS_CLEANUP; // This could be static?
1689 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1690 int elem_size = type2aelembytes(type);
1691 switch (elem_size) {
1692 case 1: return Address::times_1;
1693 case 2: return Address::times_2;
1694 case 4: return Address::times_4;
1695 case 8: return Address::times_8;
1696 }
1697 ShouldNotReachHere();
1698 return Address::no_scale;
1699 }
1702 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1703 switch (op->code()) {
1704 case lir_frem:
1705 arithmetic_frem(
1706 op->code(),
1707 op->in_opr1(),
1708 op->in_opr2(),
1709 op->in_opr3(),
1710 op->result_opr(),
1711 op->info());
1712 break;
1714 case lir_idiv:
1715 case lir_irem:
1716 arithmetic_idiv(
1717 op->code(),
1718 op->in_opr1(),
1719 op->in_opr2(),
1720 op->in_opr3(),
1721 op->result_opr(),
1722 op->info());
1723 break;
1724 default: ShouldNotReachHere(); break;
1725 }
1726 }
1728 void LIR_Assembler::emit_op4(LIR_Op4* op) {
1729 switch (op->code()) {
1730 case lir_cmove_mips:
1731 emit_cmove_mips(op);
1732 break;
1733 default: ShouldNotReachHere(); break;
1734 }
1735 }
1737 void LIR_Assembler::emit_cmove_mips(LIR_Op4* op) {
1738 LIR_Opr cmp1 = op->in_opr1();
1739 LIR_Opr cmp2 = op->in_opr2();
1740 LIR_Opr src1 = op->in_opr3();
1741 LIR_Opr src2 = op->in_opr4();
1742 LIR_Condition condition = op->cond();
1743 LIR_Opr dst = op->result_opr();
1745 if (src1->is_constant() && src2->is_constant() && (dst->is_single_cpu() || dst->is_double_cpu())) {
1746 jlong value = (jlong)(src2->pointer()->as_constant()->as_pointer());
1747 Register dst_reg = dst->as_register_lo();
1748 __ set64(dst_reg, value);
1749 } else {
1750 Unimplemented();
1751 }
1752 /*
1753 if (opr1->is_single_cpu()) {
1754 Register reg_op1 = opr1->as_register();
1755 if (opr2->is_single_cpu()) {
1756 Register reg_op2 = opr2->as_register();
1757 switch (condition) {
1758 case lir_cond_equal:
1759 __ beq_far(reg_op1, reg_op2, *op->label());
1760 break;
1761 case lir_cond_notEqual:
1762 if(op->label()==NULL)
1763 __ bne_far(reg_op1, reg_op2, *op->label());
1764 else
1765 __ bne_far(reg_op1, reg_op2, *op->label());
1766 break;
1767 case lir_cond_less:
1768 __ slt(AT, reg_op1, reg_op2);
1769 __ bne_far(AT, R0, *op->label());
1770 break;
1771 case lir_cond_lessEqual:
1772 __ slt(AT, reg_op2, reg_op1);
1773 __ beq_far(AT, R0, *op->label());
1774 break;
1775 case lir_cond_belowEqual:
1776 __ sltu(AT, reg_op2, reg_op1);
1777 __ beq_far(AT, R0, *op->label());
1778 break;
1779 case lir_cond_greaterEqual:
1780 __ slt(AT, reg_op1, reg_op2);
1781 __ beq_far(AT, R0, *op->label());
1782 break;
1783 case lir_cond_aboveEqual:
1784 __ sltu(AT, reg_op1, reg_op2);
1785 __ beq_far(AT, R0, *op->label());
1786 break;
1787 case lir_cond_greater:
1788 __ slt(AT, reg_op2, reg_op1);
1789 __ bne_far(AT, R0, *op->label());
1790 break;
1791 default: ShouldNotReachHere();
1792 }
1793 } else if (opr2->is_constant()) {
1794 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
1795 bool is_object = false;
1796 if (opr2->pointer()->as_constant()->type() == T_INT) {
1797 temp_value = (jint)(opr2->as_jint());
1798 } else if (opr2->pointer()->as_constant()->type() == T_LONG) {
1799 temp_value = (jlong)(opr2->as_jlong());
1800 } else if (opr2->pointer()->as_constant()->type() == T_OBJECT) {
1801 is_object = true;
1802 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_jobject());
1803 } else {
1804 ShouldNotReachHere();
1805 }
1807 switch (condition) {
1808 case lir_cond_equal:
1809 if (temp_value) {
1810 if (is_object) {
1811 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1812 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1813 __ relocate(rspec);
1814 }
1815 __ li(AT, temp_value);
1816 __ beq_far(reg_op1, AT, *op->label());
1817 } else {
1818 __ beq_far(reg_op1, R0, *op->label());
1819 }
1820 break;
1822 case lir_cond_notEqual:
1823 if (temp_value) {
1824 if (is_object) {
1825 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1826 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1827 __ relocate(rspec);
1828 }
1829 __ li(AT, temp_value);
1830 __ bne_far(reg_op1, AT, *op->label());
1831 } else {
1832 __ bne_far(reg_op1, R0, *op->label());
1833 }
1834 break;
1836 case lir_cond_less:
1837 if (Assembler::is_simm16(temp_value)) {
1838 __ slti(AT, reg_op1, temp_value);
1839 } else {
1840 __ move(AT, temp_value);
1841 __ slt(AT, reg_op1, AT);
1842 }
1843 __ bne_far(AT, R0, *op->label());
1844 break;
1846 case lir_cond_lessEqual:
1847 __ li(AT, temp_value);
1848 __ slt(AT, AT, reg_op1);
1849 __ beq_far(AT, R0, *op->label());
1850 break;
1852 case lir_cond_belowEqual:
1853 #ifdef OPT_RANGECHECK
1854 if (op->check()) {
1855 __ li(AT, temp_value);
1856 add_debug_info_for_range_check_here(op->info(), temp_value);
1857 __ tgeu(AT, reg_op1, 29);
1858 } else {
1859 #endif
1860 __ li(AT, temp_value);
1861 __ sltu(AT, AT, reg_op1);
1862 __ beq_far(AT, R0, *op->label());
1863 #ifdef OPT_RANGECHECK
1864 }
1865 #endif
1866 break;
1868 case lir_cond_greaterEqual:
1869 if (Assembler::is_simm16(temp_value)) {
1870 __ slti(AT, reg_op1, temp_value);
1871 } else {
1872 __ li(AT, temp_value);
1873 __ slt(AT, reg_op1, AT);
1874 }
1875 __ beq_far(AT, R0, *op->label());
1876 break;
1878 case lir_cond_aboveEqual:
1879 if (Assembler::is_simm16(temp_value)) {
1880 __ sltiu(AT, reg_op1, temp_value);
1881 } else {
1882 __ li(AT, temp_value);
1883 __ sltu(AT, reg_op1, AT);
1884 }
1885 __ beq_far(AT, R0, *op->label());
1886 break;
1888 case lir_cond_greater:
1889 __ li(AT, temp_value);
1890 __ slt(AT, AT, reg_op1);
1891 __ bne_far(AT, R0, *op->label());
1892 break;
1894 default: ShouldNotReachHere();
1895 }
1897 } else {
1898 if (opr2->is_address()) {
1899 if (op->type() == T_INT)
1900 __ lw(AT, as_Address(opr2->pointer()->as_address()));
1901 else
1902 __ ld_ptr(AT, as_Address(opr2->pointer()->as_address()));
1903 } else if (opr2->is_stack()) {
1904 __ ld_ptr(AT, frame_map()->address_for_slot(opr2->single_stack_ix()));
1905 } else {
1906 ShouldNotReachHere();
1907 }
1908 switch (condition) {
1909 case lir_cond_equal:
1910 __ beq_far(reg_op1, AT, *op->label());
1911 break;
1912 case lir_cond_notEqual:
1913 __ bne_far(reg_op1, AT, *op->label());
1914 break;
1915 case lir_cond_less:
1916 __ slt(AT, reg_op1, AT);
1917 __ bne_far(AT, R0, *op->label());
1918 break;
1919 case lir_cond_lessEqual:
1920 __ slt(AT, AT, reg_op1);
1921 __ beq_far(AT, R0, *op->label());
1922 break;
1923 case lir_cond_belowEqual:
1924 __ sltu(AT, AT, reg_op1);
1925 __ beq_far(AT, R0, *op->label());
1926 break;
1927 case lir_cond_greaterEqual:
1928 __ slt(AT, reg_op1, AT);
1929 __ beq_far(AT, R0, *op->label());
1930 break;
1931 case lir_cond_aboveEqual:
1932 #ifdef OPT_RANGECHECK
1933 if (op->check()) {
1934 add_debug_info_for_range_check_here(op->info(), opr1->rinfo());
1935 __ tgeu(reg_op1, AT, 29);
1936 } else {
1937 #endif
1938 __ sltu(AT, reg_op1, AT);
1939 __ beq_far(AT, R0, *op->label());
1940 #ifdef OPT_RANGECHECK
1941 }
1942 #endif
1943 break;
1944 case lir_cond_greater:
1945 __ slt(AT, AT, reg_op1);
1946 __ bne_far(AT, R0, *op->label());
1947 break;
1948 default: ShouldNotReachHere();
1949 }
1950 }
1951 __ delayed()->nop();
1953 } else if(opr1->is_address() || opr1->is_stack()) {
1954 if (opr2->is_constant()) {
1955 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
1956 if (opr2->as_constant_ptr()->type() == T_INT) {
1957 temp_value = (jint)opr2->as_constant_ptr()->as_jint();
1958 } else if (opr2->as_constant_ptr()->type() == T_OBJECT) {
1959 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_constant_ptr()->as_jobject());
1960 } else {
1961 ShouldNotReachHere();
1962 }
1964 if (Assembler::is_simm16(temp_value)) {
1965 if (opr1->is_address()) {
1966 __ lw(AT, as_Address(opr1->pointer()->as_address()));
1967 } else {
1968 __ lw(AT, frame_map()->address_for_slot(opr1->single_stack_ix()));
1969 }
1971 switch(condition) {
1973 case lir_cond_equal:
1974 __ addi(AT, AT, -(int)temp_value);
1975 __ beq_far(AT, R0, *op->label());
1976 break;
1977 case lir_cond_notEqual:
1978 __ addi(AT, AT, -(int)temp_value);
1979 __ bne_far(AT, R0, *op->label());
1980 break;
1981 case lir_cond_less:
1982 __ slti(AT, AT, temp_value);
1983 __ bne_far(AT, R0, *op->label());
1984 break;
1985 case lir_cond_lessEqual:
1986 __ addi(AT, AT, -temp_value);
1987 __ slt(AT, R0, AT);
1988 __ beq_far(AT, R0, *op->label());
1989 break;
1990 case lir_cond_belowEqual:
1991 __ addiu(AT, AT, -temp_value);
1992 __ sltu(AT, R0, AT);
1993 __ beq_far(AT, R0, *op->label());
1994 break;
1995 case lir_cond_greaterEqual:
1996 __ slti(AT, AT, temp_value);
1997 __ beq_far(AT, R0, *op->label());
1998 break;
1999 case lir_cond_aboveEqual:
2000 __ sltiu(AT, AT, temp_value);
2001 __ beq_far(AT, R0, *op->label());
2002 break;
2003 case lir_cond_greater:
2004 __ addi(AT, AT, -temp_value);
2005 __ slt(AT, R0, AT);
2006 __ bne_far(AT, R0, *op->label());
2007 break;
2009 default:
2010 Unimplemented();
2011 }
2012 } else {
2013 Unimplemented();
2014 }
2015 } else {
2016 Unimplemented();
2017 }
2018 __ delayed()->nop();
2020 } else if(opr1->is_double_cpu()) {
2021 Register opr1_lo = opr1->as_register_lo();
2022 Register opr1_hi = opr1->as_register_hi();
2024 if (opr2->is_double_cpu()) {
2025 Register opr2_lo = opr2->as_register_lo();
2026 Register opr2_hi = opr2->as_register_hi();
2027 switch (condition) {
2028 case lir_cond_equal: {
2029 Label L;
2030 #ifndef _LP64
2031 Unimplemented();
2032 #else
2033 __ beq_far(opr1_lo, opr2_lo, *op->label());
2034 #endif
2035 __ delayed()->nop();
2036 __ bind(L);
2037 }
2038 break;
2040 case lir_cond_notEqual:
2041 if (op->label()==NULL)
2042 __ bne_far(opr1_lo, opr2_lo, *op->label());
2043 else
2044 __ bne_far(opr1_lo, opr2_lo, *op->label());
2045 __ delayed()->nop();
2046 if (op->label()==NULL)
2047 NOT_LP64(__ bne(opr1_hi, opr2_hi, *op->label()));
2048 else
2049 NOT_LP64(__ bne_far(opr1_hi, opr2_hi, *op->label()));
2050 NOT_LP64(__ delayed()->nop());
2051 break;
2053 case lir_cond_less: {
2054 #ifdef _LP64
2055 __ slt(AT, opr1_lo, opr2_lo);
2056 __ bne_far(AT, R0, *op->label());
2057 __ delayed()->nop();
2058 #else
2059 Unimplemented();
2060 #endif
2061 }
2062 break;
2064 case lir_cond_lessEqual: {
2065 #ifdef _LP64
2066 __ slt(AT, opr2_lo, opr1_lo);
2067 __ beq_far(AT, R0, *op->label());
2068 __ delayed()->nop();
2069 #else
2070 Unimplemented();
2071 #endif
2072 }
2073 break;
2075 case lir_cond_belowEqual: {
2076 #ifdef _LP64
2077 __ sltu(AT, opr2_lo, opr1_lo);
2078 __ beq_far(AT, R0, *op->label());
2079 __ delayed()->nop();
2080 #else
2081 Unimplemented();
2082 #endif
2083 }
2084 break;
2086 case lir_cond_greaterEqual: {
2087 #ifdef _LP64
2088 __ slt(AT, opr1_lo, opr2_lo);
2089 __ beq_far(AT, R0, *op->label());
2090 __ delayed()->nop();
2091 #else
2092 Unimplemented();
2093 #endif
2094 }
2095 break;
2097 case lir_cond_aboveEqual: {
2098 #ifdef _LP64
2099 __ sltu(AT, opr1_lo, opr2_lo);
2100 __ beq_far(AT, R0, *op->label());
2101 __ delayed()->nop();
2102 #else
2103 Unimplemented();
2104 #endif
2105 }
2106 break;
2108 case lir_cond_greater: {
2109 #ifdef _LP64
2110 __ slt(AT, opr2_lo, opr1_lo);
2111 __ bne_far(AT, R0, *op->label());
2112 __ delayed()->nop();
2113 #else
2114 Unimplemented();
2115 #endif
2116 }
2117 break;
2119 default: ShouldNotReachHere();
2120 }
2122 } else if(opr2->is_constant()) {
2123 jlong lv = opr2->as_jlong();
2124 switch (condition) {
2125 case lir_cond_equal:
2126 #ifdef _LP64
2127 __ li(T8, lv);
2128 __ beq_far(opr1_lo, T8, *op->label());
2129 __ delayed()->nop();
2130 #else
2131 Unimplemented();
2132 #endif
2133 break;
2135 case lir_cond_notEqual:
2136 #ifdef _LP64
2137 __ li(T8, lv);
2138 __ bne_far(opr1_lo, T8, *op->label());
2139 __ delayed()->nop();
2140 #else
2141 Unimplemented();
2142 #endif
2143 break;
2145 case lir_cond_less:
2146 #ifdef _LP64
2147 __ li(T8, lv);
2148 __ slt(AT, opr1_lo, T8);
2149 __ bne_far(AT, R0, *op->label());
2150 __ delayed()->nop();
2151 #else
2152 Unimplemented();
2153 #endif
2154 break;
2156 case lir_cond_lessEqual:
2157 #ifdef _LP64
2158 __ li(T8, lv);
2159 __ slt(AT, T8, opr1_lo);
2160 __ beq_far(AT, R0, *op->label());
2161 __ delayed()->nop();
2162 #else
2163 Unimplemented();
2164 #endif
2165 break;
2167 case lir_cond_belowEqual:
2168 #ifdef _LP64
2169 __ li(T8, lv);
2170 __ sltu(AT, T8, opr1_lo);
2171 __ beq_far(AT, R0, *op->label());
2172 __ delayed()->nop();
2173 #else
2174 Unimplemented();
2175 #endif
2176 break;
2178 case lir_cond_greaterEqual:
2179 #ifdef _LP64
2180 __ li(T8, lv);
2181 __ slt(AT, opr1_lo, T8);
2182 __ beq_far(AT, R0, *op->label());
2183 __ delayed()->nop();
2184 #else
2185 Unimplemented();
2186 #endif
2187 break;
2189 case lir_cond_aboveEqual:
2190 #ifdef _LP64
2191 __ li(T8, lv);
2192 __ sltu(AT, opr1_lo, T8);
2193 __ beq_far(AT, R0, *op->label());
2194 __ delayed()->nop();
2195 #else
2196 Unimplemented();
2197 #endif
2198 break;
2200 case lir_cond_greater:
2201 #ifdef _LP64
2202 __ li(T8, lv);
2203 __ slt(AT, T8, opr1_lo);
2204 __ bne_far(AT, R0, *op->label());
2205 __ delayed()->nop();
2206 #else
2207 Unimplemented();
2208 #endif
2209 break;
2211 default:
2212 ShouldNotReachHere();
2213 }
2214 } else {
2215 Unimplemented();
2216 }
2217 } else if (opr1->is_single_fpu()) {
2218 assert(opr2->is_single_fpu(), "change the code");
2220 FloatRegister reg_op1 = opr1->as_float_reg();
2221 FloatRegister reg_op2 = opr2->as_float_reg();
2222 bool un_jump = (op->ublock()->label()==op->label());
2224 Label& L = *op->label();
2226 switch (condition) {
2227 case lir_cond_equal:
2228 if (un_jump)
2229 __ c_ueq_s(reg_op1, reg_op2);
2230 else
2231 __ c_eq_s(reg_op1, reg_op2);
2232 __ bc1t(L);
2234 break;
2236 case lir_cond_notEqual:
2237 if (un_jump)
2238 __ c_eq_s(reg_op1, reg_op2);
2239 else
2240 __ c_ueq_s(reg_op1, reg_op2);
2241 __ bc1f(L);
2243 break;
2245 case lir_cond_less:
2246 if (un_jump)
2247 __ c_ult_s(reg_op1, reg_op2);
2248 else
2249 __ c_olt_s(reg_op1, reg_op2);
2250 __ bc1t(L);
2252 break;
2254 case lir_cond_lessEqual:
2255 case lir_cond_belowEqual:
2256 if (un_jump)
2257 __ c_ule_s(reg_op1, reg_op2);
2258 else
2259 __ c_ole_s(reg_op1, reg_op2);
2260 __ bc1t(L);
2262 break;
2264 case lir_cond_greaterEqual:
2265 case lir_cond_aboveEqual:
2266 if (un_jump)
2267 __ c_olt_s(reg_op1, reg_op2);
2268 else
2269 __ c_ult_s(reg_op1, reg_op2);
2270 __ bc1f(L);
2272 break;
2274 case lir_cond_greater:
2275 if (un_jump)
2276 __ c_ole_s(reg_op1, reg_op2);
2277 else
2278 __ c_ule_s(reg_op1, reg_op2);
2279 __ bc1f(L);
2281 break;
2283 default:
2284 ShouldNotReachHere();
2285 }
2286 __ delayed()->nop();
2287 } else if (opr1->is_double_fpu()) {
2288 assert(opr2->is_double_fpu(), "change the code");
2290 FloatRegister reg_op1 = opr1->as_double_reg();
2291 FloatRegister reg_op2 = opr2->as_double_reg();
2292 bool un_jump = (op->ublock()->label()==op->label());
2293 Label& L = *op->label();
2295 switch (condition) {
2296 case lir_cond_equal:
2297 if (un_jump)
2298 __ c_ueq_d(reg_op1, reg_op2);
2299 else
2300 __ c_eq_d(reg_op1, reg_op2);
2301 __ bc1t(L);
2303 break;
2305 case lir_cond_notEqual:
2306 if (un_jump)
2307 __ c_eq_d(reg_op1, reg_op2);
2308 else
2309 __ c_ueq_d(reg_op1, reg_op2);
2310 __ bc1f(L);
2312 break;
2314 case lir_cond_less:
2315 if (un_jump)
2316 __ c_ult_d(reg_op1, reg_op2);
2317 else
2318 __ c_olt_d(reg_op1, reg_op2);
2319 __ bc1t(L);
2321 break;
2323 case lir_cond_lessEqual:
2324 case lir_cond_belowEqual:
2325 if (un_jump)
2326 __ c_ule_d(reg_op1, reg_op2);
2327 else
2328 __ c_ole_d(reg_op1, reg_op2);
2329 __ bc1t(L);
2331 break;
2333 case lir_cond_greaterEqual:
2334 case lir_cond_aboveEqual:
2335 if (un_jump)
2336 __ c_olt_d(reg_op1, reg_op2);
2337 else
2338 __ c_ult_d(reg_op1, reg_op2);
2339 __ bc1f(L);
2341 break;
2343 case lir_cond_greater:
2344 if (un_jump)
2345 __ c_ole_d(reg_op1, reg_op2);
2346 else
2347 __ c_ule_d(reg_op1, reg_op2);
2348 __ bc1f(L);
2350 break;
2352 default:
2353 ShouldNotReachHere();
2354 }
2355 __ delayed()->nop();
2356 } else {
2357 Unimplemented();
2358 }
2359 */
2360 }
2362 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
2363 LIR_Opr opr1 = op->left();
2364 LIR_Opr opr2 = op->right();
2365 LIR_Condition condition = op->cond();
2366 #ifdef ASSERT
2367 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
2368 if (op->block() != NULL) _branch_target_blocks.append(op->block());
2369 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
2370 #endif
2371 if (op->cond() == lir_cond_always) {
2372 if(op->label()==NULL)
2373 __ b_far(*op->label());
2374 else
2375 __ b_far(*op->label());
2376 __ delayed()->nop();
2377 return;
2378 }
2379 if (opr1->is_single_cpu()) {
2380 Register reg_op1 = opr1->as_register();
2381 if (opr2->is_single_cpu()) {
2382 #ifdef OPT_RANGECHECK
2383 assert(!op->check(), "just check");
2384 #endif
2385 Register reg_op2 = opr2->as_register();
2386 switch (condition) {
2387 case lir_cond_equal:
2388 __ beq_far(reg_op1, reg_op2, *op->label());
2389 break;
2390 case lir_cond_notEqual:
2391 if(op->label()==NULL)
2392 __ bne_far(reg_op1, reg_op2, *op->label());
2393 else
2394 __ bne_far(reg_op1, reg_op2, *op->label());
2395 break;
2396 case lir_cond_less:
2397 // AT = 1 TRUE
2398 __ slt(AT, reg_op1, reg_op2);
2399 __ bne_far(AT, R0, *op->label());
2400 break;
2401 case lir_cond_lessEqual:
2402 // AT = 0 TRUE
2403 __ slt(AT, reg_op2, reg_op1);
2404 __ beq_far(AT, R0, *op->label());
2405 break;
2406 case lir_cond_belowEqual:
2407 // AT = 0 TRUE
2408 __ sltu(AT, reg_op2, reg_op1);
2409 __ beq_far(AT, R0, *op->label());
2410 break;
2411 case lir_cond_greaterEqual:
2412 // AT = 0 TRUE
2413 __ slt(AT, reg_op1, reg_op2);
2414 __ beq_far(AT, R0, *op->label());
2415 break;
2416 case lir_cond_aboveEqual:
2417 // AT = 0 TRUE
2418 __ sltu(AT, reg_op1, reg_op2);
2419 __ beq_far(AT, R0, *op->label());
2420 break;
2421 case lir_cond_greater:
2422 // AT = 1 TRUE
2423 __ slt(AT, reg_op2, reg_op1);
2424 __ bne_far(AT, R0, *op->label());
2425 break;
2426 default: ShouldNotReachHere();
2427 }
2428 } else if (opr2->is_constant()) {
2429 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
2430 bool is_object = false;
2431 if (opr2->pointer()->as_constant()->type() == T_INT) {
2432 temp_value = (jint)(opr2->as_jint());
2433 } else if (opr2->pointer()->as_constant()->type() == T_LONG) {
2434 temp_value = (jlong)(opr2->as_jlong());
2435 } else if (opr2->pointer()->as_constant()->type() == T_OBJECT) {
2436 is_object = true;
2437 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_jobject());
2438 } else {
2439 ShouldNotReachHere();
2440 }
2442 switch (condition) {
2443 case lir_cond_equal:
2444 #ifdef OPT_RANGECHECK
2445 assert(!op->check(), "just check");
2446 #endif
2447 if (temp_value) {
2448 if (is_object) {
2449 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
2450 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2451 __ relocate(rspec);
2452 }
2453 __ li(AT, temp_value);
2454 __ beq_far(reg_op1, AT, *op->label());
2455 } else {
2456 __ beq_far(reg_op1, R0, *op->label());
2457 }
2458 break;
2460 case lir_cond_notEqual:
2461 #ifdef OPT_RANGECHECK
2462 assert(!op->check(), "just check");
2463 #endif
2464 if (temp_value) {
2465 if (is_object) {
2466 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
2467 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2468 __ relocate(rspec);
2469 }
2470 __ li(AT, temp_value);
2471 __ bne_far(reg_op1, AT, *op->label());
2472 } else {
2473 __ bne_far(reg_op1, R0, *op->label());
2474 }
2475 break;
2477 case lir_cond_less:
2478 #ifdef OPT_RANGECHECK
2479 assert(!op->check(), "just check");
2480 #endif
2481 // AT = 1 TRUE
2482 if (Assembler::is_simm16(temp_value)) {
2483 __ slti(AT, reg_op1, temp_value);
2484 } else {
2485 __ move(AT, temp_value);
2486 __ slt(AT, reg_op1, AT);
2487 }
2488 __ bne_far(AT, R0, *op->label());
2489 break;
2491 case lir_cond_lessEqual:
2492 #ifdef OPT_RANGECHECK
2493 assert(!op->check(), "just check");
2494 #endif
2495 // AT = 0 TRUE
2496 __ li(AT, temp_value);
2497 __ slt(AT, AT, reg_op1);
2498 __ beq_far(AT, R0, *op->label());
2499 break;
2501 case lir_cond_belowEqual:
2502 // AT = 0 TRUE
2503 #ifdef OPT_RANGECHECK
2504 if (op->check()) {
2505 __ li(AT, temp_value);
2506 add_debug_info_for_range_check_here(op->info(), temp_value);
2507 __ tgeu(AT, reg_op1, 29);
2508 } else {
2509 #endif
2510 __ li(AT, temp_value);
2511 __ sltu(AT, AT, reg_op1);
2512 __ beq_far(AT, R0, *op->label());
2513 #ifdef OPT_RANGECHECK
2514 }
2515 #endif
2516 break;
2518 case lir_cond_greaterEqual:
2519 #ifdef OPT_RANGECHECK
2520 assert(!op->check(), "just check");
2521 #endif
2522 // AT = 0 TRUE
2523 if (Assembler::is_simm16(temp_value)) {
2524 __ slti(AT, reg_op1, temp_value);
2525 } else {
2526 __ li(AT, temp_value);
2527 __ slt(AT, reg_op1, AT);
2528 }
2529 __ beq_far(AT, R0, *op->label());
2530 break;
2532 case lir_cond_aboveEqual:
2533 #ifdef OPT_RANGECHECK
2534 assert(!op->check(), "just check");
2535 #endif
2536 // AT = 0 TRUE
2537 if (Assembler::is_simm16(temp_value)) {
2538 __ sltiu(AT, reg_op1, temp_value);
2539 } else {
2540 __ li(AT, temp_value);
2541 __ sltu(AT, reg_op1, AT);
2542 }
2543 __ beq_far(AT, R0, *op->label());
2544 break;
2546 case lir_cond_greater:
2547 #ifdef OPT_RANGECHECK
2548 assert(!op->check(), "just check");
2549 #endif
2550 // AT = 1 TRUE
2551 __ li(AT, temp_value);
2552 __ slt(AT, AT, reg_op1);
2553 __ bne_far(AT, R0, *op->label());
2554 break;
2556 default: ShouldNotReachHere();
2557 }
2559 } else {
2560 if (opr2->is_address()) {
2561 //FIXME. aoqi lw or ld_ptr?
2562 if (op->type() == T_INT)
2563 __ lw(AT, as_Address(opr2->pointer()->as_address()));
2564 else
2565 __ ld_ptr(AT, as_Address(opr2->pointer()->as_address()));
2566 } else if (opr2->is_stack()) {
2567 //FIXME. aoqi
2568 __ ld_ptr(AT, frame_map()->address_for_slot(opr2->single_stack_ix()));
2569 } else {
2570 ShouldNotReachHere();
2571 }
2572 switch (condition) {
2573 case lir_cond_equal:
2574 #ifdef OPT_RANGECHECK
2575 assert(!op->check(), "just check");
2576 #endif
2577 __ beq_far(reg_op1, AT, *op->label());
2578 break;
2579 case lir_cond_notEqual:
2580 #ifdef OPT_RANGECHECK
2581 assert(!op->check(), "just check");
2582 #endif
2583 __ bne_far(reg_op1, AT, *op->label());
2584 break;
2585 case lir_cond_less:
2586 #ifdef OPT_RANGECHECK
2587 assert(!op->check(), "just check");
2588 #endif
2589 // AT = 1 TRUE
2590 __ slt(AT, reg_op1, AT);
2591 __ bne_far(AT, R0, *op->label());
2592 break;
2593 case lir_cond_lessEqual:
2594 #ifdef OPT_RANGECHECK
2595 assert(!op->check(), "just check");
2596 #endif
2597 // AT = 0 TRUE
2598 __ slt(AT, AT, reg_op1);
2599 __ beq_far(AT, R0, *op->label());
2600 break;
2601 case lir_cond_belowEqual:
2602 #ifdef OPT_RANGECHECK
2603 assert(!op->check(), "just check");
2604 #endif
2605 // AT = 0 TRUE
2606 __ sltu(AT, AT, reg_op1);
2607 __ beq_far(AT, R0, *op->label());
2608 break;
2609 case lir_cond_greaterEqual:
2610 #ifdef OPT_RANGECHECK
2611 assert(!op->check(), "just check");
2612 #endif
2613 // AT = 0 TRUE
2614 __ slt(AT, reg_op1, AT);
2615 __ beq_far(AT, R0, *op->label());
2616 break;
2617 case lir_cond_aboveEqual:
2618 // AT = 0 TRUE
2619 #ifdef OPT_RANGECHECK
2620 if (op->check()) {
2621 add_debug_info_for_range_check_here(op->info(), opr1->rinfo());
2622 __ tgeu(reg_op1, AT, 29);
2623 } else {
2624 #endif
2625 __ sltu(AT, reg_op1, AT);
2626 __ beq_far(AT, R0, *op->label());
2627 #ifdef OPT_RANGECHECK
2628 }
2629 #endif
2630 break;
2631 case lir_cond_greater:
2632 #ifdef OPT_RANGECHECK
2633 assert(!op->check(), "just check");
2634 #endif
2635 // AT = 1 TRUE
2636 __ slt(AT, AT, reg_op1);
2637 __ bne_far(AT, R0, *op->label());
2638 break;
2639 default: ShouldNotReachHere();
2640 }
2641 }
2642 #ifdef OPT_RANGECHECK
2643 if (!op->check())
2644 #endif
2645 __ delayed()->nop();
2647 } else if(opr1->is_address() || opr1->is_stack()) {
2648 #ifdef OPT_RANGECHECK
2649 assert(!op->check(), "just check");
2650 #endif
2651 if (opr2->is_constant()) {
2652 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
2653 if (opr2->as_constant_ptr()->type() == T_INT) {
2654 temp_value = (jint)opr2->as_constant_ptr()->as_jint();
2655 } else if (opr2->as_constant_ptr()->type() == T_OBJECT) {
2656 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_constant_ptr()->as_jobject());
2657 } else {
2658 ShouldNotReachHere();
2659 }
2661 if (Assembler::is_simm16(temp_value)) {
2662 if (opr1->is_address()) {
2663 __ lw(AT, as_Address(opr1->pointer()->as_address()));
2664 } else {
2665 __ lw(AT, frame_map()->address_for_slot(opr1->single_stack_ix()));
2666 }
2668 switch(condition) {
2670 case lir_cond_equal:
2671 __ addi(AT, AT, -(int)temp_value);
2672 __ beq_far(AT, R0, *op->label());
2673 break;
2674 case lir_cond_notEqual:
2675 __ addi(AT, AT, -(int)temp_value);
2676 __ bne_far(AT, R0, *op->label());
2677 break;
2678 case lir_cond_less:
2679 // AT = 1 TRUE
2680 __ slti(AT, AT, temp_value);
2681 __ bne_far(AT, R0, *op->label());
2682 break;
2683 case lir_cond_lessEqual:
2684 // AT = 0 TRUE
2685 __ addi(AT, AT, -temp_value);
2686 __ slt(AT, R0, AT);
2687 __ beq_far(AT, R0, *op->label());
2688 break;
2689 case lir_cond_belowEqual:
2690 // AT = 0 TRUE
2691 __ addiu(AT, AT, -temp_value);
2692 __ sltu(AT, R0, AT);
2693 __ beq_far(AT, R0, *op->label());
2694 break;
2695 case lir_cond_greaterEqual:
2696 // AT = 0 TRUE
2697 __ slti(AT, AT, temp_value);
2698 __ beq_far(AT, R0, *op->label());
2699 break;
2700 case lir_cond_aboveEqual:
2701 // AT = 0 TRUE
2702 __ sltiu(AT, AT, temp_value);
2703 __ beq_far(AT, R0, *op->label());
2704 break;
2705 case lir_cond_greater:
2706 // AT = 1 TRUE
2707 __ addi(AT, AT, -temp_value);
2708 __ slt(AT, R0, AT);
2709 __ bne_far(AT, R0, *op->label());
2710 break;
2712 default:
2713 Unimplemented();
2714 }
2715 } else {
2716 Unimplemented();
2717 }
2718 } else {
2719 Unimplemented();
2720 }
2721 __ delayed()->nop();
2723 } else if(opr1->is_double_cpu()) {
2724 #ifdef OPT_RANGECHECK
2725 assert(!op->check(), "just check");
2726 #endif
2727 Register opr1_lo = opr1->as_register_lo();
2728 Register opr1_hi = opr1->as_register_hi();
2730 if (opr2->is_double_cpu()) {
2731 Register opr2_lo = opr2->as_register_lo();
2732 Register opr2_hi = opr2->as_register_hi();
2733 switch (condition) {
2734 case lir_cond_equal: {
2735 Label L;
2736 #ifndef _LP64
2737 __ bne(opr1_lo, opr2_lo, L);
2738 __ delayed()->nop();
2739 __ beq(opr1_hi, opr2_hi, *op->label());
2740 #else
2741 /* static jobject java.lang.Long.toString(jlong)
2743 10 move [t0t0|J] [a4a4|J]
2744 12 move [lng:-9223372036854775808|J] [a6a6|J]
2745 14 branch [EQ] [a4a4|J] [a6a6|J] [B1]
2746 0x000000555e8532e4: bne a4, a6, 0x000000555e8532e4 <-- error
2747 0x000000555e8532e8: sll zero, zero, 0
2748 */
2749 __ beq_far(opr1_lo, opr2_lo, *op->label());
2750 #endif
2751 __ delayed()->nop();
2752 __ bind(L);
2753 }
2754 break;
2756 case lir_cond_notEqual:
2757 if (op->label()==NULL)
2758 __ bne_far(opr1_lo, opr2_lo, *op->label());
2759 else
2760 __ bne_far(opr1_lo, opr2_lo, *op->label());
2761 __ delayed()->nop();
2762 if (op->label()==NULL)
2763 NOT_LP64(__ bne(opr1_hi, opr2_hi, *op->label()));
2764 else
2765 NOT_LP64(__ bne_far(opr1_hi, opr2_hi, *op->label()));
2766 NOT_LP64(__ delayed()->nop());
2767 break;
2769 case lir_cond_less: {
2770 #ifdef _LP64
2771 __ slt(AT, opr1_lo, opr2_lo);
2772 __ bne_far(AT, R0, *op->label());
2773 __ delayed()->nop();
2774 #else
2775 Label L;
2777 // if hi less then jump
2778 __ slt(AT, opr1_hi, opr2_hi);
2779 __ bne(AT, R0, *op->label());
2780 __ delayed()->nop();
2782 // if hi great then fail
2783 __ bne(opr1_hi, opr2_hi, L);
2784 __ delayed();
2786 // now just comp lo as unsigned
2787 __ sltu(AT, opr1_lo, opr2_lo);
2788 __ bne_far(AT, R0, *op->label());
2789 __ delayed()->nop();
2791 __ bind(L);
2792 #endif
2793 }
2794 break;
2796 case lir_cond_lessEqual: {
2797 #ifdef _LP64
2798 __ slt(AT, opr2_lo, opr1_lo);
2799 __ beq_far(AT, R0, *op->label());
2800 __ delayed()->nop();
2801 #else
2802 Label L;
2804 // if hi great then fail
2805 __ slt(AT, opr2_hi, opr1_hi);
2806 __ bne(AT, R0, L);
2807 __ delayed()->nop();
2809 // if hi less then jump
2810 if(op->label()==NULL)
2811 __ bne(opr2_hi, opr1_hi, *op->label());
2812 else
2813 __ bne_far(opr2_hi, opr1_hi, *op->label());
2814 __ delayed();
2816 // now just comp lo as unsigned
2817 __ sltu(AT, opr2_lo, opr1_lo);
2818 __ beq(AT, R0, *op->label());
2819 __ delayed()->nop();
2821 __ bind(L);
2822 #endif
2823 }
2824 break;
2826 case lir_cond_belowEqual: {
2827 #ifdef _LP64
2828 __ sltu(AT, opr2_lo, opr1_lo);
2829 __ beq_far(AT, R0, *op->label());
2830 __ delayed()->nop();
2831 #else
2832 Label L;
2834 // if hi great then fail
2835 __ sltu(AT, opr2_hi, opr1_hi);
2836 __ bne_far(AT, R0, L);
2837 __ delayed()->nop();
2839 // if hi less then jump
2840 if(op->label()==NULL)
2841 __ bne(opr2_hi, opr1_hi, *op->label());
2842 else
2843 __ bne_far(opr2_hi, opr1_hi, *op->label());
2844 __ delayed();
2846 // now just comp lo as unsigned
2847 __ sltu(AT, opr2_lo, opr1_lo);
2848 __ beq(AT, R0, *op->label());
2849 __ delayed()->nop();
2851 __ bind(L);
2852 #endif
2853 }
2854 break;
2856 case lir_cond_greaterEqual: {
2857 #ifdef _LP64
2858 __ slt(AT, opr1_lo, opr2_lo);
2859 __ beq_far(AT, R0, *op->label());
2860 __ delayed()->nop();
2861 #else
2862 Label L;
2864 // if hi less then fail
2865 __ slt(AT, opr1_hi, opr2_hi);
2866 __ bne_far(AT, R0, L);
2867 __ delayed()->nop();
2869 // if hi great then jump
2870 if(op->label()==NULL)
2871 __ bne(opr2_hi, opr1_hi, *op->label());
2872 else
2873 __ bne_far(opr2_hi, opr1_hi, *op->label());
2874 __ delayed();
2876 // now just comp lo as unsigned
2877 __ sltu(AT, opr1_lo, opr2_lo);
2878 __ beq(AT, R0, *op->label());
2879 __ delayed()->nop();
2881 __ bind(L);
2882 #endif
2883 }
2884 break;
2886 case lir_cond_aboveEqual: {
2887 #ifdef _LP64
2888 __ sltu(AT, opr1_lo, opr2_lo);
2889 __ beq_far(AT, R0, *op->label());
2890 __ delayed()->nop();
2891 #else
2892 Label L;
2894 // if hi less then fail
2895 __ sltu(AT, opr1_hi, opr2_hi);
2896 __ bne(AT, R0, L);
2897 __ delayed()->nop();
2899 // if hi great then jump
2900 if(op->label()==NULL)
2901 __ bne(opr2_hi, opr1_hi, *op->label());
2902 else
2903 __ bne_far(opr2_hi, opr1_hi, *op->label());
2904 __ delayed();
2906 // now just comp lo as unsigned
2907 __ sltu(AT, opr1_lo, opr2_lo);
2908 __ beq(AT, R0, *op->label());
2909 __ delayed()->nop();
2911 __ bind(L);
2912 #endif
2913 }
2914 break;
2916 case lir_cond_greater: {
2917 #ifdef _LP64
2918 __ slt(AT, opr2_lo, opr1_lo);
2919 __ bne_far(AT, R0, *op->label());
2920 __ delayed()->nop();
2921 #else
2922 Label L;
2924 // if hi great then jump
2925 __ slt(AT, opr2_hi, opr1_hi);
2926 __ bne(AT, R0, *op->label());
2927 __ delayed()->nop();
2929 // if hi less then fail
2930 __ bne(opr2_hi, opr1_hi, L);
2931 __ delayed();
2933 // now just comp lo as unsigned
2934 __ sltu(AT, opr2_lo, opr1_lo);
2935 __ bne(AT, R0, *op->label());
2936 __ delayed()->nop();
2938 __ bind(L);
2939 #endif
2940 }
2941 break;
2943 default: ShouldNotReachHere();
2944 }
2946 } else if(opr2->is_constant()) {
2947 jlong lv = opr2->as_jlong();
2948 #ifndef _LP64
2949 jint iv_lo = (jint)lv;
2950 jint iv_hi = (jint)(lv>>32);
2951 bool is_zero = (lv==0);
2952 #endif
2954 switch (condition) {
2955 case lir_cond_equal:
2956 #ifdef _LP64
2957 __ li(T8, lv);
2958 __ beq_far(opr1_lo, T8, *op->label());
2959 __ delayed()->nop();
2960 #else
2961 if (is_zero) {
2962 __ orr(AT, opr1_lo, opr1_hi);
2963 __ beq(AT, R0, *op->label());
2964 __ delayed()->nop();
2965 } else {
2966 Label L;
2967 __ move(T8, iv_lo);
2968 __ bne(opr1_lo, T8, L);
2969 __ delayed();
2970 __ move(T8, iv_hi);
2971 __ beq(opr1_hi, T8, *op->label());
2972 __ delayed()->nop();
2973 __ bind(L);
2974 }
2975 #endif
2976 break;
2978 case lir_cond_notEqual:
2979 #ifdef _LP64
2980 __ li(T8, lv);
2981 __ bne_far(opr1_lo, T8, *op->label());
2982 __ delayed()->nop();
2983 #else
2984 if (is_zero) {
2985 __ orr(AT, opr1_lo, opr1_hi);
2986 __ bne(AT, R0, *op->label());
2987 __ delayed()->nop();
2988 } else {
2989 __ move(T8, iv_lo);
2990 __ bne(opr1_lo, T8, *op->label());
2991 __ delayed();
2992 __ move(T8, iv_hi);
2993 __ bne(opr1_hi, T8, *op->label());
2994 __ delayed()->nop();
2995 }
2996 #endif
2997 break;
2999 case lir_cond_less:
3000 #ifdef _LP64
3001 __ li(T8, lv);
3002 __ slt(AT, opr1_lo, T8);
3003 __ bne_far(AT, R0, *op->label());
3004 __ delayed()->nop();
3005 #else
3006 if (is_zero) {
3007 __ bltz(opr1_hi, *op->label());
3008 __ delayed()->nop();
3009 __ bltz(opr1_lo, *op->label());
3010 __ delayed()->nop();
3011 } else {
3012 Label L;
3014 // if hi less then jump
3015 __ move(T8, iv_hi);
3016 __ slt(AT, opr1_hi, T8);
3017 __ bne_far(AT, R0, *op->label());
3018 __ delayed()->nop();
3020 // if hi great then fail
3021 __ bne(opr1_hi, T8, L);
3022 __ delayed();
3024 // now just comp lo as unsigned
3025 if (Assembler::is_simm16(iv_lo)) {
3026 __ sltiu(AT, opr1_lo, iv_lo);
3027 } else {
3028 __ move(T8, iv_lo);
3029 __ sltu(AT, opr1_lo, T8);
3030 }
3031 __ bne(AT, R0, *op->label());
3032 __ delayed()->nop();
3034 __ bind(L);
3035 }
3036 #endif
3037 break;
3039 case lir_cond_lessEqual:
3040 #ifdef _LP64
3041 __ li(T8, lv);
3042 __ slt(AT, T8, opr1_lo);
3043 __ beq_far(AT, R0, *op->label());
3044 __ delayed()->nop();
3045 #else
3046 if (is_zero) {
3047 __ bltz(opr1_hi, *op->label());
3048 __ delayed()->nop();
3049 __ orr(AT, opr1_hi, opr1_lo);
3050 __ beq(AT, R0, *op->label());
3051 __ delayed();
3052 } else {
3053 Label L;
3055 // if hi great then fail
3056 __ move(T8, iv_hi);
3057 __ slt(AT, T8, opr1_hi);
3058 __ bne(AT, R0, L);
3059 __ delayed()->nop();
3061 // if hi less then jump
3062 __ bne(T8, opr1_hi, *op->label());
3063 __ delayed();
3065 // now just comp lo as unsigned
3066 __ move(T8, iv_lo);
3067 __ sltu(AT, T8, opr1_lo);
3068 __ beq(AT, R0, *op->label());
3069 __ delayed()->nop();
3071 __ bind(L);
3072 }
3073 #endif
3074 break;
3076 case lir_cond_belowEqual:
3077 #ifdef _LP64
3078 __ li(T8, lv);
3079 __ sltu(AT, T8, opr1_lo);
3080 __ beq_far(AT, R0, *op->label());
3081 __ delayed()->nop();
3082 #else
3083 if (is_zero) {
3084 __ orr(AT, opr1_hi, opr1_lo);
3085 __ beq(AT, R0, *op->label());
3086 __ delayed()->nop();
3087 } else {
3088 Label L;
3090 // if hi great then fail
3091 __ move(T8, iv_hi);
3092 __ sltu(AT, T8, opr1_hi);
3093 __ bne(AT, R0, L);
3094 __ delayed()->nop();
3096 // if hi less then jump
3097 __ bne(T8, opr1_hi, *op->label());
3098 __ delayed();
3100 // now just comp lo as unsigned
3101 __ move(T8, iv_lo);
3102 __ sltu(AT, T8, opr1_lo);
3103 __ beq(AT, R0, *op->label());
3104 __ delayed()->nop();
3106 __ bind(L);
3107 }
3108 #endif
3109 break;
3111 case lir_cond_greaterEqual:
3112 #ifdef _LP64
3113 __ li(T8, lv);
3114 __ slt(AT, opr1_lo, T8);
3115 __ beq_far(AT, R0, *op->label());
3116 __ delayed()->nop();
3117 #else
3118 if (is_zero) {
3119 __ bgez(opr1_hi, *op->label());
3120 __ delayed()->nop();
3121 } else {
3122 Label L;
3124 // if hi less then fail
3125 __ move(T8, iv_hi);
3126 __ slt(AT, opr1_hi, T8);
3127 __ bne(AT, R0, L);
3128 __ delayed()->nop();
3130 // if hi great then jump
3131 __ bne(T8, opr1_hi, *op->label());
3132 __ delayed();
3134 // now just comp lo as unsigned
3135 if (Assembler::is_simm16(iv_lo)) {
3136 __ sltiu(AT, opr1_lo, iv_lo);
3137 } else {
3138 __ move(T8, iv_lo);
3139 __ sltu(AT, opr1_lo, T8);
3140 }
3141 __ beq(AT, R0, *op->label());
3142 __ delayed()->nop();
3144 __ bind(L);
3145 }
3146 #endif
3147 break;
3149 case lir_cond_aboveEqual:
3150 #ifdef _LP64
3151 __ li(T8, lv);
3152 __ sltu(AT, opr1_lo, T8);
3153 __ beq_far(AT, R0, *op->label());
3154 __ delayed()->nop();
3155 #else
3156 if (is_zero) {
3157 if(op->label()==NULL) //by liaob2
3158 __ b(*op->label());
3159 else
3160 __ b_far(*op->label());
3161 __ delayed()->nop();
3162 } else {
3163 Label L;
3165 // if hi less then fail
3166 __ move(T8, iv_hi);
3167 __ sltu(AT, opr1_hi, T8);
3168 __ bne(AT, R0, L);
3169 __ delayed()->nop();
3171 // if hi great then jump
3172 __ bne(T8, opr1_hi, *op->label());
3173 __ delayed();
3175 // now just comp lo as unsigned
3176 if (Assembler::is_simm16(iv_lo)) {
3177 __ sltiu(AT, opr1_lo, iv_lo);
3178 } else {
3179 __ move(T8, iv_lo);
3180 __ sltu(AT, opr1_lo, T8);
3181 }
3182 __ beq(AT, R0, *op->label());
3183 __ delayed()->nop();
3185 __ bind(L);
3186 }
3187 #endif
3188 break;
3190 case lir_cond_greater:
3191 #ifdef _LP64
3192 __ li(T8, lv);
3193 __ slt(AT, T8, opr1_lo);
3194 __ bne_far(AT, R0, *op->label());
3195 __ delayed()->nop();
3196 #else
3197 if (is_zero) {
3198 Label L;
3199 __ bgtz(opr1_hi, *op->label());
3200 __ delayed()->nop();
3201 __ bne(opr1_hi, R0, L);
3202 __ delayed()->nop();
3203 __ bne(opr1_lo, R0, *op->label());
3204 __ delayed()->nop();
3205 __ bind(L);
3206 } else {
3207 Label L;
3209 // if hi great then jump
3210 __ move(T8, iv_hi);
3211 __ slt(AT, T8, opr1_hi);
3212 __ bne(AT, R0, *op->label());
3213 __ delayed()->nop();
3215 // if hi less then fail
3216 __ bne(T8, opr1_hi, L);
3217 __ delayed();
3219 // now just comp lo as unsigned
3220 __ move(T8, iv_lo);
3221 __ sltu(AT, T8, opr1_lo);
3222 __ bne(AT, R0, *op->label());
3223 __ delayed()->nop();
3225 __ bind(L);
3226 }
3227 #endif
3228 break;
3230 default:
3231 ShouldNotReachHere();
3232 }
3233 } else {
3234 Unimplemented();
3235 }
3236 } else if (opr1->is_single_fpu()) {
3237 #ifdef OPT_RANGECHECK
3238 assert(!op->check(), "just check");
3239 #endif
3240 assert(opr2->is_single_fpu(), "change the code");
3242 FloatRegister reg_op1 = opr1->as_float_reg();
3243 FloatRegister reg_op2 = opr2->as_float_reg();
3244 // bool un_ls
3245 bool un_jump = (op->ublock()->label()==op->label());
3247 Label& L = *op->label();
3249 switch (condition) {
3250 case lir_cond_equal:
3251 if (un_jump)
3252 __ c_ueq_s(reg_op1, reg_op2);
3253 else
3254 __ c_eq_s(reg_op1, reg_op2);
3255 __ bc1t(L);
3257 break;
3259 case lir_cond_notEqual:
3260 if (un_jump)
3261 __ c_eq_s(reg_op1, reg_op2);
3262 else
3263 __ c_ueq_s(reg_op1, reg_op2);
3264 __ bc1f(L);
3266 break;
3268 case lir_cond_less:
3269 if (un_jump)
3270 __ c_ult_s(reg_op1, reg_op2);
3271 else
3272 __ c_olt_s(reg_op1, reg_op2);
3273 __ bc1t(L);
3275 break;
3277 case lir_cond_lessEqual:
3278 case lir_cond_belowEqual:
3279 if (un_jump)
3280 __ c_ule_s(reg_op1, reg_op2);
3281 else
3282 __ c_ole_s(reg_op1, reg_op2);
3283 __ bc1t(L);
3285 break;
3287 case lir_cond_greaterEqual:
3288 case lir_cond_aboveEqual:
3289 if (un_jump)
3290 __ c_olt_s(reg_op1, reg_op2);
3291 else
3292 __ c_ult_s(reg_op1, reg_op2);
3293 __ bc1f(L);
3295 break;
3297 case lir_cond_greater:
3298 if (un_jump)
3299 __ c_ole_s(reg_op1, reg_op2);
3300 else
3301 __ c_ule_s(reg_op1, reg_op2);
3302 __ bc1f(L);
3304 break;
3306 default:
3307 ShouldNotReachHere();
3308 }
3309 __ delayed()->nop();
3310 } else if (opr1->is_double_fpu()) {
3311 #ifdef OPT_RANGECHECK
3312 assert(!op->check(), "just check");
3313 #endif
3314 assert(opr2->is_double_fpu(), "change the code");
3316 FloatRegister reg_op1 = opr1->as_double_reg();
3317 FloatRegister reg_op2 = opr2->as_double_reg();
3318 bool un_jump = (op->ublock()->label()==op->label());
3319 Label& L = *op->label();
3321 switch (condition) {
3322 case lir_cond_equal:
3323 if (un_jump)
3324 __ c_ueq_d(reg_op1, reg_op2);
3325 else
3326 __ c_eq_d(reg_op1, reg_op2);
3327 __ bc1t(L);
3329 break;
3331 case lir_cond_notEqual:
3332 if (un_jump)
3333 __ c_eq_d(reg_op1, reg_op2);
3334 else
3335 __ c_ueq_d(reg_op1, reg_op2);
3336 __ bc1f(L);
3338 break;
3340 case lir_cond_less:
3341 if (un_jump)
3342 __ c_ult_d(reg_op1, reg_op2);
3343 else
3344 __ c_olt_d(reg_op1, reg_op2);
3345 __ bc1t(L);
3347 break;
3349 case lir_cond_lessEqual:
3350 case lir_cond_belowEqual:
3351 if (un_jump)
3352 __ c_ule_d(reg_op1, reg_op2);
3353 else
3354 __ c_ole_d(reg_op1, reg_op2);
3355 __ bc1t(L);
3357 break;
3359 case lir_cond_greaterEqual:
3360 case lir_cond_aboveEqual:
3361 if (un_jump)
3362 __ c_olt_d(reg_op1, reg_op2);
3363 else
3364 __ c_ult_d(reg_op1, reg_op2);
3365 __ bc1f(L);
3367 break;
3369 case lir_cond_greater:
3370 if (un_jump)
3371 __ c_ole_d(reg_op1, reg_op2);
3372 else
3373 __ c_ule_d(reg_op1, reg_op2);
3374 __ bc1f(L);
3376 break;
3378 default:
3379 ShouldNotReachHere();
3380 }
3381 __ delayed()->nop();
3382 } else {
3383 Unimplemented();
3384 }
3385 }
// Emit code for a LIR_OpConvert: lowers the JVM primitive conversion
// bytecodes (_i2l, _l2i, _i2b, _i2c, _i2s, _f2d, _d2f, _i2f, _i2d,
// _f2i, _d2i, _l2f, _l2d, _f2l, _d2l) to MIPS instructions.
// Float/double -> integer conversions follow the Java rules: NaN maps
// to 0, and values outside the target range are routed to the matching
// SharedRuntime helper (detected via the FCSR invalid-operation flag).
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  // NOTE(review): 'value' and 'src' alias the same input operand; some
  // cases below use one name and some the other -- behaviorally equal,
  // but worth unifying.
  LIR_Opr value = op->in_opr();
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();
  Bytecodes::Code code = op->bytecode();

  switch (code) {
    case Bytecodes::_i2l:
      // int -> long: low word carries the value; on 32-bit replicate the
      // sign bit into the high register.
      move_regs(src->as_register(), dest->as_register_lo());
      NOT_LP64(__ sra (dest->as_register_hi(), dest->as_register_lo(), 31));
      break;

    case Bytecodes::_l2i:
      // long -> int: keep the low 32 bits, sign-extended (LP64 uses the
      // shift-left-32 / arithmetic-shift-right-32 idiom).
#ifndef _LP64
      move_regs (src->as_register_lo(), dest->as_register());
#else
      __ dsll32(dest->as_register(), src->as_register_lo(), 0);
      __ dsra32(dest->as_register(), dest->as_register(), 0);
#endif
      break;

    case Bytecodes::_i2b:
      // int -> byte: sign-extend the low 8 bits (LP64: shift by 56 total).
#ifndef _LP64
      move_regs (src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
#else
      __ dsll32(dest->as_register(), src->as_register(), 24);
      __ dsra32(dest->as_register(), dest->as_register(), 24);
#endif
      break;

    case Bytecodes::_i2c:
      // int -> char: zero-extend the low 16 bits.
      __ andi(dest->as_register(), src->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      // int -> short: sign-extend the low 16 bits (LP64: shift by 48 total).
#ifndef _LP64
      move_regs (src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
#else
      __ dsll32(dest->as_register(), src->as_register(), 16);
      __ dsra32(dest->as_register(), dest->as_register(), 16);
#endif
      break;

    case Bytecodes::_f2d:
      __ cvt_d_s(dest->as_double_reg(), src->as_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvt_s_d(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f: {
      // int -> float: move the int bits into the FPU and convert; the
      // source may still live in a stack slot.
      FloatRegister df = dest->as_float_reg();
      if(src->is_single_cpu()) {
        __ mtc1(src->as_register(), df);
        __ cvt_s_w(df, df);
      } else if (src->is_stack()) {
        Address src_addr = src->is_single_stack()
          ? frame_map()->address_for_slot(src->single_stack_ix())
          : frame_map()->address_for_slot(src->double_stack_ix());
        __ lw(AT, src_addr);
        __ mtc1(AT, df);
        __ cvt_s_w(df, df);
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_i2d: {
      // int -> double: same structure as _i2f (note: uses the 'value'
      // alias for the stack indices; value == src, see above).
      FloatRegister dd = dest->as_double_reg();
      if (src->is_single_cpu()) {
        __ mtc1(src->as_register(), dd);
        __ cvt_d_w(dd, dd);
      } else if (src->is_stack()) {
        Address src_addr = src->is_single_stack()
          ? frame_map()->address_for_slot(value->single_stack_ix())
          : frame_map()->address_for_slot(value->double_stack_ix());
        __ lw(AT, src_addr);
        __ mtc1(AT, dd);
        __ cvt_d_w(dd, dd);
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister fval = src->as_float_reg();
      Register dreg = dest->as_register();

      Label L;
      // NaN input: result is 0 per the JVM spec (dreg is preloaded with
      // zero in the branch delay slot).
      __ c_un_s(fval, fval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dreg, R0);

      __ trunc_w_s(F30, fval);

      /* Call SharedRuntime:f2i() to do valid convention */
      // Check the FCSR invalid-operation cause bit (0x10000): if trunc
      // overflowed, fall back to the runtime helper for the saturating
      // Java semantics; otherwise take the truncated value from F30.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->mfc1(dreg, F30);

      __ mov_s(F12, fval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      __ move(dreg, V0);
      __ bind(L);
      break;
    }
    case Bytecodes::_d2i: {
      FloatRegister dval = src->as_double_reg();
      Register dreg = dest->as_register();

      Label L;
#ifndef _LP64
      // 32-bit only: explicit NaN -> 0 fast path. On LP64 NaN raises the
      // invalid flag below and is handled by SharedRuntime::d2i.
      __ c_un_d(dval, dval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dreg, R0);
#endif

      __ trunc_w_d(F30, dval);
      // Same FCSR invalid-operation check as _f2i above.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->mfc1(dreg, F30);

      __ mov_d(F12, dval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
      __ move(dreg, V0);
      __ bind(L);
      break;
    }
    case Bytecodes::_l2f: {
      // long -> float: move the 64-bit pattern into the FPU (two 32-bit
      // halves on !_LP64) and convert.
      FloatRegister ldf = dest->as_float_reg();
      if (src->is_double_cpu()) {
#ifndef _LP64
        __ mtc1(src->as_register_lo(), ldf);
        __ mtc1(src->as_register_hi(), ldf + 1);
        __ cvt_s_l(ldf, ldf);
#else
        __ dmtc1(src->as_register_lo(), ldf);
        __ cvt_s_l(ldf, ldf);
#endif
      } else if (src->is_double_stack()) {
        Address src_addr=frame_map()->address_for_slot(value->double_stack_ix());
#ifndef _LP64
        __ lw(AT, src_addr);
        __ mtc1(AT, ldf);
        __ lw(AT, src_addr.base(), src_addr.disp() + 4);
        __ mtc1(AT, ldf + 1);
        __ cvt_s_l(ldf, ldf);
#else
        __ ld(AT, src_addr);
        __ dmtc1(AT, ldf);
        __ cvt_s_l(ldf, ldf);
#endif
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_l2d: {
      // long -> double: same structure as _l2f.
      FloatRegister ldd = dest->as_double_reg();
      if (src->is_double_cpu()) {
#ifndef _LP64
        __ mtc1(src->as_register_lo(), ldd);
        __ mtc1(src->as_register_hi(), ldd + 1);
        __ cvt_d_l(ldd, ldd);
#else
        __ dmtc1(src->as_register_lo(), ldd);
        __ cvt_d_l(ldd, ldd);
#endif
      } else if (src->is_double_stack()) {
        Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
#ifndef _LP64
        __ lw(AT, src_addr);
        __ mtc1(AT, ldd);
        __ lw(AT, src_addr.base(), src_addr.disp() + 4);
        __ mtc1(AT, ldd + 1);
        __ cvt_d_l(ldd, ldd);
#else
        __ ld(AT, src_addr);
        __ dmtc1(AT, ldd);
        __ cvt_d_l(ldd, ldd);
#endif
      } else {
        Unimplemented();
      }
      break;
    }

    case Bytecodes::_f2l: {
      FloatRegister fval = src->as_float_reg();
      Register dlo = dest->as_register_lo();
      Register dhi = dest->as_register_hi();

      Label L;
      // NaN input: result is 0L (both halves pre-zeroed; dlo via the
      // delay slot).
      __ move(dhi, R0);
      __ c_un_s(fval, fval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dlo, R0);

      __ trunc_l_s(F30, fval);
#ifdef _LP64
      // Overflow during trunc -> FCSR invalid bit set -> use the
      // SharedRuntime::f2l helper for saturating Java semantics.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->dmfc1(dlo, F30);

      __ mov_s(F12, fval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      __ move(dlo, V0);
#else
      __ mfc1(dlo, F30);
#endif
      // NOTE(review): 32-bit path takes the high word from F31 with no
      // overflow/NaN fallback beyond the check above -- confirm against
      // the 32-bit ABI before reuse.
      NOT_LP64(__ mfc1(dhi, F31));
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l: {
      // double -> long: same structure as _f2l.
      FloatRegister dval = src->as_double_reg();
      Register dlo = dest->as_register_lo();
      Register dhi = dest->as_register_hi();

      Label L;
      __ move(dhi, R0);
      __ c_un_d(dval, dval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dlo, R0);

      __ trunc_l_d(F30, dval);
#ifdef _LP64
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->dmfc1(dlo, F30);

      __ mov_d(F12, dval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
      __ move(dlo, V0);
#else
      __ mfc1(dlo, F30);
      __ mfc1(dhi, F31);
#endif
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}
3648 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
3649 if (op->init_check()) {
3650 add_debug_info_for_null_check_here(op->stub()->info());
3651 __ lw(AT,Address(op->klass()->as_register(),
3652 InstanceKlass::init_state_offset()));
3653 __ addi(AT, AT, -InstanceKlass::fully_initialized);
3654 __ bne_far(AT, R0,*op->stub()->entry());
3655 __ delayed()->nop();
3656 }
3657 __ allocate_object(
3658 op->obj()->as_register(),
3659 op->tmp1()->as_register(),
3660 op->tmp2()->as_register(),
3661 op->header_size(),
3662 op->object_size(),
3663 op->klass()->as_register(),
3664 *op->stub()->entry());
3666 __ bind(*op->stub()->continuation());
3667 }
3669 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
3670 if (UseSlowPath ||
3671 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
3672 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
3673 __ b_far(*op->stub()->entry());
3674 __ delayed()->nop();
3675 } else {
3676 Register len = op->len()->as_register();
3677 Register tmp1 = op->tmp1()->as_register();
3678 Register tmp2 = op->tmp2()->as_register();
3679 Register tmp3 = op->tmp3()->as_register();
3680 __ allocate_array(op->obj()->as_register(),
3681 len,
3682 tmp1,
3683 tmp2,
3684 tmp3,
3685 arrayOopDesc::header_size(op->type()),
3686 array_element_size(op->type()),
3687 op->klass()->as_register(),
3688 *op->stub()->entry());
3689 }
3690 __ bind(*op->stub()->continuation());
3691 }
// Updates the ReceiverTypeData rows of the MDO slot 'data' for the
// receiver klass held in 'recv':
//   - if 'recv' matches an existing row, bump that row's counter;
//   - otherwise claim the first empty row and seed its counter.
// Branches to 'update_done' after any update; falls through only when
// every row is already occupied by a different klass. Clobbers AT.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ bne(AT, recv, next_test);
    __ delayed()->nop();
    // Matched: increment this row's count.
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ ld_ptr(AT, data_addr);
    __ addi(AT, AT, DataLayout::counter_increment);
    __ st_ptr(AT, data_addr);
    __ b(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ ld_ptr(AT, recv_addr);
    // A zero receiver cell marks an unused row.
    __ bne(AT, R0, next_test);
    __ delayed()->nop();
    __ st_ptr(recv, recv_addr);
    __ move(AT, DataLayout::counter_increment);
    __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ b(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}
// Shared type-check emitter used by checkcast and instanceof.
// Branches to 'success', 'failure' or 'obj_is_null' according to the
// outcome. When the op carries profiling info, the check first lands in
// local profile_cast_success/failure blocks that update the MDO
// (null_seen flag, receiver-type rows, counter) before branching on to
// the caller-supplied labels.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  // Profiling interposes local blocks between the check and the caller's
  // labels; without profiling the check branches straight out.
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // Retarget whichever temp aliases 'obj' onto 'dst' so 'obj' survives.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    Label not_null;
    __ bne(obj, R0, not_null);
    __ delayed()->nop();
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ lw(AT, data_addr);
    __ ori(AT, AT, header_bits);
    __ sw(AT,data_addr);
    __ b(*obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ beq(obj, R0, *obj_is_null);
    __ delayed()->nop();
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    // NOTE(review): for a loaded klass on !_LP64 nothing is emitted here,
    // leaving k_RInfo unset -- this port appears to be LP64-only; confirm
    // before enabling a 32-bit build.
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Fast check: the expected klass must match the object's klass
    // exactly (leaf/final classes).
    // get object class
    // not a safepoint as obj null check happens earlier
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj);
      __ bne_far(k_RInfo, Rtmp1, *failure_target);
      __ delayed()->nop();
    } else {
      __ ld(AT, Address(obj, oopDesc::klass_offset_in_bytes()));
      __ bne_far(k_RInfo, AT, *failure_target);
      __ delayed()->nop();
    }
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ld(AT, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // k has a fixed super_check_offset: a single load+compare decides.
        __ bne_far(k_RInfo, AT, *failure_target);
        __ delayed()->nop();
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ beq(k_RInfo, AT, *success_target);
        __ delayed()->nop();
        // check for self
        __ beq(k_RInfo, klass_RInfo, *success_target);
        __ delayed()->nop();

        // Slow path: Runtime1::slow_subtype_check_id takes the sub klass
        // in A0 and the super klass in A1; preserve A0/A1 if they are live.
        if (A0 != klass_RInfo) __ push(A0);
        if (A1 != k_RInfo) __ push(A1);
        if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
        if (A1 != k_RInfo) __ move(A1, k_RInfo);
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ delayed()->nop();
        if (A1 != k_RInfo) __ pop(A1);
        if (A0 != klass_RInfo) __ pop(A0);
        // result is a boolean
        __ beq_far(V0, R0, *failure_target);
        __ delayed()->nop();
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      if (A0 != klass_RInfo) __ push(A0);
      if (A1 != k_RInfo) __ push(A1);
      if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
      if (A1 != k_RInfo) __ move(A1, k_RInfo);
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      if (A1 != k_RInfo) __ pop(A1);
      if (A0 != klass_RInfo) __ pop(A0);
      // result is a boolean
      __ beq_far(V0, R0, *failure_target);
      __ delayed()->nop();
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    // Successful cast: record the receiver type in the MDO rows.
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;  // NOTE(review): unused; the helper is handed 'success' directly
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);
    __ delayed()->nop();

    // Failed cast: decrement the counter in the MDO.
    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ ld_ptr(AT, counter_addr);
    __ addi(AT, AT, -DataLayout::counter_increment);
    __ st_ptr(AT, counter_addr);

    __ b(*failure);
    __ delayed()->nop();
  }
  __ b(*success);
  __ delayed()->nop();
}
3887 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
3888 LIR_Code code = op->code();
3889 if (code == lir_store_check) {
3890 Register value = op->object()->as_register();
3891 Register array = op->array()->as_register();
3892 Register k_RInfo = op->tmp1()->as_register();
3893 Register klass_RInfo = op->tmp2()->as_register();
3894 Register tmp = op->tmp3()->as_register();
3896 CodeStub* stub = op->stub();
3898 //check if it needs to be profiled
3899 ciMethodData* md;
3900 ciProfileData* data;
3902 if (op->should_profile()) {
3903 ciMethod* method = op->profiled_method();
3904 assert(method != NULL, "Should have method");
3905 int bci = op->profiled_bci();
3906 md = method->method_data_or_null();
3907 assert(md != NULL, "Sanity");
3908 data = md->bci_to_data(bci);
3909 assert(data != NULL, "need data for type check");
3910 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
3911 }
3912 Label profile_cast_success, profile_cast_failure, done;
3913 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
3914 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
3916 if(op->should_profile()) {
3917 Label not_null;
3918 __ bne(value, R0, not_null);
3919 __ delayed()->nop();
3921 Register mdo = klass_RInfo;
3922 __ mov_metadata(mdo, md->constant_encoding());
3923 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3924 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3925 __ lw(AT, data_addr);
3926 __ ori(AT, AT, header_bits);
3927 __ sw(AT,data_addr);
3928 __ b(done);
3929 __ delayed()->nop();
3930 __ bind(not_null);
3931 } else {
3932 __ beq(value, R0, done);
3933 __ delayed()->nop();
3934 }
3936 add_debug_info_for_null_check_here(op->info_for_exception());
3937 __ load_klass(k_RInfo, array);
3938 __ load_klass(klass_RInfo, value);
3939 // get instance klass (it's already uncompressed)
3940 __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
3941 // perform the fast part of the checking logic
3942 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, tmp, success_target, failure_target, NULL);
3943 if (A0 != klass_RInfo) __ push(A0);
3944 if (A1 != k_RInfo) __ push(A1);
3945 if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
3946 if (A1 != k_RInfo) __ move(A1, k_RInfo);
3947 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3948 __ delayed()->nop();
3949 if (A1 != k_RInfo) __ pop(A1);
3950 if (A0 != klass_RInfo) __ pop(A0);
3951 // result is a boolean
3952 __ beq_far(V0, R0, *failure_target);
3953 __ delayed()->nop();
3954 // fall through to the success case
3956 if (op->should_profile()) {
3957 Register mdo = klass_RInfo, recv = k_RInfo;
3958 __ bind(profile_cast_success);
3959 __ mov_metadata(mdo, md->constant_encoding());
3960 __ load_klass(recv, value);
3961 Label update_done;
3962 type_profile_helper(mdo, md, data, recv, &done);
3963 __ b(done);
3964 __ delayed()->nop();
3966 __ bind(profile_cast_failure);
3967 __ mov_metadata(mdo, md->constant_encoding());
3968 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3969 __ ld_ptr(AT, counter_addr);
3970 __ addi(AT, AT, -DataLayout::counter_increment);
3971 __ st_ptr(AT, counter_addr);
3972 __ b_far(*stub->entry());
3973 __ delayed()->nop();
3974 }
3976 __ bind(done);
3977 } else if (code == lir_checkcast) {
3978 Register obj = op->object()->as_register();
3979 Register dst = op->result_opr()->as_register();
3980 Label success;
3981 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
3982 __ bind(success);
3983 if (dst != obj) {
3984 __ move(dst, obj);
3985 }
3986 } else if (code == lir_instanceof) {
3987 Register obj = op->object()->as_register();
3988 Register dst = op->result_opr()->as_register();
3989 Label success, failure, done;
3990 emit_typecheck_helper(op, &success, &failure, &failure);
3991 __ bind(failure);
3992 __ move(dst, R0);
3993 __ b(done);
3994 __ delayed()->nop();
3995 __ bind(success);
3996 __ addi(dst, R0, 1);
3997 __ bind(done);
3998 } else {
3999 ShouldNotReachHere();
4000 }
4001 }
4003 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
4004 if (op->code() == lir_cas_long) {
4005 #ifdef _LP64
4006 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
4007 Register newval = (op->new_value()->is_single_cpu() ? op->new_value()->as_register() : op->new_value()->as_register_lo());
4008 Register cmpval = (op->cmp_value()->is_single_cpu() ? op->cmp_value()->as_register() : op->cmp_value()->as_register_lo());
4009 assert(newval != NULL, "new val must be register");
4010 assert(cmpval != newval, "cmp and new values must be in different registers");
4011 assert(cmpval != addr, "cmp and addr must be in different registers");
4012 assert(newval != addr, "new value and addr must be in different registers");
4013 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
4014 #else
4015 Register addr = op->addr()->as_register();
4016 __ cmpxchg8(op->new_value()->as_register_lo(),
4017 op->new_value()->as_register_hi(),
4018 addr,
4019 op->cmp_value()->as_register_lo(),
4020 op->cmp_value()->as_register_hi())
4021 #endif
4022 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
4023 Register addr = op->addr()->as_pointer_register();
4024 Register cmp_value = op->cmp_value()->as_register();
4025 Register new_value = op->new_value()->as_register();
4026 Register tmp1 = op->tmp1()->as_register();
4027 Register tmp2 = op->tmp2()->as_register();
4029 assert_different_registers(addr, cmp_value, new_value, tmp1, tmp2);
4031 if (op->code() == lir_cas_obj) {
4032 #ifdef _LP64
4033 if (UseCompressedOops) {
4034 __ move(tmp1, cmp_value);
4035 __ encode_heap_oop(tmp1);
4036 __ sll(tmp1, tmp1, 0);
4037 __ move(tmp2, new_value);
4038 __ encode_heap_oop(tmp2);
4039 __ sll(tmp2, tmp2, 0);
4040 __ cmpxchg32(tmp2, addr, tmp1); // 32-bit test-and-set
4041 } else {
4042 __ cmpxchg(new_value, addr, cmp_value); // 64-bit test-and-set
4043 }
4044 } else
4045 #endif
4046 {
4047 __ cmpxchg32(new_value, addr, cmp_value); // 32-bit test-and-set
4048 }
4049 } else {
4050 Unimplemented();
4051 }
4052 }
#ifndef MIPS64
// Conditional move lowering is not implemented for this (non-MIPS64)
// configuration; reaching it is a fatal error via Unimplemented().
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  Unimplemented();
}
#endif
// Emits code for a two-operand arithmetic LIR op (add/sub/mul, plus the
// FPU div variants), dispatching on the kinds of the left operand:
// single cpu register, long register pair, float/double FPU register,
// or an in-memory (stack slot / address) destination.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info,bool pop_fpu_stack) {
  assert(info == NULL || ((code == lir_rem || code == lir_div || code == lir_sub) && right->is_double_cpu()), "info is only for ldiv/lrem");
  if (left->is_single_cpu()) {
    // left may not be equal to dest on mips.
    //assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_cpu_register()) {
      // cpu register - cpu register
      Register rreg, res;
      if (right->is_single_cpu()) {
        rreg = right->as_register();
#ifdef _LP64
        if(dest->is_double_cpu())
          res = dest->as_register_lo();
        else
#endif
          res = dest->as_register();
      } else if (right->is_double_cpu()) {
        assert(right->is_double_cpu(),"right must be long");
        rreg = right->as_register_lo();
        res = dest->as_register_lo();
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add:
#ifdef _LP64
          // addu32 keeps an int-typed result properly sign-extended.
          if (dest->type() == T_INT)
            __ addu32(res, lreg, rreg);
          else
#endif
            __ addu(res, lreg, rreg);
          break;

        case lir_mul:
#ifndef _LP64
          //by aoqi
          __ mult(lreg, rreg);
#else
          __ dmult(lreg, rreg);
#endif
          // Two nops: mflo must not follow mult in the next two slots on
          // the targeted cores.
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           *    Example: java.net.URLClassLoader::string2int()
           *   a6: 0xcafebab
           *   s0: 16
           *
           *   104 mul [a6|I] [s0|I] [t0|I]
           *   0x00000055655e3728: dmult a6, s0
           *   0x00000055655e372c: sll zero, zero, 0
           *   0x00000055655e3730: sll zero, zero, 0
           *   0x00000055655e3734: mflo t0          <-- error
           *
           *   t0: 0xFFFFFFFFcafebab0  (Right)
           *   t0: 0x00000000cafebab0  (Wrong)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_sub:
#ifdef _LP64
          if (dest->type() == T_INT)
            __ subu32(res, lreg, rreg);
          else
#endif
            __ subu(res, lreg, rreg);
          break;

        default:
          ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // cpu register - stack
      Unimplemented();
    } else if (right->is_constant()) {
      // cpu register - constant
      Register res;
      if (dest->is_double_cpu()) {
        res = dest->as_register_lo();
      } else {
        res = dest->as_register();
      }
      // NOTE(review): a long constant is truncated to jint here --
      // presumably only reachable with small constants; confirm against
      // the LIR generator before relying on it.
      jint c;
      if (right->type() == T_INT) {
        c = right->as_constant_ptr()->as_jint();
      } else {
        c = right->as_constant_ptr()->as_jlong();
      }

      switch (code) {
        case lir_mul_strictfp:
        case lir_mul:
          __ move(AT, c);
#ifndef _LP64
          //by aoqi
          __ mult(lreg, AT);
#else
          __ dmult(lreg, AT);
#endif
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           * (See the lir_mul register-register case above for the
           * java.net.URLClassLoader::string2int example.)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_add:
          if (Assembler::is_simm16(c)) {
            __ addiu(res, lreg, c);
          } else {
            __ move(AT, c);
            __ addu(res, lreg, AT);
          }
          break;

        case lir_sub:
          if (Assembler::is_simm16(-c)) {
            __ addi(res, lreg, -c);
          } else {
            __ move(AT, c);
            __ subu(res, lreg, AT);
          }
          break;

        default:
          ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register op1_lo = left->as_register_lo();
    Register op1_hi = left->as_register_hi();
    Register op2_lo;
    Register op2_hi;
    Register dst_lo;
    Register dst_hi;

    if(dest->is_single_cpu())
    {
      dst_lo = dest->as_register();
    }
    else
    {
#ifdef _LP64
      dst_lo = dest->as_register_lo();
#else
      dst_lo = dest->as_register_lo();
      dst_hi = dest->as_register_hi();
#endif
    }
    if (right->is_constant()) {
      // Materialize the constant into AT; high half is zero on 32-bit.
      op2_lo = AT;
      op2_hi = R0;
#ifndef _LP64
      __ li(AT, right->as_constant_ptr()->as_jint());
#else
      __ li(AT, right->as_constant_ptr()->as_jlong_bits());
#endif
    } else if (right->is_double_cpu()) { // Double cpu
      assert(right->is_double_cpu(),"right must be long");
      assert(dest->is_double_cpu(), "dest must be long");
      op2_lo = right->as_register_lo();
      op2_hi = right->as_register_hi();
    } else {
#ifdef _LP64
      op2_lo = right->as_register();
#else
      ShouldNotReachHere();
#endif
    }

    NOT_LP64(assert_different_registers(op1_lo, op1_hi, op2_lo, op2_hi));
    // Jin: Why?
    // LP64_ONLY(assert_different_registers(op1_lo, op2_lo));

    switch (code) {
      case lir_add:
#ifndef _LP64
        //by aoqi
        // 64-bit add on 32-bit: add low words, compute the carry via
        // sltu, then fold it into the high-word sum.
        __ addu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, dst_lo, op2_lo);
        __ addu(dst_hi, op1_hi, op2_hi);
        __ addu(dst_hi, dst_hi, AT);
#else
        __ addu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_sub:
#ifndef _LP64
        //by aoqi
        // 64-bit subtract on 32-bit: subtract low words, compute the
        // borrow via sltu, then subtract it from the high-word result.
        __ subu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, op1_lo, dst_lo);
        __ subu(dst_hi, op1_hi, op2_hi);
        __ subu(dst_hi, dst_hi, AT);
#else
        __ subu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_mul:
        {
#ifndef _LP64
          //by aoqi
          // 64x64 multiply on 32-bit: schoolbook cross products.
          // Fast exits: either low word zero -> result zero; both high
          // words zero -> single 32x32 multiply.
          Label zero, quick, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

          //quick?
          __ orr(AT, op2_hi, op1_hi);
          __ beq(AT, R0, quick);
          __ delayed()->nop();

          __ multu(op2_lo, op1_hi);
          __ nop();
          __ nop();
          __ mflo(dst_hi);
          __ multu(op2_hi, op1_lo);
          __ nop();
          __ nop();
          __ mflo(AT);

          __ bind(quick);
          __ multu(op2_lo, op1_lo);
          __ addu(dst_hi, dst_hi, AT);
          __ nop();
          __ mflo(dst_lo);
          __ mfhi(AT);
          __ b(done);
          __ delayed()->addu(dst_hi, dst_hi, AT);

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#else
          Label zero, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

#ifdef ASSERT
          //op1_hi, op2_hi should be 0
          {
            Label L;
            __ beq(op1_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
          {
            Label L;
            __ beq(op2_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
#endif

          __ multu(op2_lo, op1_lo);
          __ nop();
          __ nop();
          __ mflo(dst_lo);
          __ b(done);
          __ delayed()->nop();

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#endif //_LP64
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(),"right must be float");
    assert(dest->is_single_fpu(), "dest must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = dest->as_float_reg();

    switch (code) {
      case lir_add:
        __ add_s(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_s(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        __ mul_s(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_s(res, lreg, rreg);
        break;
      default : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    assert(right->is_double_fpu(),"right must be double");
    assert(dest->is_double_fpu(), "dest must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = dest->as_double_reg();

    switch (code) {
      case lir_add:
        __ add_d(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_d(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        // by yjl 9/13/2005
        __ mul_d(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_d(res, lreg, rreg);
        break;
      // case lir_rem:
      //   __ rem_d(res, lreg, rreg);
      //   break;
      default : ShouldNotReachHere();
    }
  }
  else if (left->is_single_stack() || left->is_address()) {
    // Memory-destination form: read-modify-write through AT.
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ add(AT, AT, rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dadd(AT, AT, rreg);
          __ sd(AT, laddr);
#endif
          break;
        case lir_sub:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ sub(AT,AT,rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dsub(AT,AT,rreg);
          __ sd(AT, laddr);
#endif
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
#ifndef _LP64
      jint c = right->as_constant_ptr()->as_jint();
#else
      jlong c = right->as_constant_ptr()->as_jlong_bits();
#endif
      switch (code) {
        case lir_add: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, c);
#else
          __ li(T8, c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        case lir_sub: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, -c);
#else
          __ li(T8, -c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Emit a one-operand math intrinsic (log, log10, abs, sqrt, sin, cos, tan).
// Only double-precision FPU operands are handled on MIPS; log/log10 have no
// MIPS implementation yet and fall through to Unimplemented().
//FIXME: lir_log, lir_log10, lir_abs, lir_sqrt -- many new LIR instructions @jerome
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op *op) {
  if (value->is_double_fpu()) {
    // assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : //__ flog() ; break;
      case lir_log10 : //__ flog10() ;
        // log/log10 are not implemented for MIPS.
        Unimplemented();
        break;
      case lir_abs   : __ abs_d(dest->as_double_reg(), value->as_double_reg()) ; break;
      case lir_sqrt  : __ sqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
      case lir_sin   :
        // NOTE(review): trigfunc() looks inherited from the x87-based x86 port;
        // confirm it is actually implemented for this MacroAssembler.
        __ trigfunc('s', 0);
        break;
      case lir_cos :
        // assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', 0);
        break;
      case lir_tan :
        __ trigfunc('t', 0);
        break;
      default      : ShouldNotReachHere();
    }
  } else {
    // Single-precision / integer intrinsics are not supported here.
    Unimplemented();
  }
}
// Emit a bitwise logic op (and/or/xor) for 32-bit (single_cpu) and word-sized
// (double_cpu) operands. Constants are materialized into AT first; on 32-bit
// builds the long case operates on the lo/hi register pair separately.
//FIXME, if right is on the stack!
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register dstreg = dst->as_register();
    Register reg = left->as_register();
    if (right->is_constant()) {
      // reg OP constant -> dstreg; the immediate goes through AT.
      int val = right->as_constant_ptr()->as_jint();
      __ move(AT, val);
      switch (code) {
        case lir_logic_and:
          __ andr (dstreg, reg, AT);
          break;
        case lir_logic_or:
          __ orr(dstreg, reg, AT);
          break;
        case lir_logic_xor:
          __ xorr(dstreg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // Right operand lives in a stack slot.
      // NOTE(review): this branch writes the result into `reg` (left), not
      // `dstreg` -- presumably linear scan guarantees left == dst here, as
      // asserted in the memory-operand path of arith_op; confirm.
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and:
          //FIXME. lw or ld_ptr?
          __ lw(AT, raddr);
          __ andr(reg, reg,AT);
          break;
        case lir_logic_or:
          __ lw(AT, raddr);
          __ orr(reg, reg, AT);
          break;
        case lir_logic_xor:
          __ lw(AT, raddr);
          __ xorr(reg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      // Register-register form.
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andr (dstreg, reg, rright); break;
        case lir_logic_or : __ orr  (dstreg, reg, rright); break;
        case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    // Long operands: one 64-bit register on LP64, a lo/hi pair on 32-bit.
    Register l_lo = left->as_register_lo();
    Register dst_lo = dst->as_register_lo();
#ifndef _LP64
    Register l_hi = left->as_register_hi();
    Register dst_hi = dst->as_register_hi();
#endif

    if (right->is_constant()) {
#ifndef _LP64
      // 32-bit: apply the op to the low and high halves independently.
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();

      switch (code) {
        case lir_logic_and:
          __ move(AT, r_lo);
          __ andr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ andr(dst_hi, l_hi, AT);
          break;

        case lir_logic_or:
          __ move(AT, r_lo);
          __ orr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ orr(dst_hi, l_hi, AT);
          break;

        case lir_logic_xor:
          __ move(AT, r_lo);
          __ xorr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ xorr(dst_hi, l_hi, AT);
          break;

        default: ShouldNotReachHere();
      }
#else
      // LP64: load the whole 64-bit immediate into AT once.
      __ li(AT, right->as_constant_ptr()->as_jlong());

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, AT);
          break;

        case lir_logic_or:
          __ orr(dst_lo, l_lo, AT);
          break;

        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, AT);
          break;

        default: ShouldNotReachHere();
      }
#endif

    } else {
      // Long register-register form; the _hi half only exists on 32-bit.
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }
  }
}
//done here. aoqi. 12-12 22:25
// Emit a 32-bit integer division or remainder (lir_idiv / lir_irem).
// MIPS div never traps on a zero divisor, so an explicit TEQ trap
// (trap code 0x7 = divide by zero) is emitted and registered with
// add_debug_info_for_div0 so the runtime can raise ArithmeticException.
// we assume that eax and edx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Constant divisor: materialize it in AT, then divide.
    int divisor = right->as_constant_ptr()->as_jint();
    assert(divisor!=0, "must be nonzero");
#ifndef _LP64
    __ move(AT, divisor);
    __ div(lreg, AT);
#else
    __ li(AT, divisor);
    __ ddiv(lreg, AT);
#endif
    int idivl_offset = code_offset();

    /* 2012/4/21 Jin: On MIPS, div does not cause an exception.
       We must trap an exception manually. */
    // NOTE(review): since `divisor` is asserted nonzero, AT != 0 here and
    // this teq can never fire -- it appears to exist only to mirror the
    // register-divisor path below.
    __ teq(R0, AT, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  } else {
    // Register divisor: divide, then trap if the divisor was zero.
    Register rreg = right->as_register();
#ifndef _LP64
    __ div(lreg, rreg);
#else
    __ ddiv(lreg, rreg);
#endif

    int idivl_offset = code_offset();
    __ teq(R0, rreg, 0x7);  // trap when rreg == 0
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  }

  // Fetch the result: remainder from HI, quotient from LO.
  if (code == lir_irem) {
    __ mfhi(dreg);
#ifdef _LP64
    if (result->type() == T_INT)
      __ sll(dreg, dreg, 0);  // canonical sign-extension of the 32-bit result
#endif
  } else if (code == lir_idiv) {
    __ mflo(dreg);
  } else {
    ShouldNotReachHere();
  }
}
// Emit a floating-point remainder (lir_frem) for float or double operands.
// The actual remainder loop lives in MacroAssembler::rem_s / rem_d, which
// needs an extra scratch FP register (temp).
void LIR_Assembler::arithmetic_frem(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  if (left->is_single_fpu()) {
    assert(right->is_single_fpu(),"right must be float");
    assert(result->is_single_fpu(), "dest must be float");
    assert(temp->is_single_fpu(), "temp must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = result->as_float_reg();
    FloatRegister tmp = temp->as_float_reg();

    switch (code) {
      case lir_frem:
        __ rem_s(res, lreg, rreg, tmp);
        break;
      default : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    assert(right->is_double_fpu(),"right must be double");
    assert(result->is_double_fpu(), "dest must be double");
    assert(temp->is_double_fpu(), "temp must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = result->as_double_reg();
    FloatRegister tmp = temp->as_double_reg();

    switch (code) {
      case lir_frem:
        __ rem_d(res, lreg, rreg, tmp);
        break;
      default : ShouldNotReachHere();
    }
  }
}
// Emit a three-way compare producing -1 / 0 / 1 in dstreg:
//   lir_cmp_fd2i  -- FP compare using ordered less-than (c_olt): NaN falls
//                    through to the "greater" result (+1), i.e. fcmpg style.
//   lir_ucmp_fd2i -- FP compare using unordered-or-less (c_ult): NaN yields
//                    -1, i.e. fcmpl style.
//   lir_cmp_l2i   -- long compare.
// Each compare places the "equal"/"less" result move in the branch delay
// slot (__ delayed(); __ move(...)), so the move executes whether or not
// the branch is taken; the fall-through path then overwrites it.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst,LIR_Op2 * op) {
  Register dstreg = dst->as_register();
  if (code == lir_cmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);    // result 0 (in delay slot)
      // less?
      __ c_olt_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);    // result -1 (in delay slot)
      // greater (or unordered)
      __ move(dstreg, 1);
      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_olt_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // greater (or unordered)
      __ move(dstreg, 1);
      __ bind(done);
    }
  } else if (code == lir_ucmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less or unordered?
      __ c_ult_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // greater
      __ move(dstreg, 1);
      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less or unordered?
      __ c_ult_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // greater
      __ move(dstreg, 1);
      __ bind(done);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    // Long compare. On 32-bit the high words are compared (signed) first;
    // on LP64 the full values live in the lo registers and one signed
    // compare suffices.
    Register l_lo, l_hi, r_lo, r_hi, d_lo, d_hi;
    l_lo = left->as_register_lo();
    l_hi = left->as_register_hi();
    r_lo = right->as_register_lo();
    r_hi = right->as_register_hi();

    Label done;
#ifndef _LP64
    // less?
    __ slt(AT, l_hi, r_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // greater?
    __ slt(AT, r_hi, l_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
#endif

    // now compare the low 32 bits (unsigned on 32-bit, since the high
    // words were equal) / the full 64-bit values (signed) on LP64
    // below?
#ifndef _LP64
    __ sltu(AT, l_lo, r_lo);
#else
    __ slt(AT, l_lo, r_lo);
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // above?
#ifndef _LP64
    __ sltu(AT, r_lo, l_lo);
#else
    __ slt(AT, r_lo, l_lo);
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
    // equal
    __ move(dstreg, R0);

    __ bind(done);
  }
}
// Pad with nops so that the displacement word of the upcoming call ends up
// word aligned. On MP systems the call site may be patched concurrently, and
// alignment makes the patch of the displacement word atomic.
void LIR_Assembler::align_call(LIR_Code code) {
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
      case lir_dynamic_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        // The inline-cache load precedes the call instruction.
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    // Emit nops (4 bytes each) until the displacement word is aligned.
    while (offset % BytesPerWord != 0) {
      __ nop();
      offset += 4;
    }
  }
}
// Emit a direct Java call (static/optvirtual/dynamic) with the given
// relocation type, fill the branch delay slot, and record debug info at the
// return address.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned");
  __ call(op->addr(), rtype);
  __ delayed()->nop();
  add_call_info(code_offset(), op->info());
}
// Emit an inline-cache dispatched virtual call and record debug info at the
// return address. The IC holder load is emitted by MacroAssembler::ic_call.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
}
/* Currently, vtable-dispatch is only enabled for sparc platforms;
   C1 on MIPS never generates lir_virtual_call, so reaching here is a bug. */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}
// Emit the out-of-line stub that a static call site is resolved through.
// The stub loads a (initially NULL) Method* into Rmethod and jumps to a
// (initially -1) entry point; both constants are later patched by
// CompiledStaticCall::set_to_interpreted, which the metadata relocation and
// patchable_set48 layout must match.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Record a patchable metadata slot for the Method* (NULL for now).
  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  // see set_to_interpreted
  __ patchable_set48(Rmethod, (long)o);

  // Patchable jump target; -1 placeholder until the call is resolved.
  __ patchable_set48(AT, (long)-1);
  __ jr(AT);
  __ delayed()->nop();
  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
// Emit an explicit athrow: load the throwing pc into exceptionPC's register,
// then call the Runtime1 exception handler with the exception oop in V0 and
// the pc in V1. The _nofpu variant skips FPU state saving when the method
// contains no FPU code.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register()== V0, "must match");
  assert(exceptionPC->as_register()== V1, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  long pc_for_athrow = (long)__ pc();
  int pc_for_athrow_offset = __ offset();
  Register epc = exceptionPC->as_register();
  // The pc constant is internal to this nmethod; relocate so it survives
  // code relocation/copying.
  __ relocate(relocInfo::internal_pc_type);
  __ li48(epc, pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler
  __ verify_not_null_oop(V0);
  // search an exception handler (V0: exception oop, V1: throwing pc)
  if (compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
            relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
            relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
}
// Branch to the method's unwind handler with the exception oop in FSR.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register()== FSR, "must match");
  __ b(_unwind_handler_entry);
  __ delayed()->nop();
}
// Emit a shift by a register-held count (lir_shl / lir_shr / lir_ushr).
// LP64: the *v shift variants use only the low 5/6 bits of the count, which
// matches Java's masking of the shift amount. For a 32-bit ushr the high 32
// bits of the source are cleared first, because dsrlv would otherwise shift
// sign-extension bits into the result.
// 32-bit: long shifts are synthesized from 32-bit shifts on the lo/hi pair,
// with separate paths for count==0, count>=32 and 0<count<32.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // optimized version for linear scan:
  // * tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

#ifdef _LP64
  Register count_reg = count->as_register();
  Register value_reg;
  Register dest_reg;
  if (left->is_single_cpu()) {
    value_reg = left->as_register();
    dest_reg = dest->as_register();
  } else if (left->is_double_cpu()) {
    value_reg = left->as_register_lo();
    dest_reg = dest->as_register_lo();
  } else {
    ShouldNotReachHere();
  }
  assert_different_registers(count_reg, value_reg);
  switch (code) {
    case lir_shl:
      if (dest->type() == T_INT)
        __ sllv(dest_reg, value_reg, count_reg);
      else
        __ dsllv(dest_reg, value_reg, count_reg);
      break;
    case lir_shr:  __ dsrav(dest_reg, value_reg, count_reg); break;
    case lir_ushr:
#if 1
      /*
         Jin: in Java, ushift_right requires a 32-bit UNSIGNED operation!
         However, dsrl shifts the (sign-extended) high 32 bits along with
         the low word, so a negative source would produce a wrong result:
         * DoubleCvt.java
         *
         *      static int inp (int shift)
         *      {
         *              return -1 >>> (32 - shift);
         *      }
         *
         *  26 ushift_right [t0|I] [a4|I] [a6|I]
         *  0x00000055616d2a98: dsrl a6, t0, a4 <-- error
         */

      // java.math.MutableBigInteger::primitiveRightShift
      //
      //  108 ushift_right [a6|I] [a4|I] [a4|I]
      //   0x00000055646d2f70: dsll32 a4, a6, 0  \
      //   0x00000055646d2f74: dsrl32 a4, a4, 0   |- error!
      //   0x00000055646d2f78: dsrl a4, a4, a4   /
      if (left->type() == T_INT && dest->type() == T_INT) {
        __ dsll32(AT, value_reg, 0);   // Omit the high 32 bits
        __ dsrl32(AT, AT, 0);
        __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
        break;
      }
#endif
      __ dsrlv(dest_reg, value_reg, count_reg); break;
    default: ShouldNotReachHere();
  }
#else
  if (left->is_single_cpu()) {
    Register value_reg = left->as_register();
    Register count_reg = count->as_register();
    Register dest_reg = dest->as_register();
    assert_different_registers(count_reg, value_reg);

    switch (code) {
      case lir_shl:  __ sllv(dest_reg, value_reg, count_reg); break;
      case lir_shr:  __ srav(dest_reg, value_reg, count_reg); break;
      case lir_ushr: __ srlv(dest_reg, value_reg, count_reg); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register creg = count->as_register();
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();

    // Java masks long shift counts to 6 bits.
    __ andi(creg, creg, 0x3f);
    switch (code) {
      case lir_shl:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dlo, lo);
          __ b(done);
          __ delayed();
          __ move(dhi, hi);

          // count >= 32: lo shifts into hi, lo becomes 0
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);
          __ sllv(dhi, lo, AT);
          __ b(done);
          __ delayed();
          __ move(dlo, R0);

          // 0 < count < 32: hi gets hi<<c | lo>>(32-c)
          __ bind(normal);
          __ sllv(dhi, hi, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ srlv(AT, lo, AT);
          __ orr(dhi, dhi, AT);
          __ sllv(dlo, lo, creg);
          __ bind(done);
        }
        break;
      case lir_shr:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);

          // count >= 32: hi shifts (arithmetically) into lo; hi is all sign bits
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);
          __ srav(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ sra(dhi, hi, BitsPerWord - 1);

          // 0 < count < 32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);
          __ orr(dlo, dlo, AT);
          __ srav(dhi, hi, creg);
          __ bind(done);
        }
        break;
      case lir_ushr:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);

          // count >= 32: hi shifts (logically) into lo; hi becomes 0
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addi(AT, creg, (-1) * BitsPerWord);
          __ srlv(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ move(dhi, R0);

          // 0 < count < 32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);
          __ orr(dlo, dlo, AT);
          __ srlv(dhi, hi, creg);
          __ bind(done);
        }
        break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif

}
5190 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
5191 if (dest->is_single_cpu()) {
5192 /* In WebClient,
5193 * virtual jboolean java.util.concurrent.atomic.AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl.compareAndSet
5194 *
5195 * 130 ushift_right [a4a4|J] [int:9|I] [a4|L]
5196 */
5197 Register value_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
5198 Register dest_reg = dest->as_register();
5199 count = count & 0x1F; // Java spec
5201 switch (code) {
5202 #ifdef _LP64
5203 case lir_shl:
5204 if (dest->type() == T_INT)
5205 __ sll(dest_reg, value_reg, count);
5206 else
5207 __ dsll(dest_reg, value_reg, count);
5208 break;
5209 case lir_shr: __ dsra(dest_reg, value_reg, count); break;
5210 case lir_ushr:
5211 #if 1
5212 if (left->type() == T_INT && dest->type() == T_INT) {
5213 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
5214 However, dsrl will shift in company with the highest 32 bits.
5215 Thus, if the source register contains a negative value,
5216 the resulti is incorrect.
5218 Example: in java.util.HashMap.get()
5220 68 ushift_right [t0|I] [int:20|I] [a4|I]
5221 dsrl a4, t0, 20
5223 t0: 0xFFFFFFFF87654321 (64bits for 0x87654321)
5225 ushift_right t0, 16 -> a4
5227 a4: 00000000 00008765 (right)
5228 a4: FFFFFFFF FFFF8765 (wrong)
5229 */
5230 __ dsll32(dest_reg, value_reg, 0); // Omit the high 32 bits
5231 __ dsrl32(dest_reg, dest_reg, count); // Unsigned right shift
5232 break;
5233 }
5234 #endif
5236 __ dsrl(dest_reg, value_reg, count);
5237 break;
5238 #else
5239 case lir_shl: __ sll(dest_reg, value_reg, count); break;
5240 case lir_shr: __ sra(dest_reg, value_reg, count); break;
5241 case lir_ushr: __ srl(dest_reg, value_reg, count); break;
5242 #endif
5243 default: ShouldNotReachHere();
5244 }
5246 } else if (dest->is_double_cpu()) {
5247 Register valuelo = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
5248 Register destlo = dest->as_register_lo();
5249 count = count & 0x3f;
5250 #ifdef _LP64
5251 switch (code) {
5252 case lir_shl: __ dsll(destlo, valuelo, count); break;
5253 case lir_shr: __ dsra(destlo, valuelo, count); break;
5254 case lir_ushr: __ dsrl(destlo, valuelo, count); break;
5255 default: ShouldNotReachHere();
5256 }
5257 #else
5258 Register desthi = dest->as_register_hi();
5259 Register valuehi = left->as_register_hi();
5260 assert_different_registers(destlo, valuehi, desthi);
5261 switch (code) {
5262 case lir_shl:
5263 if (count==0) {
5264 __ move(destlo, valuelo);
5265 __ move(desthi, valuehi);
5266 } else if (count>=32) {
5267 __ sll(desthi, valuelo, count-32);
5268 __ move(destlo, R0);
5269 } else {
5270 __ srl(AT, valuelo, 32 - count);
5271 __ sll(destlo, valuelo, count);
5272 __ sll(desthi, valuehi, count);
5273 __ orr(desthi, desthi, AT);
5274 }
5275 break;
5277 case lir_shr:
5278 if (count==0) {
5279 __ move(destlo, valuelo);
5280 __ move(desthi, valuehi);
5281 } else if (count>=32) {
5282 __ sra(destlo, valuehi, count-32);
5283 __ sra(desthi, valuehi, 31);
5284 } else {
5285 __ sll(AT, valuehi, 32 - count);
5286 __ sra(desthi, valuehi, count);
5287 __ srl(destlo, valuelo, count);
5288 __ orr(destlo, destlo, AT);
5289 }
5290 break;
5292 case lir_ushr:
5293 if (count==0) {
5294 __ move(destlo, valuelo);
5295 __ move(desthi, valuehi);
5296 } else if (count>=32) {
5297 __ sra(destlo, valuehi, count-32);
5298 __ move(desthi, R0);
5299 } else {
5300 __ sll(AT, valuehi, 32 - count);
5301 __ srl(desthi, valuehi, count);
5302 __ srl(destlo, valuelo, count);
5303 __ orr(destlo, destlo, AT);
5304 }
5305 break;
5307 default: ShouldNotReachHere();
5308 }
5309 #endif
5310 } else {
5311 ShouldNotReachHere();
5312 }
5313 }
// Store register r into the outgoing-argument area at the given word offset
// from SP (used to pass parameters to runtime stubs).
void LIR_Assembler::store_parameter(Register r, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  // NOTE(review): this compares a word count against reserved_argument_area_size(),
  // while the jobject overload compares the byte offset -- confirm which unit
  // reserved_argument_area_size() returns; one of the two asserts is off.
  assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ st_ptr(r, SP, offset_from_sp_in_bytes);
}
// Store the integer constant c into the outgoing-argument area at the given
// word offset from SP; the value is materialized via AT.
void LIR_Assembler::store_parameter(jint c, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  // NOTE(review): words-vs-bytes mismatch with the jobject overload's assert;
  // see store_parameter(Register, int).
  assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ move(AT, c);
  __ st_ptr(AT, SP, offset_from_sp_in_bytes);
}
// Store the oop constant o into the outgoing-argument area at the given word
// offset from SP. The load of the oop is preceded by an oop relocation so the
// GC can find and update the embedded pointer.
void LIR_Assembler::store_parameter(jobject o, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  int oop_index = __ oop_recorder()->find_index(o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  __ lui(AT, Assembler::split_high((int)o));
  __ addiu(AT, AT, Assembler::split_low((int)o));
#else
  __ li48(AT, (long)o);
#endif

  __ st_ptr(AT, SP, offset_from_sp_in_bytes);

}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
//
// Structure:
//  1. If the element type is unknown, call the generic Runtime1::arraycopy
//     and fall back to the slow-path stub when it reports failure.
//  2. Otherwise emit the subset of NULL / bounds / type checks requested in
//     op->flags(), branching to the slow-path stub on any failure, then call
//     Runtime1::{oop,primitive}_arraycopy with raw element addresses.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = T8;
#ifndef OPT_THREAD
  Register java_thread = T8;
#else
  Register java_thread = TREG;
#endif
  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // this is saved in the caller's reserved argument area
    //FIXME, maybe It will change something in the stack;
    // These are proper for the calling convention
    //store_parameter(length, 2);
    //store_parameter(dst_pos, 1);
    //store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    //store_parameter(src_pos, 3);
    //store_parameter(src, 4);
    assert(src == T0 && src_pos == A0, "mismatch in calling convention");
    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint

    // Save the five args so they can be reloaded after the runtime call.
    __ push(src);
    __ push(dst);
    __ push(src_pos);
    __ push(dst_pos);
    __ push(length);

    // save SP (as last_Java_sp) and align for the C call
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    __ st_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
#ifndef _LP64
    __ addi(SP, SP, (-5) * wordSize);
    __ move(AT, -(StackAlignmentInBytes));
    __ andr(SP, SP, AT);
    // fifth argument goes on the stack on 32-bit
    __ sw(length, SP, 4 * wordSize);
#else
    __ move(A4, length);
#endif
    __ move(A3, dst_pos);
    __ move(A2, dst);
    __ move(A1, src_pos);
    __ move(A0, src);
    // make call
    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    __ call(entry, relocInfo::runtime_call_type);
    __ delayed()->nop();
    // restore SP
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    __ ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
    __ super_pop(length);
    __ super_pop(dst_pos);
    __ super_pop(src_pos);
    __ super_pop(dst);
    __ super_pop(src);

    // V0 == 0 means the generic copy succeeded; otherwise take the slow path.
    __ beq_far(V0, R0, *stub->continuation());
    __ delayed()->nop();

    __ b_far(*stub->entry());
    __ delayed()->nop();
    __ bind(*stub->continuation());
    return;
  }
  assert(default_type != NULL
      && default_type->is_array_klass()
      && default_type->is_loaded(),
      "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  switch (elem_size) {
    case 1 :shift_amount = 0; break;
    case 2 :shift_amount = 1; break;
    case 4 :shift_amount = 2; break;
    case 8 :shift_amount = 3; break;
    default:ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ beq_far(src, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ beq_far(dst, R0, *stub->entry());
    __ delayed()->nop();
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ bltz(src_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ bltz(dst_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ bltz(length, *stub->entry());
    __ delayed()->nop();
  }

  // bounds: pos + length must not exceed the array length (unsigned compare)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(AT, src_pos, length);
    __ lw(tmp, src_length_addr);
    __ sltu(AT, tmp, AT);
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(AT, dst_pos, length);
    __ lw(tmp, dst_length_addr);
    __ sltu(AT, tmp, AT);
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }

  // element type check: src and dst must have the same klass
  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedClassPointers) {
      __ lwu(AT, src_klass_addr);
      __ lwu(tmp, dst_klass_addr);
    } else {
      __ ld(AT, src_klass_addr);
      __ ld(tmp, dst_klass_addr);
    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly. For the object array
    // case, if no type check is needed then the dst type must match the
    // expected type and the src type is some subtype which we can't check.
    // If a type check is needed then at this point the classes are known to
    // be the same but again we don't know which type so we can't check them.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif
    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) {
        __ lwu(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
      if (UseCompressedClassPointers) {
        __ lwu(AT, src_klass_addr);
      } else {
        __ ld(AT, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
      if (UseCompressedClassPointers) {
        __ lwu(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
      __ beq(src, dst, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // Save the raw args across the leaf call (call_VM_leaf may clobber them).
  __ push(src);
  __ push(dst);
  __ push(src_pos);
  __ push(dst_pos);
  __ push(length);

  // Build the leaf-call arguments:
  //   A0 = src element address, A1 = dst element address, A2 = element count
  assert(A0 != A1 &&
         A0 != length &&
         A1 != length, "register checks");
  __ move(AT, dst_pos);
  if (shift_amount > 0 && basic_type != T_OBJECT) {
    // primitive_arraycopy takes a byte count; oop_arraycopy an element count
#ifndef _LP64
    __ sll(A2, length, shift_amount);
#else
    __ dsll(A2, length, shift_amount);
#endif
  } else {
    if (length!=A2)
      __ move(A2, length);
  }
  __ move(A3, src_pos );
  assert(A0 != dst_pos &&
         A0 != dst &&
         dst_pos != dst, "register checks");

  assert_different_registers(A0, dst_pos, dst);
  // A1 = dst + header + dst_pos * elem_size
#ifndef _LP64
  __ sll(AT, AT, shift_amount);
#else
  __ dsll(AT, AT, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A1, dst, AT);

  // A0 = src + header + src_pos * elem_size
#ifndef _LP64
  __ sll(AT, A3, shift_amount);
#else
  __ dsll(AT, A3, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A0, src, AT);

  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 3);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 3);
  }
  __ super_pop(length);
  __ super_pop(dst_pos);
  __ super_pop(src_pos);
  __ super_pop(dst);
  __ super_pop(src);

  __ bind(*stub->continuation());
}
// The CRC32 intrinsic is not implemented in this MIPS port; C1 is not
// expected to generate LIR_OpUpdateCRC32, so report and abort if it does.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  tty->print_cr("LIR_Assembler::emit_updatecrc32 unimplemented yet !");
  Unimplemented();
}
// Emit monitorenter/monitorexit. With UseFastLocking the inline stack-lock
// fast path is tried first and the slow-path stub entry is taken on failure;
// without it, control always goes to the stub. Either way, code continues at
// the stub's continuation label.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();  // displaced header
  // lock may be allocated as a single or (LP64) a double cpu operand.
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register(): op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    __ b_far(*op->stub()->entry());
    __ delayed()->nop();
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      // Biased locking needs an extra scratch register.
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      //add_debug_info_for_null_check_here(op->info());
      add_debug_info_for_null_check(null_check_offset,op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
// Profile a call site: bump the MDO invocation counter for all call types
// and, for invokevirtual/invokeinterface with C1ProfileVirtualCalls, record
// receiver klasses in the VirtualCallData rows.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();

  __ mov_metadata(mdo, md->constant_encoding());

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();

  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static && //required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // Row already holds this klass: bump that row's counter only.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(AT, known_klass->constant_encoding());
          __ st_ptr(AT,recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }
    } else {
      // Receiver klass only known at run time: emit a dynamic row search.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(AT, counter_addr);
      __ addi(AT, AT, DataLayout::counter_increment);
      __ st_ptr(AT, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static or non-profiled-virtual call: plain counter increment.
    __ ld_ptr(AT, counter_addr);
    __ addi(AT, AT, DataLayout::counter_increment);
    __ st_ptr(AT, counter_addr);
  }
}
// Profile the observed type of op->obj() into the MethodData cell addressed
// by op->mdp(). The cell packs a klass pointer together with TypeEntries
// flag bits (null_seen, type_unknown) in one word; `current_klass` is the
// profile state recorded so far and lets us skip updates that cannot change
// the cell. The repeated mdo_addr.index() == noreg branches handle both
// simple (base+disp) and indexed (base+index<<scale+disp) MDO addresses.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp1 = T1;  // extra scratch, preserved with push/pop around each use
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();      // statically known type, if any
  intptr_t current_klass = op->current_klass();  // profile state seen so far
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ move(tmp, obj);
  }
  if (do_null) {
    __ bne(tmp, R0, update);
    __ delayed()->nop();
    if (!TypeEntries::was_null_seen(current_klass)) {
      // obj is null and null not recorded yet: cell |= null_seen.
      __ push(tmp1);
      if (mdo_addr.index() == noreg) {
        __ ld(tmp1, mdo_addr);
      } else {
        guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
        guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

        __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
        __ daddu(AT, AT, mdo_addr.base());
        __ ld(tmp1, AT, mdo_addr.disp());
      }
      __ li(AT, TypeEntries::null_seen);
      __ orr(AT, tmp1, AT);
      if (mdo_addr.index() == noreg) {
        __ sd(AT, mdo_addr);
      } else {
        guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
        guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

        __ dsll(tmp1, mdo_addr.index(), mdo_addr.scale());
        __ daddu(tmp1, tmp1, mdo_addr.base());
        __ sd(AT, tmp1, mdo_addr.disp());
      }
      __ pop(tmp1);
    }
    if (do_update) {
      // NOTE: unusual brace/#ifdef interleaving below. Product build: the
      // null path jumps to `next`, closing only if (do_update) and
      // if (do_null). ASSERT build: additionally an else-branch of
      // if (do_null) that stop()s when a promised-not-null obj is null.
#ifndef ASSERT
      __ b(next);
      __ delayed()->nop();
    }
#else
      __ b(next);
      __ delayed()->nop();
    }
  } else {
    __ bne(tmp, R0, update);
    __ delayed()->nop();
    __ stop("unexpect null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      // Verify the dynamic klass really matches the statically known one.
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ ld(AT, Address(SP, 0));
      __ beq(tmp, AT, ok);
      __ delayed()->nop();
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        // tmp <- klass to record (compile-time constant if known).
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        if (mdo_addr.index() == noreg) {
          __ ld(AT, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ ld(AT, AT, mdo_addr.disp());
        }
        // tmp = new_klass XOR cell: zero klass bits means same klass.
        __ xorr(tmp, tmp, AT);
        __ li(AT, TypeEntries::type_klass_mask);
        __ andr(AT, tmp, AT);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ beq(AT, R0, next);
        __ delayed()->nop();

        __ li(AT, TypeEntries::type_unknown);
        __ andr(AT, tmp, AT);
        __ bne(AT, R0, next); // already unknown. Nothing to do anymore.
        __ delayed()->nop();

        if (TypeEntries::is_type_none(current_klass)) {
          // Cell may still be empty (0, or exactly null_seen): first type
          // observed, store it directly at `none`.
          if (mdo_addr.index() == noreg) {
            __ ld(AT, mdo_addr);
          } else {
            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(AT, AT, mdo_addr.disp());
          }
          __ beq(AT, R0, none);
          __ delayed()->nop();

          __ push(tmp1);
          if (mdo_addr.index() == noreg) {
            __ ld(tmp1, mdo_addr);
          } else {
            guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
            guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(tmp1, AT, mdo_addr.disp());
          }
          __ li(AT, TypeEntries::null_seen);
          __ subu(AT, AT, tmp1); // zero iff cell == null_seen exactly
          __ pop(tmp1);
          __ beq(AT, R0, none);
          __ delayed()->nop();
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          if (mdo_addr.index() == noreg) {
            __ ld(AT, mdo_addr);
          } else {
            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(AT, AT, mdo_addr.disp());
          }
          __ xorr(tmp, tmp, AT);
          __ li(AT, TypeEntries::type_klass_mask);
          __ andr(AT, tmp, AT);
          __ beq(AT, R0, next);
          __ delayed()->nop();
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        if (mdo_addr.index() == noreg) {
          __ ld(tmp, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ ld(tmp, AT, mdo_addr.disp());
        }
        __ li(AT, TypeEntries::type_unknown);
        __ andr(AT, tmp, AT);
        __ bne(AT, R0, next); // already unknown. Nothing to do anymore.
        __ delayed()->nop();
      }

      // different than before. Cannot keep accurate profile.
      __ push(tmp1);
      if (mdo_addr.index() == noreg) {
        __ ld(tmp1, mdo_addr);
      } else {
        guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
        guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

        __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
        __ daddu(AT, AT, mdo_addr.base());
        __ ld(tmp1, AT, mdo_addr.disp());
      }
      __ li(AT, TypeEntries::type_unknown);
      __ orr(AT, tmp1, AT);
      if (mdo_addr.index() == noreg) {
        __ sd(AT, mdo_addr);
      } else {
        guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
        guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

        __ dsll(tmp1, mdo_addr.index(), mdo_addr.scale());
        __ daddu(tmp1, tmp1, mdo_addr.base());
        __ sd(AT, tmp1, mdo_addr.disp());
      }
      __ pop(tmp1);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);
        __ delayed()->nop();

        __ bind(none);
        // first time here. Set profile type.
        if (mdo_addr.index() == noreg) {
          __ sd(tmp, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ sd(tmp, AT, mdo_addr.disp());
        }
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        if (mdo_addr.index() == noreg) {
          __ ld(AT, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ ld(AT, AT, mdo_addr.disp());
        }
        __ xorr(tmp, tmp, AT);
        __ li(AT, TypeEntries::type_klass_mask);
        __ andr(AT, tmp, AT);
#ifdef ASSERT
        __ beq_far(AT, R0, next);
        __ delayed()->nop();

        {
          // Cell should be empty (0 or null_seen) or already hold this
          // klass (set concurrently); anything else is a profiling bug.
          Label ok;
          __ push(tmp);
          if (mdo_addr.index() == noreg) {
            __ ld(AT, mdo_addr);
          } else {
            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(AT, AT, mdo_addr.disp());
          }
          __ beq_far(AT, R0, ok);
          __ delayed()->nop();

          __ push(tmp1);
          if (mdo_addr.index() == noreg) {
            __ ld(tmp1, mdo_addr);
          } else {
            guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
            guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(tmp1, AT, mdo_addr.disp());
          }
          __ li(AT, TypeEntries::null_seen);
          __ subu(AT, AT, tmp1);
          __ pop(tmp1);
          __ beq_far(AT, R0, ok);
          __ delayed()->nop();
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          if (mdo_addr.index() == noreg) {
            __ ld(AT, mdo_addr);
          } else {
            __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
            __ daddu(AT, AT, mdo_addr.base());
            __ ld(AT, AT, mdo_addr.disp());
          }
          __ xorr(tmp, tmp, AT);
          __ li(AT, TypeEntries::type_mask);
          __ andr(AT, tmp, AT);
          __ beq_far(AT, R0, ok);
          __ delayed()->nop();

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ beq(AT, R0, next);
        __ delayed()->nop();
#endif
        // first time here. Set profile type.
        if (mdo_addr.index() == noreg) {
          __ sd(tmp, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ sd(tmp, AT, mdo_addr.disp());
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        if (mdo_addr.index() == noreg) {
          __ ld(tmp, mdo_addr);
        } else {
          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ ld(tmp, AT, mdo_addr.disp());
        }
        __ li(AT, TypeEntries::type_unknown);
        __ andr(AT, tmp, AT);
        __ bne(AT, R0, next); // already unknown. Nothing to do anymore.
        __ delayed()->nop();

        // Conflict with the recorded klass: degrade cell to type_unknown.
        __ push(tmp1);
        if (mdo_addr.index() == noreg) {
          __ ld(tmp1, mdo_addr);
        } else {
          guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
          guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

          __ dsll(AT, mdo_addr.index(), mdo_addr.scale());
          __ daddu(AT, AT, mdo_addr.base());
          __ ld(tmp1, AT, mdo_addr.disp());
        }
        __ li(AT, TypeEntries::type_unknown);
        __ orr(AT, tmp1, AT);
        if (mdo_addr.index() == noreg) {
          __ sd(AT, mdo_addr);
        } else {
          guarantee(tmp1 != mdo_addr.base(), "The base register will be corrupted !");
          guarantee(tmp1 != mdo_addr.index(), "The index register will be corrupted !");

          __ dsll(tmp1, mdo_addr.index(), mdo_addr.scale());
          __ daddu(tmp1, tmp1, mdo_addr.base());
          __ sd(AT, tmp1, mdo_addr.disp());
        }
        __ pop(tmp1);
      }
    }

    __ bind(next);
  }
}
// LIR delay-slot ops are not used by this port: every branch emitted in this
// file fills its MIPS delay slot explicitly with __ delayed()->nop().
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
6094 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
6095 if (dst->is_single_cpu())
6096 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
6097 else if (dst->is_double_cpu())
6098 __ lea(dst->as_register_lo(), frame_map()->address_for_monitor_lock(monitor_no));
6099 }
// Align backward-branch targets (e.g. loop headers) to a word boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
// dest = -left for int/long (cpu) and float/double (fpu) operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ subu(dest->as_register(), R0, left->as_register());
  } else if (left->is_double_cpu()) {
#ifndef _LP64
    // 32-bit: negate a 64-bit value held in a register pair via two's
    // complement: dlo = ~lo + 1, dhi = ~hi + carry-out-of-low.
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();
    assert(dlo != hi, "register checks");
    __ nor(dlo, R0, lo);
    __ addiu(dlo, dlo, 1);
    __ sltiu(AT, dlo, 1);  // AT = 1 iff low word wrapped to 0 (carry out)
    __ nor(dhi, R0, hi);
    __ addu(dhi, dhi, AT);
#else
    __ subu(dest->as_register_lo(), R0, left->as_register_lo());
#endif
  } else if (left->is_single_fpu()) {
    //for mips , does it required ?
    __ neg_s(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    //for mips , does it required ?
    __ neg_d(dest->as_double_reg(), left->as_double_reg());
  }else {
    ShouldNotReachHere();
  }
}
6135 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
6136 assert(addr->is_address() && dest->is_register(), "check");
6137 Register reg;
6138 reg = dest->as_pointer_register();
6139 __ lea(reg, as_Address(addr->as_address_ptr()));
6140 }
// Materialize the oop constant `o` into `reg`. For non-NULL oops an oop
// relocation is emitted first so the embedded pointer is visible to the
// runtime; NULL is loaded as a plain immediate with no relocation.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    // This seems wrong as we do not emit relocInfo
    // for classes that are not loaded yet, i.e., they will be
    // never GC'd
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    // li48 emits a fixed-length 48-bit immediate load (patchable size).
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ relocate(rspec);
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  }
}
6171 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
6172 assert(!tmp->is_valid(), "don't need temporary");
6173 __ call(dest, relocInfo::runtime_call_type);
6174 __ delayed()->nop();
6175 if (info != NULL) {
6176 add_call_info_here(info);
6177 }
6178 }
// Move a volatile T_LONG between a register and memory using a single
// 64-bit access (sd/ld or sdc1/ldc1) on LP64. The 32-bit split-word forms
// are not implemented.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if(src->is_register() && dest->is_address()) {
    // store: register -> memory
    if(src->is_double_cpu()) {
#ifdef _LP64
      __ sd(src->as_register_lo(), as_Address(dest->as_address_ptr()));
#else
      Unimplemented();
      //__ sw(src->as_register_lo(), as_Address(dest->as_address_ptr()));
      //__ sw(src->as_register_hi(), as_Address(dest->as_address_ptr()).base(),
      //as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else if (src->is_double_fpu()) {
#ifdef _LP64
      __ sdc1(src->as_double_reg(), as_Address(dest->as_address_ptr()));
#else
      Unimplemented();
      //__ swc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
      //__ swc1(src->as_fpu_hi(), as_Address(dest->as_address_ptr()).base(),
      //as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_address() && dest->is_register()){
    // load: memory -> register
    if(dest->is_double_cpu()) {
#ifdef _LP64
      __ ld(dest->as_register_lo(), as_Address(src->as_address_ptr()));
#else
      Unimplemented();
      // __ lw(dest->as_register_lo(), as_Address(src->as_address_ptr()));
      // __ lw(dest->as_register_hi(), as_Address(src->as_address_ptr()).base(),
      // as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else if (dest->is_double_fpu()) {
#ifdef _LP64
      __ ldc1(dest->as_double_reg(), as_Address(src->as_address_ptr()));
#else
      Unimplemented();
      // __ lwc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
      // __ lwc1(dest->as_fpu_hi(), as_Address(src->as_address_ptr()).base(),
      // as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
#ifdef ASSERT
// emit run-time assertion
// LIR_OpAssert is not implemented for this port; report and abort if C1
// ever emits one in a debug build.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  tty->print_cr("LIR_Assembler::emit_assert unimplemented yet!");
  Unimplemented();
}
#endif
// Full memory barrier: emit a MIPS SYNC instruction.
void LIR_Assembler::membar() {
  __ sync();
}
// Acquire barrier: conservatively implemented as a full SYNC (same as
// membar()), since this port emits no finer-grained fence.
void LIR_Assembler::membar_acquire() {
  __ sync();
}
// Release barrier: conservatively implemented as a full SYNC (same as
// membar()), since this port emits no finer-grained fence.
void LIR_Assembler::membar_release() {
  __ sync();
}
// LoadLoad barrier: deliberately a no-op in this port (the commented-out
// line is the x86 form). NOTE(review): MIPS is weakly ordered — confirm
// surrounding code guarantees load-load ordering without a sync here.
void LIR_Assembler::membar_loadload() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// StoreStore barrier: deliberately a no-op in this port (the commented-out
// line is the x86 form). NOTE(review): MIPS is weakly ordered — confirm
// surrounding code guarantees store-store ordering without a sync here.
void LIR_Assembler::membar_storestore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// LoadStore barrier: deliberately a no-op in this port (the commented-out
// line is the x86 form). NOTE(review): MIPS is weakly ordered — confirm
// surrounding code guarantees load-store ordering without a sync here.
void LIR_Assembler::membar_loadstore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
6270 void LIR_Assembler::membar_storeload() {
6271 //__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
6272 }
// Load the current JavaThread* into result_reg. With OPT_THREAD the thread
// pointer is kept cached in TREG and is simply copied; otherwise the slower
// MacroAssembler::get_thread() helper is used.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifndef OPT_THREAD
  __ get_thread(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()));
#else
  __ move(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()), TREG);
#endif
}
// Peephole optimization hook over the LIR list; this port performs none.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
6288 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
6289 /* assert(data == dest, "xchg/xadd uses only 2 operands");
6291 if (data->type() == T_INT) {
6292 if (code == lir_xadd) {
6293 if (os::is_MP()) {
6294 __ lock();
6295 }
6296 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
6297 } else {
6298 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
6299 }
6300 } else if (data->is_oop()) {
6301 assert (code == lir_xchg, "xadd for oops");
6302 Register obj = data->as_register();
6303 #ifdef _LP64
6304 if (UseCompressedOops) {
6305 __ encode_heap_oop(obj);
6306 __ xchgl(obj, as_Address(src->as_address_ptr()));
6307 __ decode_heap_oop(obj);
6308 } else {
6309 __ xchgptr(obj, as_Address(src->as_address_ptr()));
6310 }
6311 #else
6312 __ xchgl(obj, as_Address(src->as_address_ptr()));
6313 #endif
6314 } else if (data->type() == T_LONG) {
6315 #ifdef _LP64
6316 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
6317 if (code == lir_xadd) {
6318 if (os::is_MP()) {
6319 __ lock();
6320 }
6321 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
6322 } else {
6323 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
6324 }
6325 #else
6326 ShouldNotReachHere();
6327 #endif
6328 } else {
6329 ShouldNotReachHere();
6330 }*/
6331 ShouldNotReachHere();
6332 }
6334 #undef __