Thu, 24 May 2018 19:49:50 +0800
Some C1 fixes
Contributed-by: chenhaoxuan, zhaixiang, aoqi
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "gc_interface/collectedHeap.hpp"
37 #include "memory/barrierSet.hpp"
38 #include "memory/cardTableModRefBS.hpp"
39 #include "nativeInst_mips.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #define __ _masm->
44 static void select_different_registers(Register preserve,
45 Register extra,
46 Register &tmp1,
47 Register &tmp2) {
48 if (tmp1 == preserve) {
49 assert_different_registers(tmp1, tmp2, extra);
50 tmp1 = extra;
51 } else if (tmp2 == preserve) {
52 assert_different_registers(tmp1, tmp2, extra);
53 tmp2 = extra;
54 }
55 assert_different_registers(preserve, tmp1, tmp2);
56 }
60 static void select_different_registers(Register preserve,
61 Register extra,
62 Register &tmp1,
63 Register &tmp2,
64 Register &tmp3) {
65 if (tmp1 == preserve) {
66 assert_different_registers(tmp1, tmp2, tmp3, extra);
67 tmp1 = extra;
68 } else if (tmp2 == preserve) {
69 tmp2 = extra;
70 } else if (tmp3 == preserve) {
71 assert_different_registers(tmp1, tmp2, tmp3, extra);
72 tmp3 = extra;
73 }
74 assert_different_registers(preserve, tmp1, tmp2, tmp3);
75 }
77 // need add method Assembler::is_simm16 in assembler_gs2.hpp
78 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
79 if (opr->is_constant()) {
80 LIR_Const* constant = opr->as_constant_ptr();
81 switch (constant->type()) {
82 case T_INT: {
83 jint value = constant->as_jint();
84 return Assembler::is_simm16(value);
85 }
86 default:
87 return false;
88 }
89 }
90 return false;
91 }
//FIXME, which register should be used?
// The incoming receiver is expected in T0 on this port.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::_t0_oop_opr;
}
97 /*
98 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
99 return receiverOpr();
100 }*/
// The OSR buffer pointer arrives in the receiver register; on 64-bit it is
// widened to a long operand so pointer arithmetic uses the full width.
LIR_Opr LIR_Assembler::osrBufferPointer() {
#ifdef _LP64
  Register r = receiverOpr()->as_register();
  return FrameMap::as_long_opr(r, r);
#else
  return FrameMap::as_opr(receiverOpr()->as_register());
#endif
}
111 //--------------fpu register translations-----------------------
112 // FIXME:I do not know what's to do for mips fpu
114 address LIR_Assembler::float_constant(float f) {
115 address const_addr = __ float_constant(f);
116 if (const_addr == NULL) {
117 bailout("const section overflow");
118 return __ code()->consts()->start();
119 } else {
120 return const_addr;
121 }
122 }
125 address LIR_Assembler::double_constant(double d) {
126 address const_addr = __ double_constant(d);
127 if (const_addr == NULL) {
128 bailout("const section overflow");
129 return __ code()->consts()->start();
130 } else {
131 return const_addr;
132 }
133 }
// x87-style FPU state reset; no equivalent operation is implemented here.
void LIR_Assembler::reset_FPU() {
  Unimplemented();
}
// x87 24-bit precision mode control; no equivalent is implemented here.
void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}
//FIXME.
// x87 FPU-stack pop; deliberately a no-op on this port (no FPU stack).
void LIR_Assembler::fpop() {
  // do nothing
}
// x87 FPU-stack exchange; deliberately a no-op on this port.
void LIR_Assembler::fxch(int i) {
  // do nothing
}
// x87 FPU-stack load; deliberately a no-op on this port.
void LIR_Assembler::fld(int i) {
  // do nothing
}
// x87 FPU-stack free; deliberately a no-op on this port.
void LIR_Assembler::ffree(int i) {
  // do nothing
}
// Emit a MIPS break instruction (code 17) to trap to the debugger.
void LIR_Assembler::breakpoint() {
  __ brk(17);
}
//FIXME, opr can not be float?
// Push 'opr' onto the stack.  Supports single/double CPU registers, stack
// slots and object/int constants; any other operand kind is a fatal error.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // High half first, then low half.
    __ push_reg(opr->as_register_hi());
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    // Push the value held in the operand's stack slot.
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Pop the top of stack into 'opr'.  Only single-word CPU registers are
// supported; anything else trips the assert.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop(opr->as_register());
  } else {
    assert(false, "Must be single word register or floating-point register");
  }
}
// Convert a LIR_Address to an assembler Address (base register + disp).
// Index/scale components are not handled here.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
#ifndef _LP64
  Register reg = addr->base()->as_register();
#else
  //FIXME aoqi
  // On 64-bit the base may be either a single-cpu or a double-cpu (long)
  // operand; pick the accessor that matches.
  Register reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
#endif
  // now we need this for parameter pass
  return Address(reg, addr->disp());
}
// Address of the low word of a two-word operand: same as the base address.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
// Address of the high word of a two-word (long/double) operand: the base
// address displaced by half a long.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp()+longSize/2);
}
//void LIR_Assembler::osr_entry(IRScope* scope, int number_of_locks, Label* continuation, int osr_bci) {
// Emit the OSR (on-stack replacement) entry point: record its code offset,
// build the compiled frame, and copy the monitors from the interpreter's
// OSR buffer into the compiled activation's monitor slots.
void LIR_Assembler::osr_entry() {
  //  assert(scope->is_top_scope(), "inlined OSR not yet implemented");
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // S7: interpreter locals pointer
  // V1: interpreter locks pointer
  // RA: return address
  // T0: OSR buffer
  // build frame
  //  ciMethod* m = scope->method();
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // T0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  // note: we do osr only if the expression stack at the loop beginning is empty,
  // in which case the spill area is empty too and we don't have to setup
  // spilled locals
  //
  // copy monitors
  // V1: pointer to locks
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Monitors sit past the locals in the OSR buffer; iterate from the
    // highest-addressed monitor down (i counts up, slot_offset counts down).
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - (i * BasicObjectLock::size()) * BytesPerWord;
#ifdef ASSERT
      // Verify the interpreter really locked the object before we copy it.
      {
        Label L;
        //__ lw(AT, V1, slot_offset * BytesPerWord + BasicObjectLock::obj_offset_in_bytes());
        __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and the object pointer into the compiled
      // frame's monitor slot i.
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_object(i));
    }
  }
}
// Emit the inline-cache check at the unverified entry point and return its
// code offset.  The receiver's klass is compared against the cached klass
// (IC_Klass); on mismatch the check code handles the miss.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;

  /*const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedOops;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }*/

  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  // Align so the verified entry point that follows starts on a
  // CodeEntryAlignment boundary.
  __ align(CodeEntryAlignment);
  return offset;
}
// Load a (yet-unknown) oop constant into 'reg' via a patching stub: emit a
// placeholder NULL load that the runtime patches with the resolved oop.
// The instruction sequence must match what NativeMovConstReg expects.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  int oop_index = __ oop_recorder()->allocate_oop_index(o);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  __ lui(reg, Assembler::split_high((int)o));
  __ addiu(reg, reg, Assembler::split_low((int)o));
#else
  //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi
  //  __ li48(reg, (long)o);
  __ li48(reg, (long)o);
#endif
  //  patching_epilog(patch, LIR_Op1::patch_normal, noreg, info);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// Load a (yet-unknown) Klass* constant into 'reg' via a patching stub,
// analogous to jobject2reg_with_patching but using metadata relocation.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  // li48 emits a fixed-length sequence so the patcher can rewrite it.
  __ li48(reg, (long)o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// This specifies the esp decrement needed to build the frame
// Two words are subtracted because the return address and frame link are
// pushed separately and are not part of the decrement.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
  //  return (frame_map()->framesize() - 2) * BytesPerWord; // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
// Emit the exception handler stub and return its code offset, or -1 if the
// stub section is out of space (compilation bails out in that case).
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  // Lazy deopt bug 4932387. If last instruction is a call then we
  // need an area to patch where we won't overwrite the exception
  // handler. This means we need 5 bytes. Could use a fat_nop
  // but since this never gets executed it doesn't really make
  // much difference.
  //
  for (int i = 0; i < (NativeCall::instruction_size/BytesPerInstWord + 1); i++) {
    __ nop();
  }

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // no enough space
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in V0, and V1
  // no other registers need to be preserved, so invalidate them
  //__ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(V0);

  // search an exception handler (V0: exception oop, V1: throwing pc)
  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  __ delayed()->nop();
  // The runtime call never returns here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
// Fetches the pending exception from thread-local state, performs any
// required unlocking and dtrace notification, pops the frame, and jumps
// to the shared unwind-exception runtime stub.  Returns the code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = TREG;
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ ld_ptr(V0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(V0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    // S0 is callee-saved, so the exception survives the calls below.
    __ move(S0, V0); // Preserve the exception (rbx is always callee-saved)
  }

  // Preform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::_v0_opr);
    stub = new MonitorExitStub(FrameMap::_v0_opr, true, 0);
    // Slow path goes to the stub; fast path continues at its continuation.
    __ unlock_object(A0, A1, V0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ move(A0, thread);
    __ mov_metadata(A1, method()->constant_encoding());
    __ patchable_call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ move(V0, S0); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  // leave activation of nmethod
  __ remove_frame(initial_frame_size_in_bytes());

  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
// Emit the deoptimization handler stub (a call into the deopt blob) and
// return its code offset, or -1 if the stub section is out of space.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }
  int offset = code_offset();

  //  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  __ call(SharedRuntime::deopt_blob()->unpack());
  __ delayed()->nop();

  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has not
// OSR-entry and therefore, we generate a slow version for OSR's
//void LIR_Assembler::emit_string_compare(IRScope* scope) {
// Compares two strings character-by-character; the length difference is
// the result when one string is a prefix of the other.  Result in V0.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  // get two string object in T0&T1
  //receiver already in T0
  __ ld_ptr(T1, arg1->as_register());
  //__ ld_ptr(T2, T0, java_lang_String::value_offset_in_bytes()); //value, T_CHAR array
  __ load_heap_oop(T2, Address(T0, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T0, java_lang_String::offset_offset_in_bytes()); //offset
  // offset is in chars; scale by 2 to get a byte offset into the char array.
  __ shl(AT, 1);
  __ add(T2, T2, AT);
  __ addi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T2 is the address of the first char in first string(T0)

  add_debug_info_for_null_check_here(info);
  //__ ld_ptr(T3, T1, java_lang_String::value_offset_in_bytes());
  __ load_heap_oop(T3, Address(T1, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T1, java_lang_String::offset_offset_in_bytes());
  __ shl(AT, 1);
  __ add(T3, T3, AT);
  __ addi(T3, T3, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T3 is the address of the first char in second string(T1)

#ifndef _LP64
  //by_css
  // compute minimum length (in T4) and difference of lengths (V0)
  Label L;
  __ lw(T4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw(T5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ subu(V0, T4, T5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move(T4, T5);
  __ bind(L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(T4, R0, LoopEnd);
  // delayed(): the next emitted instruction fills the branch delay slot.
  __ delayed();

  __ addi(T2, T2, 2);

  // compare current character
  __ lhu(T5, T2, -2);
  __ lhu(T6, T3, 0);
  __ bne(T5, T6, haveResult);
  // delayed(): the T3 increment below executes in the delay slot.
  __ delayed();

  __ addi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(T4, T4, -1);

  __ bind(haveResult);
  // Result is the difference of the first mismatching characters.
  __ subu(V0, T5, T6);

  __ bind(LoopEnd);
#else
  // compute minimum length (in T4) and difference of lengths (V0)
  Label L;
  __ lw(A4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw(A5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ dsubu(V0, A4, A5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move(A4, A5);
  __ bind(L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(A4, R0, LoopEnd);
  // delayed(): the next emitted instruction fills the branch delay slot.
  __ delayed();

  __ daddi(T2, T2, 2);

  // compare current character
  __ lhu(A5, T2, -2);
  __ lhu(A6, T3, 0);
  __ bne(A5, A6, haveResult);
  // delayed(): the T3 increment below executes in the delay slot.
  __ delayed();

  __ daddi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(A4, A4, -1);

  __ bind(haveResult);
  // Result is the difference of the first mismatching characters.
  __ dsubu(V0, A5, A6);

  __ bind(LoopEnd);
#endif
  return_op(FrameMap::_v0_opr);
}
// Emit the method return sequence: pop the frame, perform the return-type
// safepoint poll (a load from the polling page), then jump through RA.
// The relocate() must immediately precede the poll load so the PC recorded
// for the poll is correct.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
#ifndef _LP64
  //by aoqi
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()
    + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()
    + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  __ li48(AT, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, 0);
#else
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif

  __ pop(RA);
  __ jr(RA);
  __ delayed()->nop();
}
//read protect mem to R0 won't cause the exception only in godson-2e, So I modify R0 to AT .@jerome,11/25,2006
// Emit a safepoint poll (load from the polling page) and return its code
// offset so debug info can be attached to the poll instruction.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  assert(info != NULL, "info must not be null for safepoint poll");
  int offset = __ offset();
  Register r = tmp->as_register();
#ifndef _LP64
  //by aoqi
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  //__ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ li48(r, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  //__ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ lw(AT, r, 0);
#else
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif
  return offset;
}
// Register-to-register move; elided when source and destination coincide.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ move(to_reg, from_reg);
}
// Swap two registers without a temporary using the XOR trick.
// NOTE: if a == b this zeroes the register; callers must pass distinct
// registers.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xorr(a, a, b);
  __ xorr(b, a, b);
  __ xorr(a, a, b);
}
// Materialize a constant into a register.  Dispatches on the constant's
// type: immediates are loaded directly; oops/metadata may go through a
// patching stub; float/double constants are loaded from the constant table
// via AT (hence AT must not be the destination).
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ move(dest->as_register(), c->as_jint()); // FIXME
      break;
    }

    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ move(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
#ifndef _LP64
      // 32-bit: split the jlong into its two 32-bit halves.
      jlong con = c->as_jlong();
      jint* conhi = (jint*)&con + 1;
      jint* conlow = (jint*)&con;

      if (dest->is_double_cpu()) {
        __ move(dest->as_register_lo(), *conlow);
        __ move(dest->as_register_hi(), *conhi);
      } else {
        //  assert(dest->is_double(), "wrong register kind");
        // Move the halves into the FPU register pair via AT.
        __ move(AT, *conlow);
        __ mtc1(AT, dest->as_double_reg());
        __ move(AT, *conhi);
        __ mtc1(AT, dest->as_double_reg()+1);
      }
#else
      if (dest->is_double_cpu()) {
        __ li(dest->as_register_lo(), c->as_jlong());
      } else {
        __ li(dest->as_register(), c->as_jlong());
      }
#endif
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        // Unresolved oop: emit a patchable load.
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      // Load the float from the constant table through AT.
      address const_addr = float_constant(c->as_jfloat());
      assert (const_addr != NULL, "must create float constant in the constant table");

      if (dest->is_single_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lwc1(dest->as_float_reg(), AT, 0);

      } else {
        // Float bits destined for a general-purpose register.
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        assert(dest->as_register() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lw(dest->as_register(), AT, 0);
      }
      break;
    }

    case T_DOUBLE: {
      // Load the double from the constant table through AT.
      address const_addr = double_constant(c->as_jdouble());
      assert (const_addr != NULL, "must create double constant in the constant table");

      if (dest->is_double_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        // 32-bit: load the two 32-bit halves into the FPU register pair.
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        // Double bits destined for general-purpose register(s).
        assert(dest->as_register_lo() != AT, "AT can not be allocated.");
        assert(dest->as_register_hi() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lw(dest->as_register_lo(), AT, 0);
        __ lw(dest->as_register_hi(), AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ld(dest->as_register_lo(), AT, 0);
#endif
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
// Store a constant into a stack slot.  Single-word values go through AT;
// oops additionally record a relocation for the GC.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      // Both are 32-bit bit patterns; store as a word.
      __ move(AT, c->as_jint_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      if (c->as_jobject() == NULL) {
        // NULL oop: store the zero register directly.
        __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
#else
        __ li48(AT, (long)c->as_jobject());
#endif
        __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
      break;
    case T_LONG:  // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store the two halves separately.
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes));
#else
      __ move(AT, c->as_jlong_bits());
      __ sd(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
#endif
      break;
    default:
      ShouldNotReachHere();
  }
}
// Store a constant to memory.  'type' selects the store width; oops go
// through AT with a relocation (and optional compressed-oop encoding).
// 'null_check_here' tracks the PC of the instruction that can fault so the
// implicit null-check debug info points at the right instruction.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_LONG:  // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store the halves separately.
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, as_Address_hi(addr));
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, as_Address_lo(addr));
#else
      if(c->as_jlong_bits() != 0) {
        /* DoublePrint: -0.0
         *   (gdb) print /x -9223372036854775808
         *   $1 = 0x8000000000000000
         */
        // Non-zero bits (including -0.0) need a full 64-bit immediate.
        __ li64(AT, c->as_jlong_bits());
        __ sd(AT, as_Address_lo(addr));
      } else
        // All-zero bits: store the zero register directly.
        __ sd(R0, as_Address(addr));
#endif
      break;
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL){
        // Store NULL; narrow store when oops are compressed.
        if (UseCompressedOops && !wide) {
          __ sw(R0, as_Address(addr));
        } else {
          __ st_ptr(R0, as_Address(addr));
        }
      } else {
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
        __ st_ptr(AT, as_Address(addr));
        null_check_here = code_offset();
#else
        //by_css
        __ li64(AT, (long)c->as_jobject());
        if (UseCompressedOops && !wide) {
          // Compress the oop, then store narrow; the store is the
          // potentially-faulting instruction.
          __ encode_heap_oop(AT);
          null_check_here = code_offset();
          __ sw(AT, as_Address(addr));
        } else {
          __ st_ptr(AT, as_Address(addr));
        }
#endif
      }
      break;
    case T_INT:  // fall through
    case T_FLOAT:
      if(c->as_jint_bits() != 0) {
        __ move(AT, c->as_jint_bits());
        __ sw(AT, as_Address(addr));
      } else
        // Zero bit pattern: store R0 directly.
        __ sw(R0, as_Address(addr));
      break;
    case T_ADDRESS:
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, as_Address(addr));
      break;
    case T_BOOLEAN:  // fall through
    case T_BYTE:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sb(AT, as_Address(addr));
      }
      else
        __ sb(R0, as_Address(addr));
      break;
    case T_CHAR:  // fall through
    case T_SHORT:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sh(AT, as_Address(addr));
      }
      else
        __ sh(R0, as_Address(addr));
      break;
    default: ShouldNotReachHere();
  };
  // Register the faulting PC for the implicit null check, if requested.
  if (info != NULL) add_debug_info_for_null_check(null_check_here, info);
}
// Register-to-register move. Dispatches on the operand kinds:
//   FPU -> FPU, GPR -> GPR, and the mixed GPR <-> FPU cases.
// On 32-bit builds a double/long occupies a register pair (lo/hi); on LP64
// a single 64-bit register is used and the hi half is never touched.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  if (dest->is_float_kind() && src->is_float_kind()) {
    // float to float moves
    if (dest->is_single_fpu()) {
      assert(src->is_single_fpu(), "must both be float");
      __ mov_s(dest->as_float_reg(), src->as_float_reg());
    } else {
      assert(src->is_double_fpu(), "must bothe be double");
      __ mov_d( dest->as_double_reg(),src->as_double_reg());
    }
  } else if (!dest->is_float_kind() && !src->is_float_kind()) {
    // int to int moves
    if (dest->is_single_cpu()) {
#ifdef _LP64
      //FIXME aoqi: copy from x86
      if (src->type() == T_LONG) {
        // Can do LONG -> OBJECT: the low 64-bit register carries the value.
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
#endif
      assert(src->is_single_cpu(), "must match");
      if (dest->type() == T_INT) {
        // 32-bit destination: use the sign-extending 32-bit move so the
        // upper half of the 64-bit register stays canonical.
        __ move_u32(dest->as_register(), src->as_register());
      } else
        move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
#endif
      // Gather from/to register halves. On LP64 hi == lo (asserted below),
      // so only f_lo/t_lo matter.
      Register f_lo;
      Register f_hi;
      Register t_lo;
      Register t_hi;
      if (src->is_single_cpu()) {
        f_lo = src->as_register();
        t_lo = dest->as_register_lo();
      } else {
        f_lo = src->as_register_lo();
        f_hi = src->as_register_hi();
        t_lo = dest->as_register_lo();
        t_hi = dest->as_register_hi();
        assert(f_hi == f_lo, "must be same");
        assert(t_hi == t_lo, "must be same");
      }
#ifdef _LP64
      move_regs(f_lo, t_lo);
#else
      /*
      if (src->as_register_hi() != dest->as_register_lo()) {
        move_regs(src->as_register_lo(), dest->as_register_lo());
        move_regs(src->as_register_hi(), dest->as_register_hi());
      } else if (src->as_register_lo() != dest->as_register_hi()) {
        move_regs(src->as_register_hi(), dest->as_register_hi());
        move_regs(src->as_register_lo(), dest->as_register_lo());
      } else {
        swap_reg(src->as_register_lo(), src->as_register_hi());
      }
      */
      assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
      // Order the two half-moves so a source half is never clobbered
      // before it has been read (full overlap degenerates to a swap).
      if (f_lo == t_hi && f_hi == t_lo) {
        swap_reg(f_lo, f_hi);
      } else if (f_hi == t_lo) {
        assert(f_lo != t_hi, "overwriting register");
        move_regs(f_hi, t_hi);
        move_regs(f_lo, t_lo);
      } else {
        assert(f_hi != t_lo, "overwriting register");
        move_regs(f_lo, t_lo);
        move_regs(f_hi, t_hi);
      }
#endif // LP64
    }
  } else {
    // float to int or int to float moves, via the coprocessor-1 move
    // instructions (bit pattern is transferred unchanged, no conversion).
    if (dest->is_double_cpu()) {
      assert(src->is_double_fpu(), "must match");
      __ mfc1(dest->as_register_lo(), src->as_double_reg());
#ifndef _LP64
      __ mfc1(dest->as_register_hi(), src->as_double_reg() + 1);
#endif
    } else if (dest->is_single_cpu()) {
      assert(src->is_single_fpu(), "must match");
      __ mfc1(dest->as_register(), src->as_float_reg());
    } else if (dest->is_double_fpu()) {
      assert(src->is_double_cpu(), "must match");
      __ mtc1(src->as_register_lo(), dest->as_double_reg());
#ifndef _LP64
      __ mtc1(src->as_register_hi(), dest->as_double_reg() + 1);
#endif
    } else if (dest->is_single_fpu()) {
      assert(src->is_single_cpu(), "must match");
      __ mtc1(src->as_register(), dest->as_float_reg());
    }
  }
}
1045 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type,bool pop_fpu_stack) {
1046 assert(src->is_register(), "should not call otherwise");
1047 assert(dest->is_stack(), "should not call otherwise");
1049 if (src->is_single_cpu()) {
1050 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
1051 if (type == T_OBJECT || type == T_ARRAY) {
1052 __ verify_oop(src->as_register());
1053 }
1054 #ifdef _LP64
1055 if (type == T_INT)
1056 __ sw(src->as_register(),dst);
1057 else
1058 #endif
1059 __ st_ptr(src->as_register(),dst);
1060 } else if (src->is_double_cpu()) {
1061 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
1062 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
1063 __ st_ptr(src->as_register_lo(),dstLO);
1064 NOT_LP64(__ st_ptr(src->as_register_hi(),dstHI));
1065 }else if (src->is_single_fpu()) {
1066 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1067 __ swc1(src->as_float_reg(), dst_addr);
1069 } else if (src->is_double_fpu()) {
1070 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1071 #ifndef _LP64
1072 __ swc1(src->as_double_reg(), dst_addr);
1073 __ swc1(src->as_double_reg() + 1, dst_addr.base(), dst_addr.disp() + 4);
1074 #else
1075 __ sdc1(src->as_double_reg(), dst_addr);
1076 #endif
1078 } else {
1079 ShouldNotReachHere();
1080 }
1081 }
//FIXME
// Store a register to memory (register -> LIR_Address), optionally emitting
// a PatchingStub when the field offset is not yet known (lir_patch_*).
// Displacement handling:
//   - patched:      materialize the (to-be-patched) displacement in AT via
//                   lui/addiu so the patch site has a fixed instruction shape
//   - simm16:       use the immediate offset directly
//   - large (>16b): load the high part into AT, fold the low part into the
//                   store's immediate
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info,bool pop_fpu_stack, bool wide, bool/*unaliged*/) {
  LIR_Address* to_addr = dest->as_address_ptr();
  //Register dest_reg = to_addr->base()->as_register();
  // FIXME aoqi
  Register dest_reg = to_addr->base()->is_single_cpu()? to_addr->base()->as_register() : to_addr->base()->as_register_lo();
  PatchingStub* patch = NULL;
  bool needs_patching = (patch_code != lir_patch_none);
  Register disp_reg = NOREG;
  int disp_value = to_addr->disp();
  /*
  the start position of patch template is labeled by "new PatchingStub(...)"
  during patch, T9 will be changed and not restore
  that's why we use S7 but not T9 as compressed_src here
  */
  Register compressed_src = S7;
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Compress into a scratch register; src must stay intact for the
      // uncompressed (wide) paths and for verify_oop above.
      __ move(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!src->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal,
           "patching doesn't match register");
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  if (needs_patching) {
    // Fixed two-instruction shape so the patcher can rewrite the offset.
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }
  // NOTE(review): `offset` is recorded here but, unlike mem2reg, never used
  // afterwards (the null-check debug info was already added above).
  int offset = code_offset();
  switch(type) {
    case T_DOUBLE:
      assert(src->is_double_fpu(), "just check");
      if (disp_reg == noreg) {
#ifndef _LP64
        __ swc1(src->as_double_reg(), dest_reg, disp_value);
        __ swc1(src->as_double_reg()+1, dest_reg, disp_value+4);
#else
        __ sdc1(src->as_double_reg(), dest_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, 0);
        __ swc1(src->as_double_reg()+1, AT, 4);
#else
        __ sdc1(src->as_double_reg(), AT, 0);
#endif
      } else {
        // Large displacement: AT holds the high part, immediate the low part.
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ swc1(src->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ sdc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
      break;

    case T_FLOAT:
      if (disp_reg == noreg) {
        __ swc1(src->as_float_reg(), dest_reg, disp_value);
      } else if(needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(from_lo, AT, 0);
      } else {
        __ st_ptr(from_lo, as_Address_lo(to_addr));
      }
#else
      // 32-bit: store the two halves in an order that never clobbers a base
      // or index register that still needs to be read.
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          NOT_LP64(__ st_ptr(from_hi, AT, longSize/2);)
          __ st_ptr(from_lo, AT, 0);
        } else {
          __ st_ptr(from_hi, as_Address_hi(to_addr));
          __ st_ptr(from_lo, as_Address_lo(to_addr));
        }
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(from_lo, AT, 0);
          __ st_ptr(from_hi, AT, longSize/2);
        } else {
          __ st_ptr(from_lo, as_Address_lo(to_addr));
          __ st_ptr(from_hi, as_Address_hi(to_addr));
        }
      }
#endif
      break;
    }

    case T_ARRAY:
    case T_OBJECT:
      // On 32-bit builds the body below (and T_ADDRESS's) compiles away, so
      // control deliberately falls through to T_INT and the oop is stored
      // with a 32-bit sw, which is pointer-sized there.
#ifdef _LP64
      if (UseCompressedOops && !wide) {
        // 32-bit compressed oop store.
        if (disp_reg == noreg) {
          __ sw(compressed_src, dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ st_ptr(src->as_register(), dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
#endif
    case T_ADDRESS:
#ifdef _LP64
      if (disp_reg == noreg) {
        __ st_ptr(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;
#endif
    case T_INT:
      if (disp_reg == noreg) {
        __ sw(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_CHAR:
    case T_SHORT:
      if (disp_reg == noreg) {
        __ sh(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_BYTE:
    case T_BOOLEAN:
      assert(src->is_single_cpu(), "just check");

      if (disp_reg == noreg) {
        __ sb(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    // NOTE(review): this uses base()->as_register() directly, while dest_reg
    // above tolerates a double-cpu base via as_register_lo() — presumably
    // patched accesses always have a single-cpu base; confirm before relying
    // on it.
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
// Reload a stack slot into a register. The slot kind (single/double) and the
// destination kind select the load width; T_INT on LP64 uses a 32-bit
// sign-extending lw instead of a full pointer load.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (type == T_INT)
      __ lw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    else
#endif
    __ ld_ptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    /* java.util.concurrent.locks.ReentrantReadWriteLock$Sync::tryAcquire

       88 move [stack:2|L] [a5a5|J]
      OpenJDK 64-Bit Client VM warning: /mnt/openjdk6-mips/hotspot/src/share/c1/c1_LIR.hpp, 397 , assert(is_double_stack() && !is_virtual(),"type check")
      OpenJDK 64-Bit Client VM warning: /mnt/openjdk6-mips/hotspot/src/share/c1/c1_LIR.hpp, 397 , assert(is_double_stack() && !is_virtual(),"type check")
      0x000000556197af8c: ld a5, 0x50(sp)

      On LP64 a long may be allocated to either a single or a double stack
      slot (see the log above), so accept both here.
    */
    Address src_addr_LO;
    if (src->is_single_stack())
      src_addr_LO = frame_map()->address_for_slot(src->single_stack_ix(),lo_word_offset_in_bytes);
    else if (src->is_double_stack())
      src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    else
      ShouldNotReachHere();
#else
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
#endif
#ifdef _LP64
    if (src->type() == T_INT)
      __ lw(dest->as_register_lo(), src_addr_LO);
    else
#endif
    __ ld_ptr(dest->as_register_lo(), src_addr_LO);
    // Only 32-bit builds keep a separate high half.
    NOT_LP64(__ ld_ptr(dest->as_register_hi(), src_addr_HI));
  }else if (dest->is_single_fpu()) {
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lwc1(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
#ifndef _LP64
    // 32-bit: a double lives in an even/odd FPU register pair.
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ lwc1(dest->as_double_reg(), src_addr_LO);
    __ lwc1(dest->as_double_reg()+1, src_addr_HI);
#else
    __ ldc1(dest->as_double_reg(), src_addr_LO);
#endif
  } else {
    ShouldNotReachHere();
    /*
    assert(dest->is_single_cpu(), "cannot be anything else but a single cpu");
    assert(type!= T_ILLEGAL, "Bad type in stack2reg")
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lw(dest->as_register(), addr);
    */
  }
}
1370 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1371 if (src->is_single_stack()) {
1372 /*
1373 * 2012/5/23 Jin: YozoOffice(-Xcomp) corrupts in "New File -> word"
1374 *
1375 * [b.q.e.a.z::bw()]
1376 * move [stack:15|L] [stack:17|L]
1377 * 0x00000055584e7cf4: lw at, 0x78(sp) <--- error!
1378 * 0x00000055584e7cf8: sw at, 0x88(sp)
1379 */
1380 if (type == T_OBJECT )
1381 {
1382 __ ld(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
1383 __ sd(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
1384 }
1385 else
1386 {
1387 __ lw(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
1388 __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
1389 }
1390 } else if (src->is_double_stack()) {
1391 #ifndef _LP64
1392 __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
1393 __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
1394 __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix(),4));
1395 __ sw(AT, frame_map()->address_for_slot(dest ->double_stack_ix(),4));
1396 #else
1397 __ ld_ptr(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
1398 __ st_ptr(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
1399 #endif
1400 } else {
1401 ShouldNotReachHere();
1402 }
1403 }
// Load from memory into a register (LIR_Address -> register), optionally via
// a PatchingStub when the field offset is not yet known.
// if patching needed, be sure the instruction at offset is a MoveMemReg
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();
  //Address from_addr = as_Address(addr);
  //Register src_reg = addr->base()->as_register();
  // FIXME aoqi
  Register src_reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  // we must use lui&addiu, so the patcher finds a fixed two-instruction
  // shape for the displacement; for unpatched large offsets only the high
  // part goes into AT and the low part rides in the load's immediate.
  if (needs_patching) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order. Each case below re-records `offset` just
  // before the actual load so the null-check debug info points at it.
  int offset = code_offset();

  switch(type) {
    case T_BOOLEAN:
    case T_BYTE: {
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lb(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lb(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lb(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_CHAR: {
      // Unsigned 16-bit load for Java char.
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lhu(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_SHORT: {
      // Signed 16-bit load for Java short.
      // assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lh(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        // 32-bit compressed-oop load; decoded after the switch.
        if (disp_reg == noreg) {
          __ lwu(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;

    case T_ADDRESS:
      // A klass pointer load: narrow (lwu) when compressed class pointers
      // are in use, full pointer load otherwise; decoded after the switch.
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        if (disp_reg == noreg) {
          __ lwu(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;

    case T_INT: {
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lw(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        __ ld_ptr(to_lo, AT, 0);
      } else {
        __ ld_ptr(to_lo, as_Address_lo(addr));
      }
#else
      // 32-bit: order the two half-loads so a base/index register that is
      // also a destination half is not clobbered before it is used.
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||(base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ lw(to_lo, Address(to_hi));
        __ lw(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_hi, AT, longSize/2);
          __ lw(to_lo, AT, 0);
        } else {
          __ lw(to_hi, as_Address_hi(addr));
          __ lw(to_lo, as_Address_lo(addr));
        }
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_lo, AT, 0);
          __ lw(to_hi, AT, longSize/2);
        } else {
          __ lw(to_lo, as_Address_lo(addr));
          __ lw(to_hi, as_Address_hi(addr));
        }
      }
#endif
    }
    break;

    case T_FLOAT: {
      //assert(to_reg.is_float(), "just check");
      if (disp_reg == noreg) {
        __ lwc1(dest->as_float_reg(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_DOUBLE: {
      //assert(to_reg.is_double(), "just check");

      if (disp_reg == noreg) {
#ifndef _LP64
        // 32-bit: a double lives in an even/odd FPU register pair.
        __ lwc1(dest->as_double_reg(), src_reg, disp_value);
        __ lwc1(dest->as_double_reg()+1, src_reg, disp_value+4);
#else
        __ ldc1(dest->as_double_reg(), src_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ lwc1(dest->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ ldc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    patching_epilog(patch, patch_code, src_reg, info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}
// Prefetch-for-read hint: no prefetch instruction is emitted in this port.
// NOTE(review): the locals below are unused — presumably kept to mirror the
// x86 port; confirm as_Address() emits no code here before removing them.
void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);
}
// Prefetch-for-write hint: intentionally a no-op in this port.
void LIR_Assembler::prefetchw(LIR_Opr src) {
}
1693 NEEDS_CLEANUP; // This could be static?
1694 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1695 int elem_size = type2aelembytes(type);
1696 switch (elem_size) {
1697 case 1: return Address::times_1;
1698 case 2: return Address::times_2;
1699 case 4: return Address::times_4;
1700 case 8: return Address::times_8;
1701 }
1702 ShouldNotReachHere();
1703 return Address::no_scale;
1704 }
1707 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1708 switch (op->code()) {
1709 case lir_frem:
1710 arithmetic_frem(
1711 op->code(),
1712 op->in_opr1(),
1713 op->in_opr2(),
1714 op->in_opr3(),
1715 op->result_opr(),
1716 op->info());
1717 break;
1719 case lir_idiv:
1720 case lir_irem:
1721 arithmetic_idiv(
1722 op->code(),
1723 op->in_opr1(),
1724 op->in_opr2(),
1725 op->in_opr3(),
1726 op->result_opr(),
1727 op->info());
1728 break;
1729 default: ShouldNotReachHere(); break;
1730 }
1731 }
1733 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1734 LIR_Opr opr1 = op->left();
1735 LIR_Opr opr2 = op->right();
1736 LIR_Condition condition = op->cond();
1737 #ifdef ASSERT
1738 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1739 if (op->block() != NULL) _branch_target_blocks.append(op->block());
1740 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1741 #endif
1742 if (op->cond() == lir_cond_always) {
1743 if(op->label()==NULL) //by liaob1
1744 __ b(*op->label());
1745 else
1746 __ b_far(*op->label());
1747 __ delayed()->nop();
1748 return;
1749 }
1750 if (opr1->is_single_cpu()) {
1751 Register reg_op1 = opr1->as_register();
1752 if (opr2->is_single_cpu()) {
1753 #ifdef OPT_RANGECHECK
1754 assert(!op->check(), "just check");
1755 #endif
1756 Register reg_op2 = opr2->as_register();
1757 switch (condition) {
1758 case lir_cond_equal:
1759 __ beq(reg_op1, reg_op2, *op->label());
1760 break;
1761 case lir_cond_notEqual:
1762 if(op->label()==NULL)
1763 __ bne(reg_op1, reg_op2, *op->label());//liaobin1
1764 else
1765 __ bne_far(reg_op1, reg_op2, *op->label());//liaobin1
1766 break;
1767 case lir_cond_less:
1768 // AT = 1 TRUE
1769 __ slt(AT, reg_op1, reg_op2);
1770 __ bne_far(AT, R0, *op->label());
1771 break;
1772 case lir_cond_lessEqual:
1773 // AT = 0 TRUE
1774 __ slt(AT, reg_op2, reg_op1);
1775 __ beq_far(AT, R0, *op->label());
1776 break;
1777 case lir_cond_belowEqual:
1778 // AT = 0 TRUE
1779 __ sltu(AT, reg_op2, reg_op1);
1780 __ beq(AT, R0, *op->label());
1781 break;
1782 case lir_cond_greaterEqual:
1783 // AT = 0 TRUE
1784 __ slt(AT, reg_op1, reg_op2);
1785 __ beq_far(AT, R0, *op->label());
1786 break;
1787 case lir_cond_aboveEqual:
1788 // AT = 0 TRUE
1789 __ sltu(AT, reg_op1, reg_op2);
1790 __ beq_far(AT, R0, *op->label());
1791 break;
1792 case lir_cond_greater:
1793 // AT = 1 TRUE
1794 __ slt(AT, reg_op2, reg_op1);
1795 __ bne_far(AT, R0, *op->label());
1796 break;
1797 default: ShouldNotReachHere();
1798 }
1799 } else if (opr2->is_constant()) {
1800 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
1801 bool is_object = false;
1802 if (opr2->pointer()->as_constant()->type() == T_INT) {
1803 temp_value = (jint)(opr2->as_jint());
1804 } else if (opr2->pointer()->as_constant()->type() == T_LONG) {
1805 temp_value = (jlong)(opr2->as_jlong());
1806 } else if (opr2->pointer()->as_constant()->type() == T_OBJECT) {
1807 is_object = true;
1808 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_jobject());
1809 } else {
1810 ShouldNotReachHere();
1811 }
1813 switch (condition) {
1814 case lir_cond_equal:
1815 #ifdef OPT_RANGECHECK
1816 assert(!op->check(), "just check");
1817 #endif
1818 if (temp_value) {
1819 if (is_object) {
1820 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1821 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1822 __ relocate(rspec);
1823 }
1824 __ li(AT, temp_value);
1825 __ beq_far(reg_op1, AT, *op->label());
1826 } else {
1827 __ beq_far(reg_op1, R0, *op->label());
1828 }
1829 break;
1831 case lir_cond_notEqual:
1832 #ifdef OPT_RANGECHECK
1833 assert(!op->check(), "just check");
1834 #endif
1835 if (temp_value) {
1836 if (is_object) {
1837 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1838 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1839 __ relocate(rspec);
1840 }
1841 __ li(AT, temp_value);
1842 __ bne_far(reg_op1, AT, *op->label());
1843 } else {
1844 __ bne_far(reg_op1, R0, *op->label());
1845 }
1846 break;
1848 case lir_cond_less:
1849 #ifdef OPT_RANGECHECK
1850 assert(!op->check(), "just check");
1851 #endif
1852 // AT = 1 TRUE
1853 if (Assembler::is_simm16(temp_value)) {
1854 __ slti(AT, reg_op1, temp_value);
1855 } else {
1856 __ move(AT, temp_value);
1857 __ slt(AT, reg_op1, AT);
1858 }
1859 __ bne_far(AT, R0, *op->label());
1860 break;
1862 case lir_cond_lessEqual:
1863 #ifdef OPT_RANGECHECK
1864 assert(!op->check(), "just check");
1865 #endif
1866 // AT = 0 TRUE
1867 __ li(AT, temp_value);
1868 __ slt(AT, AT, reg_op1);
1869 __ beq(AT, R0, *op->label());
1870 break;
1872 case lir_cond_belowEqual:
1873 // AT = 0 TRUE
1874 #ifdef OPT_RANGECHECK
1875 if (op->check()) {
1876 __ li(AT, temp_value);
1877 add_debug_info_for_range_check_here(op->info(), temp_value);
1878 __ tgeu(AT, reg_op1, 29);
1879 } else {
1880 #endif
1881 __ li(AT, temp_value);
1882 __ sltu(AT, AT, reg_op1);
1883 __ beq(AT, R0, *op->label());
1884 #ifdef OPT_RANGECHECK
1885 }
1886 #endif
1887 break;
1889 case lir_cond_greaterEqual:
1890 #ifdef OPT_RANGECHECK
1891 assert(!op->check(), "just check");
1892 #endif
1893 // AT = 0 TRUE
1894 if (Assembler::is_simm16(temp_value)) {
1895 __ slti(AT, reg_op1, temp_value);
1896 } else {
1897 __ li(AT, temp_value);
1898 __ slt(AT, reg_op1, AT);
1899 }
1900 __ beq(AT, R0, *op->label());
1901 break;
1903 case lir_cond_aboveEqual:
1904 #ifdef OPT_RANGECHECK
1905 assert(!op->check(), "just check");
1906 #endif
1907 // AT = 0 TRUE
1908 if (Assembler::is_simm16(temp_value)) {
1909 __ sltiu(AT, reg_op1, temp_value);
1910 } else {
1911 __ li(AT, temp_value);
1912 __ sltu(AT, reg_op1, AT);
1913 }
1914 __ beq(AT, R0, *op->label());
1915 break;
1917 case lir_cond_greater:
1918 #ifdef OPT_RANGECHECK
1919 assert(!op->check(), "just check");
1920 #endif
1921 // AT = 1 TRUE
1922 __ li(AT, temp_value);
1923 __ slt(AT, AT, reg_op1);
1924 __ bne_far(AT, R0, *op->label());
1925 break;
1927 default: ShouldNotReachHere();
1928 }
1930 } else {
1931 if (opr2->is_address()) {
1932 //FIXME. aoqi lw or ld_ptr?
1933 if (op->type() == T_INT)
1934 __ lw(AT, as_Address(opr2->pointer()->as_address()));
1935 else
1936 __ ld_ptr(AT, as_Address(opr2->pointer()->as_address()));
1937 } else if (opr2->is_stack()) {
1938 //FIXME. aoqi
1939 __ ld_ptr(AT, frame_map()->address_for_slot(opr2->single_stack_ix()));
1940 } else {
1941 ShouldNotReachHere();
1942 }
1943 switch (condition) {
1944 case lir_cond_equal:
1945 #ifdef OPT_RANGECHECK
1946 assert(!op->check(), "just check");
1947 #endif
1948 __ beq(reg_op1, AT, *op->label());
1949 break;
1950 case lir_cond_notEqual:
1951 #ifdef OPT_RANGECHECK
1952 assert(!op->check(), "just check");
1953 #endif
1954 __ bne_far(reg_op1, AT, *op->label());
1955 break;
1956 case lir_cond_less:
1957 #ifdef OPT_RANGECHECK
1958 assert(!op->check(), "just check");
1959 #endif
1960 // AT = 1 TRUE
1961 __ slt(AT, reg_op1, AT);
1962 __ bne_far(AT, R0, *op->label());
1963 break;
1964 case lir_cond_lessEqual:
1965 #ifdef OPT_RANGECHECK
1966 assert(!op->check(), "just check");
1967 #endif
1968 // AT = 0 TRUE
1969 __ slt(AT, AT, reg_op1);
1970 __ beq(AT, R0, *op->label());
1971 break;
1972 case lir_cond_belowEqual:
1973 #ifdef OPT_RANGECHECK
1974 assert(!op->check(), "just check");
1975 #endif
1976 // AT = 0 TRUE
1977 __ sltu(AT, AT, reg_op1);
1978 __ beq(AT, R0, *op->label());
1979 break;
1980 case lir_cond_greaterEqual:
1981 #ifdef OPT_RANGECHECK
1982 assert(!op->check(), "just check");
1983 #endif
1984 // AT = 0 TRUE
1985 __ slt(AT, reg_op1, AT);
1986 __ beq(AT, R0, *op->label());
1987 break;
1988 case lir_cond_aboveEqual:
1989 // AT = 0 TRUE
1990 #ifdef OPT_RANGECHECK
1991 if (op->check()) {
1992 add_debug_info_for_range_check_here(op->info(), opr1->rinfo());
1993 __ tgeu(reg_op1, AT, 29);
1994 } else {
1995 #endif
1996 __ sltu(AT, reg_op1, AT);
1997 __ beq_far(AT, R0, *op->label());
1998 #ifdef OPT_RANGECHECK
1999 }
2000 #endif
2001 break;
2002 case lir_cond_greater:
2003 #ifdef OPT_RANGECHECK
2004 assert(!op->check(), "just check");
2005 #endif
2006 // AT = 1 TRUE
2007 __ slt(AT, AT, reg_op1);
2008 __ bne_far(AT, R0, *op->label());
2009 break;
2010 default: ShouldNotReachHere();
2011 }
2012 }
2013 #ifdef OPT_RANGECHECK
2014 if (!op->check())
2015 #endif
2016 __ delayed()->nop();
2018 } else if(opr1->is_address() || opr1->is_stack()) {
2019 #ifdef OPT_RANGECHECK
2020 assert(!op->check(), "just check");
2021 #endif
2022 if (opr2->is_constant()) {
2023 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
2024 if (opr2->as_constant_ptr()->type() == T_INT) {
2025 temp_value = (jint)opr2->as_constant_ptr()->as_jint();
2026 } else if (opr2->as_constant_ptr()->type() == T_OBJECT) {
2027 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_constant_ptr()->as_jobject());
2028 } else {
2029 ShouldNotReachHere();
2030 }
2032 if (Assembler::is_simm16(temp_value)) {
2033 if (opr1->is_address()) {
2034 __ lw(AT, as_Address(opr1->pointer()->as_address()));
2035 } else {
2036 __ lw(AT, frame_map()->address_for_slot(opr1->single_stack_ix()));
2037 }
2039 switch(condition) {
2041 case lir_cond_equal:
2042 __ addi(AT, AT, -(int)temp_value);
2043 __ beq(AT, R0, *op->label());
2044 break;
2045 case lir_cond_notEqual:
2046 __ addi(AT, AT, -(int)temp_value);
2047 __ bne_far(AT, R0, *op->label());
2048 break;
2049 case lir_cond_less:
2050 // AT = 1 TRUE
2051 __ slti(AT, AT, temp_value);
2052 __ bne_far(AT, R0, *op->label());
2053 break;
2054 case lir_cond_lessEqual:
2055 // AT = 0 TRUE
2056 __ addi(AT, AT, -temp_value);
2057 __ slt(AT, R0, AT);
2058 __ beq(AT, R0, *op->label());
2059 break;
2060 case lir_cond_belowEqual:
2061 // AT = 0 TRUE
2062 __ addiu(AT, AT, -temp_value);
2063 __ sltu(AT, R0, AT);
2064 __ beq(AT, R0, *op->label());
2065 break;
2066 case lir_cond_greaterEqual:
2067 // AT = 0 TRUE
2068 __ slti(AT, AT, temp_value);
2069 __ beq(AT, R0, *op->label());
2070 break;
2071 case lir_cond_aboveEqual:
2072 // AT = 0 TRUE
2073 __ sltiu(AT, AT, temp_value);
2074 __ beq(AT, R0, *op->label());
2075 break;
2076 case lir_cond_greater:
2077 // AT = 1 TRUE
2078 __ addi(AT, AT, -temp_value);
2079 __ slt(AT, R0, AT);
2080 __ bne_far(AT, R0, *op->label());
2081 break;
2083 default:
2084 Unimplemented();
2085 }
2086 } else {
2087 Unimplemented();
2088 }
2089 } else {
2090 Unimplemented();
2091 }
2092 __ delayed()->nop();
2094 } else if(opr1->is_double_cpu()) {
2095 #ifdef OPT_RANGECHECK
2096 assert(!op->check(), "just check");
2097 #endif
2098 Register opr1_lo = opr1->as_register_lo();
2099 Register opr1_hi = opr1->as_register_hi();
2101 if (opr2->is_double_cpu()) {
2102 Register opr2_lo = opr2->as_register_lo();
2103 Register opr2_hi = opr2->as_register_hi();
2104 switch (condition) {
2105 case lir_cond_equal: {
2106 Label L;
2107 #ifndef _LP64
2108 __ bne(opr1_lo, opr2_lo, L);
2109 __ delayed()->nop();
2110 __ beq(opr1_hi, opr2_hi, *op->label());
2111 #else
2112 /* static jobject java.lang.Long.toString(jlong)
2114 10 move [t0t0|J] [a4a4|J]
2115 12 move [lng:-9223372036854775808|J] [a6a6|J]
2116 14 branch [EQ] [a4a4|J] [a6a6|J] [B1]
2117 0x000000555e8532e4: bne a4, a6, 0x000000555e8532e4 <-- error
2118 0x000000555e8532e8: sll zero, zero, 0
2119 */
2120 __ beq(opr1_lo, opr2_lo, *op->label());
2121 #endif
2122 __ delayed()->nop();
2123 __ bind(L);
2124 }
2125 break;
2127 case lir_cond_notEqual:
2128 if (op->label()==NULL)
2129 __ bne(opr1_lo, opr2_lo, *op->label());//by liaobin2
2130 else
2131 __ bne_far(opr1_lo, opr2_lo, *op->label());//by liaobin2
2132 __ delayed()->nop();
2133 if (op->label()==NULL)
2134 NOT_LP64(__ bne(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2135 else
2136 NOT_LP64(__ bne_far(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2137 NOT_LP64(__ delayed()->nop());
2138 break;
2140 case lir_cond_less: {
2141 #ifdef _LP64
2142 __ slt(AT, opr1_lo, opr2_lo);
2143 __ bne_far(AT, R0, *op->label());
2144 __ delayed()->nop();
2145 #else
2146 Label L;
2148 // if hi less then jump
2149 __ slt(AT, opr1_hi, opr2_hi);
2150 __ bne(AT, R0, *op->label());
2151 __ delayed()->nop();
2153 // if hi great then fail
2154 __ bne(opr1_hi, opr2_hi, L);
2155 __ delayed();
2157 // now just comp lo as unsigned
2158 __ sltu(AT, opr1_lo, opr2_lo);
2159 __ bne_far(AT, R0, *op->label());
2160 __ delayed()->nop();
2162 __ bind(L);
2163 #endif
2164 }
2165 break;
2167 case lir_cond_lessEqual: {
2168 #ifdef _LP64
2169 __ slt(AT, opr2_lo, opr1_lo);
2170 __ beq_far(AT, R0, *op->label());
2171 __ delayed()->nop();
2172 #else
2173 Label L;
2175 // if hi great then fail
2176 __ slt(AT, opr2_hi, opr1_hi);
2177 __ bne(AT, R0, L);
2178 __ delayed()->nop();
2180 // if hi less then jump
2181 if(op->label()==NULL)
2182 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin4
2183 else
2184 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin4
2185 __ delayed();
2187 // now just comp lo as unsigned
2188 __ sltu(AT, opr2_lo, opr1_lo);
2189 __ beq(AT, R0, *op->label());
2190 __ delayed()->nop();
2192 __ bind(L);
2193 #endif
2194 }
2195 break;
2197 case lir_cond_belowEqual: {
2198 #ifdef _LP64
2199 __ sltu(AT, opr2_lo, opr1_lo);
2200 __ beq(AT, R0, *op->label());
2201 __ delayed()->nop();
2202 #else
2203 Label L;
2205 // if hi great then fail
2206 __ sltu(AT, opr2_hi, opr1_hi);
2207 __ bne_far(AT, R0, L);
2208 __ delayed()->nop();
2210 // if hi less then jump
2211 if(op->label()==NULL)
2212 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin5
2213 else
2214 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin5
2215 __ delayed();
2217 // now just comp lo as unsigned
2218 __ sltu(AT, opr2_lo, opr1_lo);
2219 __ beq(AT, R0, *op->label());
2220 __ delayed()->nop();
2222 __ bind(L);
2223 #endif
2224 }
2225 break;
2227 case lir_cond_greaterEqual: {
2228 #ifdef _LP64
2229 __ slt(AT, opr1_lo, opr2_lo);
2230 __ beq_far(AT, R0, *op->label());
2231 __ delayed()->nop();
2232 #else
2233 Label L;
2235 // if hi less then fail
2236 __ slt(AT, opr1_hi, opr2_hi);
2237 __ bne_far(AT, R0, L);
2238 __ delayed()->nop();
2240 // if hi great then jump
2241 if(op->label()==NULL)
2242 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin6
2243 else
2244 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin6
2245 __ delayed();
2247 // now just comp lo as unsigned
2248 __ sltu(AT, opr1_lo, opr2_lo);
2249 __ beq(AT, R0, *op->label());
2250 __ delayed()->nop();
2252 __ bind(L);
2253 #endif
2254 }
2255 break;
2257 case lir_cond_aboveEqual: {
2258 #ifdef _LP64
2259 __ sltu(AT, opr1_lo, opr2_lo);
2260 __ beq_far(AT, R0, *op->label());
2261 __ delayed()->nop();
2262 #else
2263 Label L;
2265 // if hi less then fail
2266 __ sltu(AT, opr1_hi, opr2_hi);
2267 __ bne(AT, R0, L);
2268 __ delayed()->nop();
2270 // if hi great then jump
2271 if(op->label()==NULL)
2272 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin7
2273 else
2274 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin7
2275 __ delayed();
2277 // now just comp lo as unsigned
2278 __ sltu(AT, opr1_lo, opr2_lo);
2279 __ beq(AT, R0, *op->label());
2280 __ delayed()->nop();
2282 __ bind(L);
2283 #endif
2284 }
2285 break;
2287 case lir_cond_greater: {
2288 #ifdef _LP64
2289 __ slt(AT, opr2_lo, opr1_lo);
2290 __ bne_far(AT, R0, *op->label());
2291 __ delayed()->nop();
2292 #else
2293 Label L;
2295 // if hi great then jump
2296 __ slt(AT, opr2_hi, opr1_hi);
2297 __ bne(AT, R0, *op->label());
2298 __ delayed()->nop();
2300 // if hi less then fail
2301 __ bne(opr2_hi, opr1_hi, L);
2302 __ delayed();
2304 // now just comp lo as unsigned
2305 __ sltu(AT, opr2_lo, opr1_lo);
2306 __ bne(AT, R0, *op->label());
2307 __ delayed()->nop();
2309 __ bind(L);
2310 #endif
2311 }
2312 break;
2314 default: ShouldNotReachHere();
2315 }
2317 } else if(opr2->is_constant()) {
2318 jlong lv = opr2->as_jlong();
2319 #ifndef _LP64
2320 jint iv_lo = (jint)lv;
2321 jint iv_hi = (jint)(lv>>32);
2322 bool is_zero = (lv==0);
2323 #endif
2325 switch (condition) {
2326 case lir_cond_equal:
2327 #ifdef _LP64
2328 __ li(T8, lv);
2329 __ beq(opr1_lo, T8, *op->label());
2330 __ delayed()->nop();
2331 #else
2332 if (is_zero) {
2333 __ orr(AT, opr1_lo, opr1_hi);
2334 __ beq(AT, R0, *op->label());
2335 __ delayed()->nop();
2336 } else {
2337 Label L;
2338 __ move(T8, iv_lo);
2339 __ bne(opr1_lo, T8, L);
2340 __ delayed();
2341 __ move(T8, iv_hi);
2342 __ beq(opr1_hi, T8, *op->label());
2343 __ delayed()->nop();
2344 __ bind(L);
2345 }
2346 #endif
2347 break;
2349 case lir_cond_notEqual:
2350 #ifdef _LP64
2351 __ li(T8, lv);
2352 __ bne(opr1_lo, T8, *op->label());
2353 __ delayed()->nop();
2354 #else
2355 if (is_zero) {
2356 __ orr(AT, opr1_lo, opr1_hi);
2357 __ bne(AT, R0, *op->label());
2358 __ delayed()->nop();
2359 } else {
2360 __ move(T8, iv_lo);
2361 __ bne(opr1_lo, T8, *op->label());
2362 __ delayed();
2363 __ move(T8, iv_hi);
2364 __ bne(opr1_hi, T8, *op->label());
2365 __ delayed()->nop();
2366 }
2367 #endif
2368 break;
2370 case lir_cond_less:
2371 #ifdef _LP64
2372 __ li(T8, lv);
2373 __ slt(AT, opr1_lo, T8);
2374 __ bne_far(AT, R0, *op->label());
2375 __ delayed()->nop();
2376 #else
2377 if (is_zero) {
2378 __ bltz(opr1_hi, *op->label());
2379 __ bltz(opr1_lo, *op->label());
2380 __ delayed()->nop();
2381 } else {
2382 Label L;
2384 // if hi less then jump
2385 __ move(T8, iv_hi);
2386 __ slt(AT, opr1_hi, T8);
2387 __ bne_far(AT, R0, *op->label());
2388 __ delayed()->nop();
2390 // if hi great then fail
2391 __ bne(opr1_hi, T8, L);
2392 __ delayed();
2394 // now just comp lo as unsigned
2395 if (Assembler::is_simm16(iv_lo)) {
2396 __ sltiu(AT, opr1_lo, iv_lo);
2397 } else {
2398 __ move(T8, iv_lo);
2399 __ sltu(AT, opr1_lo, T8);
2400 }
2401 __ bne(AT, R0, *op->label());
2402 __ delayed()->nop();
2404 __ bind(L);
2405 }
2406 #endif
2407 break;
2409 case lir_cond_lessEqual:
2410 #ifdef _LP64
2411 __ li(T8, lv);
2412 __ slt(AT, T8, opr1_lo);
2413 __ beq(AT, R0, *op->label());
2414 __ delayed()->nop();
2415 #else
2416 if (is_zero) {
2417 __ bltz(opr1_hi, *op->label());
2418 __ delayed()->nop();
2419 __ orr(AT, opr1_hi, opr1_lo);
2420 __ beq(AT, R0, *op->label());
2421 __ delayed();
2422 } else {
2423 Label L;
2425 // if hi great then fail
2426 __ move(T8, iv_hi);
2427 __ slt(AT, T8, opr1_hi);
2428 __ bne(AT, R0, L);
2429 __ delayed()->nop();
2431 // if hi less then jump
2432 __ bne(T8, opr1_hi, *op->label());
2433 __ delayed();
2435 // now just comp lo as unsigned
2436 __ move(T8, iv_lo);
2437 __ sltu(AT, T8, opr1_lo);
2438 __ beq(AT, R0, *op->label());
2439 __ delayed()->nop();
2441 __ bind(L);
2442 }
2443 #endif
2444 break;
2446 case lir_cond_belowEqual:
2447 #ifdef _LP64
2448 __ li(T8, lv);
2449 __ sltu(AT, T8, opr1_lo);
2450 __ beq(AT, R0, *op->label());
2451 __ delayed()->nop();
2452 #else
2453 if (is_zero) {
2454 __ orr(AT, opr1_hi, opr1_lo);
2455 __ beq(AT, R0, *op->label());
2456 __ delayed()->nop();
2457 } else {
2458 Label L;
2460 // if hi great then fail
2461 __ move(T8, iv_hi);
2462 __ sltu(AT, T8, opr1_hi);
2463 __ bne(AT, R0, L);
2464 __ delayed()->nop();
2466 // if hi less then jump
2467 __ bne(T8, opr1_hi, *op->label());
2468 __ delayed();
2470 // now just comp lo as unsigned
2471 __ move(T8, iv_lo);
2472 __ sltu(AT, T8, opr1_lo);
2473 __ beq(AT, R0, *op->label());
2474 __ delayed()->nop();
2476 __ bind(L);
2477 }
2478 #endif
2479 break;
2481 case lir_cond_greaterEqual:
2482 #ifdef _LP64
2483 __ li(T8, lv);
2484 __ slt(AT, opr1_lo, T8);
2485 __ beq(AT, R0, *op->label());
2486 __ delayed()->nop();
2487 #else
2488 if (is_zero) {
2489 __ bgez(opr1_hi, *op->label());
2490 __ delayed()->nop();
2491 } else {
2492 Label L;
2494 // if hi less then fail
2495 __ move(T8, iv_hi);
2496 __ slt(AT, opr1_hi, T8);
2497 __ bne(AT, R0, L);
2498 __ delayed()->nop();
2500 // if hi great then jump
2501 __ bne(T8, opr1_hi, *op->label());
2502 __ delayed();
2504 // now just comp lo as unsigned
2505 if (Assembler::is_simm16(iv_lo)) {
2506 __ sltiu(AT, opr1_lo, iv_lo);
2507 } else {
2508 __ move(T8, iv_lo);
2509 __ sltu(AT, opr1_lo, T8);
2510 }
2511 __ beq(AT, R0, *op->label());
2512 __ delayed()->nop();
2514 __ bind(L);
2515 }
2516 #endif
2517 break;
2519 case lir_cond_aboveEqual:
2520 #ifdef _LP64
2521 __ li(T8, lv);
2522 __ sltu(AT, opr1_lo, T8);
2523 __ beq(AT, R0, *op->label());
2524 __ delayed()->nop();
2525 #else
2526 if (is_zero) {
2527 if(op->label()==NULL) //by liaob2
2528 __ b(*op->label());
2529 else
2530 __ b_far(*op->label());
2531 __ delayed()->nop();
2532 } else {
2533 Label L;
2535 // if hi less then fail
2536 __ move(T8, iv_hi);
2537 __ sltu(AT, opr1_hi, T8);
2538 __ bne(AT, R0, L);
2539 __ delayed()->nop();
2541 // if hi great then jump
2542 __ bne(T8, opr1_hi, *op->label());
2543 __ delayed();
2545 // now just comp lo as unsigned
2546 if (Assembler::is_simm16(iv_lo)) {
2547 __ sltiu(AT, opr1_lo, iv_lo);
2548 } else {
2549 __ move(T8, iv_lo);
2550 __ sltu(AT, opr1_lo, T8);
2551 }
2552 __ beq(AT, R0, *op->label());
2553 __ delayed()->nop();
2555 __ bind(L);
2556 }
2557 #endif
2558 break;
2560 case lir_cond_greater:
2561 #ifdef _LP64
2562 __ li(T8, lv);
2563 __ slt(AT, T8, opr1_lo);
2564 __ bne_far(AT, R0, *op->label());
2565 __ delayed()->nop();
2566 #else
2567 if (is_zero) {
2568 Label L;
2569 __ bgtz(opr1_hi, *op->label());
2570 __ delayed()->nop();
2571 __ bne(opr1_hi, R0, L);
2572 __ delayed()->nop();
2573 __ bne(opr1_lo, R0, *op->label());
2574 __ delayed()->nop();
2575 __ bind(L);
2576 } else {
2577 Label L;
2579 // if hi great then jump
2580 __ move(T8, iv_hi);
2581 __ slt(AT, T8, opr1_hi);
2582 __ bne(AT, R0, *op->label());
2583 __ delayed()->nop();
2585 // if hi less then fail
2586 __ bne(T8, opr1_hi, L);
2587 __ delayed();
2589 // now just comp lo as unsigned
2590 __ move(T8, iv_lo);
2591 __ sltu(AT, T8, opr1_lo);
2592 __ bne(AT, R0, *op->label());
2593 __ delayed()->nop();
2595 __ bind(L);
2596 }
2597 #endif
2598 break;
2600 default:
2601 ShouldNotReachHere();
2602 }
2603 } else {
2604 Unimplemented();
2605 }
2606 } else if (opr1->is_single_fpu()) {
2607 #ifdef OPT_RANGECHECK
2608 assert(!op->check(), "just check");
2609 #endif
2610 assert(opr2->is_single_fpu(), "change the code");
2612 FloatRegister reg_op1 = opr1->as_float_reg();
2613 FloatRegister reg_op2 = opr2->as_float_reg();
2614 // bool un_ls
2615 bool un_jump = (op->ublock()->label()==op->label());
2617 Label& L = *op->label();
2619 switch (condition) {
2620 case lir_cond_equal:
2621 if (un_jump)
2622 __ c_ueq_s(reg_op1, reg_op2);
2623 else
2624 __ c_eq_s(reg_op1, reg_op2);
2625 __ bc1t(L);
2627 break;
2629 case lir_cond_notEqual:
2630 if (un_jump)
2631 __ c_eq_s(reg_op1, reg_op2);
2632 else
2633 __ c_ueq_s(reg_op1, reg_op2);
2634 __ bc1f(L);
2636 break;
2638 case lir_cond_less:
2639 if (un_jump)
2640 __ c_ult_s(reg_op1, reg_op2);
2641 else
2642 __ c_olt_s(reg_op1, reg_op2);
2643 __ bc1t(L);
2645 break;
2647 case lir_cond_lessEqual:
2648 case lir_cond_belowEqual:
2649 if (un_jump)
2650 __ c_ule_s(reg_op1, reg_op2);
2651 else
2652 __ c_ole_s(reg_op1, reg_op2);
2653 __ bc1t(L);
2655 break;
2657 case lir_cond_greaterEqual:
2658 case lir_cond_aboveEqual:
2659 if (un_jump)
2660 __ c_olt_s(reg_op1, reg_op2);
2661 else
2662 __ c_ult_s(reg_op1, reg_op2);
2663 __ bc1f(L);
2665 break;
2667 case lir_cond_greater:
2668 if (un_jump)
2669 __ c_ole_s(reg_op1, reg_op2);
2670 else
2671 __ c_ule_s(reg_op1, reg_op2);
2672 __ bc1f(L);
2674 break;
2676 default:
2677 ShouldNotReachHere();
2678 }
2679 __ delayed()->nop();
2680 } else if (opr1->is_double_fpu()) {
2681 #ifdef OPT_RANGECHECK
2682 assert(!op->check(), "just check");
2683 #endif
2684 assert(opr2->is_double_fpu(), "change the code");
2686 FloatRegister reg_op1 = opr1->as_double_reg();
2687 FloatRegister reg_op2 = opr2->as_double_reg();
2688 bool un_jump = (op->ublock()->label()==op->label());
2689 Label& L = *op->label();
2691 switch (condition) {
2692 case lir_cond_equal:
2693 if (un_jump)
2694 __ c_ueq_d(reg_op1, reg_op2);
2695 else
2696 __ c_eq_d(reg_op1, reg_op2);
2697 __ bc1t(L);
2699 break;
2701 case lir_cond_notEqual:
2702 if (un_jump)
2703 __ c_eq_d(reg_op1, reg_op2);
2704 else
2705 __ c_ueq_d(reg_op1, reg_op2);
2706 __ bc1f(L);
2708 break;
2710 case lir_cond_less:
2711 if (un_jump)
2712 __ c_ult_d(reg_op1, reg_op2);
2713 else
2714 __ c_olt_d(reg_op1, reg_op2);
2715 __ bc1t(L);
2717 break;
2719 case lir_cond_lessEqual:
2720 case lir_cond_belowEqual:
2721 if (un_jump)
2722 __ c_ule_d(reg_op1, reg_op2);
2723 else
2724 __ c_ole_d(reg_op1, reg_op2);
2725 __ bc1t(L);
2727 break;
2729 case lir_cond_greaterEqual:
2730 case lir_cond_aboveEqual:
2731 if (un_jump)
2732 __ c_olt_d(reg_op1, reg_op2);
2733 else
2734 __ c_ult_d(reg_op1, reg_op2);
2735 __ bc1f(L);
2737 break;
2739 case lir_cond_greater:
2740 if (un_jump)
2741 __ c_ole_d(reg_op1, reg_op2);
2742 else
2743 __ c_ule_d(reg_op1, reg_op2);
2744 __ bc1f(L);
2746 break;
2748 default:
2749 ShouldNotReachHere();
2750 }
2751 __ delayed()->nop();
2752 } else {
2753 Unimplemented();
2754 }
2755 }
2758 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
2759 LIR_Opr value = op->in_opr();
2760 LIR_Opr src = op->in_opr();
2761 LIR_Opr dest = op->result_opr();
2762 Bytecodes::Code code = op->bytecode();
2764 switch (code) {
2765 case Bytecodes::_i2l:
2766 move_regs(src->as_register(), dest->as_register_lo());
2767 NOT_LP64(__ sra (dest->as_register_hi(), dest->as_register_lo(), 31));
2768 break;
2770 case Bytecodes::_l2i:
2771 #ifndef _LP64
2772 move_regs (src->as_register_lo(), dest->as_register());
2773 #else
2774 __ dsll32(dest->as_register(), src->as_register_lo(), 0);
2775 __ dsra32(dest->as_register(), dest->as_register(), 0);
2776 #endif
2777 break;
2779 case Bytecodes::_i2b:
2780 #ifndef _LP64
2781 move_regs (src->as_register(), dest->as_register());
2782 __ sign_extend_byte(dest->as_register());
2783 #else
2784 __ dsll32(dest->as_register(), src->as_register(), 24);
2785 __ dsra32(dest->as_register(), dest->as_register(), 24);
2786 #endif
2787 break;
2789 case Bytecodes::_i2c:
2790 __ andi(dest->as_register(), src->as_register(), 0xFFFF);
2791 break;
2793 case Bytecodes::_i2s:
2794 #ifndef _LP64
2795 move_regs (src->as_register(), dest->as_register());
2796 __ sign_extend_short(dest->as_register());
2797 #else
2798 __ dsll32(dest->as_register(), src->as_register(), 16);
2799 __ dsra32(dest->as_register(), dest->as_register(), 16);
2800 #endif
2801 break;
2803 case Bytecodes::_f2d:
2804 __ cvt_d_s(dest->as_double_reg(), src->as_float_reg());
2805 break;
2807 case Bytecodes::_d2f:
2808 __ cvt_s_d(dest->as_float_reg(), src->as_double_reg());
2809 break;
2810 case Bytecodes::_i2f: {
2811 FloatRegister df = dest->as_float_reg();
2812 if(src->is_single_cpu()) {
2813 __ mtc1(src->as_register(), df);
2814 __ cvt_s_w(df, df);
2815 } else if (src->is_stack()) {
2816 Address src_addr = src->is_single_stack()
2817 ? frame_map()->address_for_slot(src->single_stack_ix())
2818 : frame_map()->address_for_slot(src->double_stack_ix());
2819 __ lw(AT, src_addr);
2820 __ mtc1(AT, df);
2821 __ cvt_s_w(df, df);
2822 } else {
2823 Unimplemented();
2824 }
2825 break;
2826 }
2827 case Bytecodes::_i2d: {
2828 FloatRegister dd = dest->as_double_reg();
2829 if (src->is_single_cpu()) {
2830 __ mtc1(src->as_register(), dd);
2831 __ cvt_d_w(dd, dd);
2832 } else if (src->is_stack()) {
2833 Address src_addr = src->is_single_stack()
2834 ? frame_map()->address_for_slot(value->single_stack_ix())
2835 : frame_map()->address_for_slot(value->double_stack_ix());
2836 __ lw(AT, src_addr);
2837 __ mtc1(AT, dd);
2838 __ cvt_d_w(dd, dd);
2839 } else {
2840 Unimplemented();
2841 }
2842 break;
2843 }
2844 case Bytecodes::_f2i: {
2845 FloatRegister fval = src->as_float_reg();
2846 Register dreg = dest->as_register();
2848 Label L;
2849 __ c_un_s(fval, fval); //NaN?
2850 __ bc1t(L);
2851 __ delayed();
2852 __ move(dreg, R0);
2854 __ trunc_w_s(F30, fval);
2856 /* Call SharedRuntime:f2i() to do valid convention */
2857 __ cfc1(AT, 31);
2858 __ li(T9, 0x10000);
2859 __ andr(AT, AT, T9);
2860 __ beq(AT, R0, L);
2861 __ delayed()->mfc1(dreg, F30);
2863 __ mov_s(F12, fval);
2864 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2865 __ move(dreg, V0);
2866 __ bind(L);
2867 break;
2868 }
2869 case Bytecodes::_d2i: {
2870 FloatRegister dval = src->as_double_reg();
2871 Register dreg = dest->as_register();
2873 Label L;
2874 #ifndef _LP64
2875 __ c_un_d(dval, dval); //NaN?
2876 __ bc1t(L);
2877 __ delayed();
2878 __ move(dreg, R0);
2879 #endif
2881 __ trunc_w_d(F30, dval);
2882 __ cfc1(AT, 31);
2883 __ li(T9, 0x10000);
2884 __ andr(AT, AT, T9);
2885 __ beq(AT, R0, L);
2886 __ delayed()->mfc1(dreg, F30);
2888 __ mov_d(F12, dval);
2889 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
2890 __ move(dreg, V0);
2891 __ bind(L);
2892 break;
2893 }
2894 case Bytecodes::_l2f: {
2895 FloatRegister ldf = dest->as_float_reg();
2896 if (src->is_double_cpu()) {
2897 #ifndef _LP64
2898 __ mtc1(src->as_register_lo(), ldf);
2899 __ mtc1(src->as_register_hi(), ldf + 1);
2900 __ cvt_s_l(ldf, ldf);
2901 #else
2902 __ dmtc1(src->as_register_lo(), ldf);
2903 __ cvt_s_l(ldf, ldf);
2904 #endif
2905 } else if (src->is_double_stack()) {
2906 Address src_addr=frame_map()->address_for_slot(value->double_stack_ix());
2907 #ifndef _LP64
2908 __ lw(AT, src_addr);
2909 __ mtc1(AT, ldf);
2910 __ lw(AT, src_addr.base(), src_addr.disp() + 4);
2911 __ mtc1(AT, ldf + 1);
2912 __ cvt_s_l(ldf, ldf);
2913 #else
2914 __ ld(AT, src_addr);
2915 __ dmtc1(AT, ldf);
2916 __ cvt_s_l(ldf, ldf);
2917 #endif
2918 } else {
2919 Unimplemented();
2920 }
2921 break;
2922 }
2923 case Bytecodes::_l2d: {
2924 FloatRegister ldd = dest->as_double_reg();
2925 if (src->is_double_cpu()) {
2926 #ifndef _LP64
2927 __ mtc1(src->as_register_lo(), ldd);
2928 __ mtc1(src->as_register_hi(), ldd + 1);
2929 __ cvt_d_l(ldd, ldd);
2930 #else
2931 __ dmtc1(src->as_register_lo(), ldd);
2932 __ cvt_d_l(ldd, ldd);
2933 #endif
2934 } else if (src->is_double_stack()) {
2935 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
2936 #ifndef _LP64
2937 __ lw(AT, src_addr);
2938 __ mtc1(AT, ldd);
2939 __ lw(AT, src_addr.base(), src_addr.disp() + 4);
2940 __ mtc1(AT, ldd + 1);
2941 __ cvt_d_l(ldd, ldd);
2942 #else
2943 __ ld(AT, src_addr);
2944 __ dmtc1(AT, ldd);
2945 __ cvt_d_l(ldd, ldd);
2946 #endif
2947 } else {
2948 Unimplemented();
2949 }
2950 break;
2951 }
2953 case Bytecodes::_f2l: {
2954 FloatRegister fval = src->as_float_reg();
2955 Register dlo = dest->as_register_lo();
2956 Register dhi = dest->as_register_hi();
2958 Label L;
2959 __ move(dhi, R0);
2960 __ c_un_s(fval, fval); //NaN?
2961 __ bc1t(L);
2962 __ delayed();
2963 __ move(dlo, R0);
2965 __ trunc_l_s(F30, fval);
2966 #ifdef _LP64
2967 __ cfc1(AT, 31);
2968 __ li(T9, 0x10000);
2969 __ andr(AT, AT, T9);
2970 __ beq(AT, R0, L);
2971 __ delayed()->dmfc1(dlo, F30);
2973 __ mov_s(F12, fval);
2974 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2975 __ move(dlo, V0);
2976 #else
2977 __ mfc1(dlo, F30);
2978 #endif
2979 NOT_LP64(__ mfc1(dhi, F31));
2980 __ bind(L);
2981 break;
2982 }
2983 case Bytecodes::_d2l: {
2984 FloatRegister dval = src->as_double_reg();
2985 Register dlo = dest->as_register_lo();
2986 Register dhi = dest->as_register_hi();
2988 Label L;
2989 __ move(dhi, R0);
2990 __ c_un_d(dval, dval); //NaN?
2991 __ bc1t(L);
2992 __ delayed();
2993 __ move(dlo, R0);
2995 __ trunc_l_d(F30, dval);
2996 #ifdef _LP64
2997 __ cfc1(AT, 31);
2998 __ li(T9, 0x10000);
2999 __ andr(AT, AT, T9);
3000 __ beq(AT, R0, L);
3001 __ delayed()->dmfc1(dlo, F30);
3003 __ mov_d(F12, dval);
3004 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
3005 __ move(dlo, V0);
3006 #else
3007 __ mfc1(dlo, F30);
3008 __ mfc1(dhi, F31);
3009 #endif
3010 __ bind(L);
3011 break;
3012 }
3014 default: ShouldNotReachHere();
3015 }
3016 }
3018 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
3019 if (op->init_check()) {
3020 add_debug_info_for_null_check_here(op->stub()->info());
3021 __ lw(AT,Address(op->klass()->as_register(),
3022 InstanceKlass::init_state_offset()));
3023 __ addi(AT, AT, -InstanceKlass::fully_initialized);
3024 __ bne_far(AT, R0,*op->stub()->entry());
3025 __ delayed()->nop();
3026 }
3027 __ allocate_object(
3028 op->obj()->as_register(),
3029 op->tmp1()->as_register(),
3030 op->tmp2()->as_register(),
3031 op->header_size(),
3032 op->object_size(),
3033 op->klass()->as_register(),
3034 *op->stub()->entry());
3036 __ bind(*op->stub()->continuation());
3037 }
3039 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
3040 if (UseSlowPath ||
3041 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
3042 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
3043 __ b_far(*op->stub()->entry());
3044 __ delayed()->nop();
3045 } else {
3046 Register len = op->len()->as_register();
3047 Register tmp1 = op->tmp1()->as_register();
3048 Register tmp2 = op->tmp2()->as_register();
3049 Register tmp3 = op->tmp3()->as_register();
3050 __ allocate_array(op->obj()->as_register(),
3051 len,
3052 tmp1,
3053 tmp2,
3054 tmp3,
3055 arrayOopDesc::header_size(op->type()),
3056 array_element_size(op->type()),
3057 op->klass()->as_register(),
3058 *op->stub()->entry());
3059 }
3060 __ bind(*op->stub()->continuation());
3061 }
3063 void LIR_Assembler::type_profile_helper(Register mdo,
3064 ciMethodData *md, ciProfileData *data,
3065 Register recv, Label* update_done) {
3066 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
3067 Label next_test;
3068 // See if the receiver is receiver[n].
3069 __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
3070 __ bne(AT, recv, next_test);
3071 __ delayed()->nop();
3072 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
3073 __ ld_ptr(AT, data_addr);
3074 __ addi(AT, AT, DataLayout::counter_increment);
3075 __ st_ptr(AT, data_addr);
3076 __ b(*update_done);
3077 __ delayed()->nop();
3078 __ bind(next_test);
3079 }
3081 // Didn't find receiver; find next empty slot and fill it in
3082 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
3083 Label next_test;
3084 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
3085 __ ld_ptr(AT, recv_addr);
3086 __ bne(AT, R0, next_test);
3087 __ delayed()->nop();
3088 __ st_ptr(recv, recv_addr);
3089 __ move(AT, DataLayout::counter_increment);
3090 __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
3091 __ b(*update_done);
3092 __ delayed()->nop();
3093 __ bind(next_test);
3094 }
3095 }
// Shared type-check emission used by checkcast/instanceof: tests whether
// op->object() is an instance of op->klass(), branching to one of the three
// caller-supplied labels (success / failure / obj_is_null).  When profiling
// is enabled, MDO counters are updated before branching.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();       // constant klass being tested against
  Register klass_RInfo = op->tmp2()->as_register();   // the object's actual klass
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  // With profiling, success/failure first route through the MDO-update code.
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // If a temp register aliases obj, redirect that temp to dst so obj is
  // not clobbered before the null check / klass load.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    Label not_null;
    __ bne(obj, R0, not_null);
    __ delayed()->nop();
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    // Record the null_seen flag in the MDO header word.
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ lw(AT, data_addr);
    __ ori(AT, AT, header_bits);
    __ sw(AT,data_addr);
    __ b(*obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ beq(obj, R0, *obj_is_null);
    __ delayed()->nop();
  }

  if (!k->is_loaded()) {
    // Klass not resolved at compile time: emit a patching load.
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    // NOTE(review): on 32-bit (_LP64 undefined) k_RInfo is left unset here
    // for a loaded klass -- confirm the 32-bit path never reads it below.
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Fast check: a leaf/final klass, so an exact klass-pointer compare
    // suffices.
    // get object class
    // not a safepoint as obj null check happens earlier
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj);
      __ bne(k_RInfo, Rtmp1, *failure_target);
      __ delayed()->nop();
    } else {
      __ ld(AT, Address(obj, oopDesc::klass_offset_in_bytes()));
      __ bne(k_RInfo, AT, *failure_target);
      __ delayed()->nop();
    }
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ld(AT, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // Super-check offset points at the primary supers array:
        // a mismatch is a definitive failure.
        __ bne(k_RInfo, AT, *failure_target);
        __ delayed()->nop();
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ beq(k_RInfo, AT, *success_target);
        __ delayed()->nop();
        // check for self
        __ beq(k_RInfo, klass_RInfo, *success_target);
        __ delayed()->nop();

        // Cache miss: call the slow subtype-check runtime stub.
        __ push(klass_RInfo);
        __ push(k_RInfo);
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        // NOTE(review): both pops below target klass_RInfo, while the
        // parallel slow path further down pops klass_RInfo then k_RInfo --
        // confirm which register the stub's boolean result is expected in.
        __ pop(klass_RInfo);
        __ pop(klass_RInfo);
        // result is a boolean
        __ beq(klass_RInfo, R0, *failure_target);
        __ delayed()->nop();
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ pop(klass_RInfo);
      __ pop(k_RInfo);
      // result is a boolean
      __ beq(k_RInfo, R0, *failure_target);
      __ delayed()->nop();
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    // Success path: record the receiver type in the MDO, then jump to the
    // caller's success label.
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): update_done is declared but never bound; 'success' is
    // passed to type_profile_helper instead -- confirm this is intended.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);
    __ delayed()->nop();

    // Failure path: decrement the counter, then jump to the caller's
    // failure label.
    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ ld_ptr(AT, counter_addr);
    __ addi(AT, AT, -DataLayout::counter_increment);
    __ st_ptr(AT, counter_addr);

    __ b(*failure);
    __ delayed()->nop();
  }
  __ b(*success);
  __ delayed()->nop();
}
// Emit code for a LIR type-check operation.  Three codes are handled:
//  - lir_store_check: array-store check (is 'value' assignable to the
//    element type of 'array'?), emitted inline below;
//  - lir_checkcast / lir_instanceof: delegated to emit_typecheck_helper().
// When profiling is enabled, success/failure paths first update the MDO
// (receiver-type rows resp. failure counter) before continuing.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();         // value being stored
    Register array = op->array()->as_register();          // destination array
    Register k_RInfo = op->tmp1()->as_register();         // element klass
    Register klass_RInfo = op->tmp2()->as_register();     // value's klass
    Register tmp = op->tmp3()->as_register();

    CodeStub* stub = op->stub();                          // slow-path stub (throws ArrayStoreException)

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    // With profiling, route through the profile-update blocks below first;
    // otherwise go straight to 'done' / the deopt stub.
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if(op->should_profile()) {
      Label not_null;
      __ bne(value, R0, not_null);
      __ delayed()->nop();
      // Storing NULL: set the null_seen flag in the MDO header and skip the
      // type check entirely (a null store always succeeds).
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ lw(AT, data_addr);
      __ ori(AT, AT, header_bits);
      __ sw(AT,data_addr);
      __ b(done);
      __ delayed()->nop();
      __ bind(not_null);
    } else {
      // Not profiling: a null store trivially passes the check.
      __ beq(value, R0, done);
      __ delayed()->nop();
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);
    // get instance klass (it's already uncompressed)
    __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, tmp, success_target, failure_target, NULL);
    // Fast path inconclusive: call the slow subtype-check stub.  Arguments
    // are passed on the stack; the stub leaves a boolean result there
    // (ends up in k_RInfo after the pops — zero means "not a subtype").
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ pop(klass_RInfo);
    __ pop(k_RInfo);
    // result is a boolean
    __ beq(k_RInfo, R0, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      // Success: record the receiver type in the MDO's receiver rows.
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);
      __ delayed()->nop();

      // Failure: decrement the counter (it is pre-incremented elsewhere on
      // the taken path), then jump to the exception stub.
      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ ld_ptr(AT, counter_addr);
      __ addi(AT, AT, -DataLayout::counter_increment);
      __ st_ptr(AT, counter_addr);
      __ b(*stub->entry());
      __ delayed()->nop();
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    // checkcast: result is the (unchanged) object; failure deopts/throws
    // via the operation's stub.
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ move(dst, obj);
    }
  } else if (code == lir_instanceof) {
    // instanceof: materialize 0 (failure) or 1 (success) in dst.
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ move(dst, R0);
    __ b(done);
    __ delayed()->nop();
    __ bind(success);
    __ addi(dst, R0, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
3364 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
3365 if (op->code() == lir_cas_long) {
3366 #ifdef _LP64
3367 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3368 Register newval = (op->new_value()->is_single_cpu() ? op->new_value()->as_register() : op->new_value()->as_register_lo());
3369 Register cmpval = (op->cmp_value()->is_single_cpu() ? op->cmp_value()->as_register() : op->cmp_value()->as_register_lo());
3370 assert(newval != NULL, "new val must be register");
3371 assert(cmpval != newval, "cmp and new values must be in different registers");
3372 assert(cmpval != addr, "cmp and addr must be in different registers");
3373 assert(newval != addr, "new value and addr must be in different registers");
3374 if (os::is_MP()) {}
3375 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3376 #else
3377 Register addr = op->addr()->as_register();
3378 if (os::is_MP()) {}
3379 __ cmpxchg8(op->new_value()->as_register_lo(),
3380 op->new_value()->as_register_hi(),
3381 addr,
3382 op->cmp_value()->as_register_lo(),
3383 op->cmp_value()->as_register_hi())
3384 #endif
3385 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
3386 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
3387 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3388 Register newval = op->new_value()->as_register();
3389 Register cmpval = op->cmp_value()->as_register();
3390 assert(newval != NULL, "new val must be register");
3391 assert(cmpval != newval, "cmp and new values must be in different registers");
3392 assert(cmpval != addr, "cmp and addr must be in different registers");
3393 assert(newval != addr, "new value and addr must be in different registers");
3394 if (op->code() == lir_cas_obj) {
3395 #ifdef _LP64
3396 if (UseCompressedOops) {
3397 Register tmp_reg = S7;
3398 __ push(cmpval);
3399 __ encode_heap_oop(cmpval);
3400 __ move(tmp_reg, newval);
3401 __ encode_heap_oop(tmp_reg);
3402 if (os::is_MP()) {}
3403 __ cmpxchg32(tmp_reg, addr, cmpval); // 32-bit test-and-set
3404 __ pop(cmpval);
3405 } else
3406 {
3407 if (os::is_MP()) {}
3408 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3409 }
3410 } else
3411 #endif
3412 {
3413 __ cmpxchg32(newval, addr, cmpval); // 32-bit test-and-set
3414 }
3415 } else {
3416 Unimplemented();
3417 }
3418 }
#ifndef MIPS64
// Conditional move is not implemented for this (non-MIPS64) configuration;
// conditional results are produced with explicit branch sequences instead.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  Unimplemented();
}
#endif
// Emit integer / FP arithmetic for a LIR op2 (add, sub, mul, div, ...).
// Dispatches on the operand kinds: single cpu register, double (long)
// register, single/double FPU register, or a stack-slot/address left
// operand that is updated in place.  Unlike x86, 'left' is not required
// to equal 'dest'.
// NOTE(review): delay-slot and mult/mflo scheduling (the paired __ nop()
// emissions) is deliberate MIPS pipeline handling — do not reorder.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info,bool pop_fpu_stack) {
  assert(info == NULL || ((code == lir_rem || code == lir_div || code == lir_sub) && right->is_double_cpu()), "info is only for ldiv/lrem");
  if (left->is_single_cpu()) {
    // left may not be equal to dest on mips.
    //assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_cpu_register()) {
      // cpu register - cpu register
      Register rreg, res;
      if (right->is_single_cpu()) {
        rreg = right->as_register();
#ifdef _LP64
        if(dest->is_double_cpu())
          res = dest->as_register_lo();
        else
#endif
          res = dest->as_register();
      } else if (right->is_double_cpu()) {
        assert(right->is_double_cpu(),"right must be long");
        rreg = right->as_register_lo();
        res = dest->as_register_lo();
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add:
#ifdef _LP64
          // 32-bit result must be kept properly sign-extended in the
          // 64-bit register, hence the addu32 variant.
          if (dest->type() == T_INT)
            __ addu32(res, lreg, rreg);
          else
#endif
            __ addu(res, lreg, rreg);
          break;

        case lir_mul:
#ifndef _LP64
          //by aoqi
          __ mult(lreg, rreg);
#else
          __ dmult(lreg, rreg);
#endif
          // Two nops before mflo: pipeline hazard between mult and mflo.
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           * Example: java.net.URLClassLoader::string2int()
           *   a6: 0xcafebab
           *   s0: 16
           *
           *   104 mul [a6|I] [s0|I] [t0|I]
           *   0x00000055655e3728: dmult a6, s0
           *   0x00000055655e372c: sll zero, zero, 0
           *   0x00000055655e3730: sll zero, zero, 0
           *   0x00000055655e3734: mflo t0    <-- error
           *
           *   t0: 0xFFFFFFFFcafebab0 (Right)
           *   t0: 0x00000000cafebab0 (Wrong)
           */
          // sll reg, reg, 0 sign-extends the low 32 bits on MIPS64.
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_sub:
#ifdef _LP64
          if (dest->type() == T_INT)
            __ subu32(res, lreg, rreg);
          else
#endif
            __ subu(res, lreg, rreg);
          break;

        default:
          ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // cpu register - stack
      Unimplemented();
    } else if (right->is_constant()) {
      // cpu register - constant
      Register res;
      if (dest->is_double_cpu()) {
        res = dest->as_register_lo();
      } else {
        res = dest->as_register();
      }
      // NOTE(review): a long constant is truncated to jint here — assumed
      // to fit for the single-cpu-left case; confirm against the LIR
      // generator's operand constraints.
      jint c;
      if (right->type() == T_INT) {
        c = right->as_constant_ptr()->as_jint();
      } else {
        c = right->as_constant_ptr()->as_jlong();
      }

      switch (code) {
        case lir_mul_strictfp:
        case lir_mul:
          __ move(AT, c);
#ifndef _LP64
          //by aoqi
          __ mult(lreg, AT);
#else
          __ dmult(lreg, AT);
#endif
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended (see the detailed
           * example in the register-register lir_mul case above). */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_add:
          // Use the immediate form when the constant fits in simm16.
          if (Assembler::is_simm16(c)) {
            __ addiu(res, lreg, c);
          } else {
            __ move(AT, c);
            __ addu(res, lreg, AT);
          }
          break;

        case lir_sub:
          // Subtract-by-constant is emitted as add of -c when it fits.
          if (Assembler::is_simm16(-c)) {
            __ addi(res, lreg, -c);
          } else {
            __ move(AT, c);
            __ subu(res, lreg, AT);
          }
          break;

        default:
          ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // Long arithmetic: on !_LP64 a long is a hi/lo register pair and
    // carries/borrows are synthesized with sltu; on _LP64 a long fits in
    // one register.
    Register op1_lo = left->as_register_lo();
    Register op1_hi = left->as_register_hi();
    Register op2_lo;
    Register op2_hi;
    Register dst_lo;
    Register dst_hi;

    if(dest->is_single_cpu())
    {
      dst_lo = dest->as_register();
    }
    else
    {
#ifdef _LP64
      dst_lo = dest->as_register_lo();
#else
      dst_lo = dest->as_register_lo();
      dst_hi = dest->as_register_hi();
#endif
    }
    if (right->is_constant()) {
      // Materialize the constant in AT (hi half is zero on 32-bit).
      op2_lo = AT;
      op2_hi = R0;
#ifndef _LP64
      __ li(AT, right->as_constant_ptr()->as_jint());
#else
      __ li(AT, right->as_constant_ptr()->as_jlong_bits());
#endif
    } else if (right->is_double_cpu()) { // Double cpu
      assert(right->is_double_cpu(),"right must be long");
      assert(dest->is_double_cpu(), "dest must be long");
      op2_lo = right->as_register_lo();
      op2_hi = right->as_register_hi();
    } else {
#ifdef _LP64
      op2_lo = right->as_register();
#else
      ShouldNotReachHere();
#endif
    }

    NOT_LP64(assert_different_registers(op1_lo, op1_hi, op2_lo, op2_hi));
    // Jin: Why?
    // LP64_ONLY(assert_different_registers(op1_lo, op2_lo));

    switch (code) {
      case lir_add:
#ifndef _LP64
        //by aoqi
        // 64-bit add from 32-bit halves: sltu computes the carry.
        __ addu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, dst_lo, op2_lo);
        __ addu(dst_hi, op1_hi, op2_hi);
        __ addu(dst_hi, dst_hi, AT);
#else
        __ addu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_sub:
#ifndef _LP64
        //by aoqi
        // 64-bit sub from 32-bit halves: sltu computes the borrow.
        __ subu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, op1_lo, dst_lo);
        __ subu(dst_hi, op1_hi, op2_hi);
        __ subu(dst_hi, dst_hi, AT);
#else
        __ subu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_mul:
        {
#ifndef _LP64
          //by aoqi
          // 64x64->64 multiply from 32-bit halves:
          //   result = (op1_hi*op2_lo + op1_lo*op2_hi) << 32 + op1_lo*op2_lo
          // with shortcuts when the low halves or both high halves are 0.
          Label zero, quick, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

          //quick?
          __ orr(AT, op2_hi, op1_hi);
          __ beq(AT, R0, quick);
          __ delayed()->nop();

          __ multu(op2_lo, op1_hi);
          __ nop();
          __ nop();
          __ mflo(dst_hi);
          __ multu(op2_hi, op1_lo);
          __ nop();
          __ nop();
          __ mflo(AT);

          __ bind(quick);
          __ multu(op2_lo, op1_lo);
          __ addu(dst_hi, dst_hi, AT);
          __ nop();
          __ mflo(dst_lo);
          __ mfhi(AT);
          __ b(done);
          __ delayed()->addu(dst_hi, dst_hi, AT);

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#else
          Label zero, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

#ifdef ASSERT
          //op1_hi, op2_hi should be 0
          {
            Label L;
            __ beq(op1_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
          {
            Label L;
            __ beq(op2_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
#endif

          __ multu(op2_lo, op1_lo);
          __ nop();
          __ nop();
          __ mflo(dst_lo);
          __ b(done);
          __ delayed()->nop();

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#endif //_LP64
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    // Single-precision FP arithmetic.
    assert(right->is_single_fpu(),"right must be float");
    assert(dest->is_single_fpu(), "dest must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = dest->as_float_reg();

    switch (code) {
      case lir_add:
        __ add_s(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_s(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        __ mul_s(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_s(res, lreg, rreg);
        break;
      default : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    // Double-precision FP arithmetic.
    assert(right->is_double_fpu(),"right must be double");
    assert(dest->is_double_fpu(), "dest must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = dest->as_double_reg();

    switch (code) {
      case lir_add:
        __ add_d(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_d(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        // by yjl 9/13/2005
        __ mul_d(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_d(res, lreg, rreg);
        break;
      // case lir_rem:
      //   __ rem_d(res, lreg, rreg);
      //   break;
      default : ShouldNotReachHere();
    }
  }
  else if (left->is_single_stack() || left->is_address()) {
    // In-place update of a stack slot or memory address (load/op/store
    // through AT; T8 is used for wide constants on _LP64).
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ add(AT, AT, rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dadd(AT, AT, rreg);
          __ sd(AT, laddr);
#endif
          break;
        case lir_sub:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ sub(AT,AT,rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dsub(AT,AT,rreg);
          __ sd(AT, laddr);
#endif
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
#ifndef _LP64
      jint c = right->as_constant_ptr()->as_jint();
#else
      jlong c = right->as_constant_ptr()->as_jlong_bits();
#endif
      switch (code) {
        case lir_add: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, c);
#else
          __ li(T8, c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        case lir_sub: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, -c);
#else
          __ li(T8, -c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Emit a unary FP intrinsic (abs, sqrt; log/log10 unimplemented; the trig
// cases call trigfunc).  Only double operands are supported.
//FIXME,lir_log, lir_log10,lir_abs,lir_sqrt,so many new lir instruction @jerome
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op *op) {
  if (value->is_double_fpu()) {
    // assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : //__ flog() ; break;
      case lir_log10 : //__ flog10() ;
        Unimplemented();
        break;
      case lir_abs   : __ abs_d(dest->as_double_reg(), value->as_double_reg()) ; break;
      case lir_sqrt  : __ sqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
      case lir_sin   :
        // NOTE(review): these comments mention ebx — leftovers from the
        // x86 port; trigfunc() emits the MIPS sequence.
        // Should consider not saving ebx if not necessary
        __ trigfunc('s', 0);
        break;
      case lir_cos :
        // Should consider not saving ebx if not necessary
        // assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', 0);
        break;
      case lir_tan :
        // Should consider not saving ebx if not necessary
        __ trigfunc('t', 0);
        break;
      default : ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}
//FIXME, if right is on the stack!
// Emit a bitwise logic op (and/or/xor) for int (single cpu) or long
// (double cpu) operands; right may be a register, constant, or (int case
// only) a stack slot.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register dstreg = dst->as_register();
    Register reg = left->as_register();
    if (right->is_constant()) {
      // Constant operand is materialized in AT first.
      int val = right->as_constant_ptr()->as_jint();
      __ move(AT, val);
      switch (code) {
        case lir_logic_and:
          __ andr (dstreg, reg, AT);
          break;
        case lir_logic_or:
          __ orr(dstreg, reg, AT);
          break;
        case lir_logic_xor:
          __ xorr(dstreg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      // NOTE(review): this branch writes the result into 'reg' (left),
      // not 'dstreg' — presumably relies on left == dst here; verify
      // against the LIR generator's constraints.
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and:
          //FIXME. lw or ld_ptr?
          __ lw(AT, raddr);
          __ andr(reg, reg,AT);
          break;
        case lir_logic_or:
          __ lw(AT, raddr);
          __ orr(reg, reg, AT);
          break;
        case lir_logic_xor:
          __ lw(AT, raddr);
          __ xorr(reg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andr (dstreg, reg, rright); break;
        case lir_logic_or : __ orr  (dstreg, reg, rright); break;
        case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    // Long operands: hi/lo pairs on !_LP64, single registers on _LP64.
    Register l_lo = left->as_register_lo();
    Register dst_lo = dst->as_register_lo();
#ifndef _LP64
    Register l_hi = left->as_register_hi();
    Register dst_hi = dst->as_register_hi();
#endif

    if (right->is_constant()) {
#ifndef _LP64
      // Apply the op to each 32-bit half independently.
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();

      switch (code) {
        case lir_logic_and:
          __ move(AT, r_lo);
          __ andr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ andr(dst_hi, l_hi, AT);
          break;

        case lir_logic_or:
          __ move(AT, r_lo);
          __ orr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ orr(dst_hi, l_hi, AT);
          break;

        case lir_logic_xor:
          __ move(AT, r_lo);
          __ xorr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ xorr(dst_hi, l_hi, AT);
          break;

        default: ShouldNotReachHere();
      }
#else
      __ li(AT, right->as_constant_ptr()->as_jlong());

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, AT);
          break;

        case lir_logic_or:
          __ orr(dst_lo, l_lo, AT);
          break;

        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, AT);
          break;

        default: ShouldNotReachHere();
      }
#endif

    } else {
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }
  }
}
//done here. aoqi. 12-12 22:25
// Emit integer divide / remainder (lir_idiv / lir_irem).  MIPS div/ddiv
// set no exception on divide-by-zero, so a teq trap instruction is
// emitted after the divide; the trap PC is registered for the
// ArithmeticException debug info.  Quotient is read from LO, remainder
// from HI.  (The "eax and edx" comment below is an x86 leftover.)
// we assume that eax and edx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    int divisor = right->as_constant_ptr()->as_jint();
    assert(divisor!=0, "must be nonzero");
#ifndef _LP64
    __ move(AT, divisor);
    __ div(lreg, AT);
#else
    __ li(AT, divisor);
    __ ddiv(lreg, AT);
#endif
    int idivl_offset = code_offset();

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    // teq traps if AT == R0; the divisor is asserted nonzero above, so
    // this never fires here — kept for a uniform debug-info shape.
    __ teq(R0, AT, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  } else {
    Register rreg = right->as_register();
#ifndef _LP64
    __ div(lreg, rreg);
#else
    __ ddiv(lreg, rreg);
#endif

    int idivl_offset = code_offset();
    // Trap (and raise ArithmeticException via debug info) if divisor == 0.
    __ teq(R0, rreg, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  }

  // get the result
  if (code == lir_irem) {
    __ mfhi(dreg);
#ifdef _LP64
    // Keep a 32-bit remainder sign-extended in the 64-bit register.
    if (result->type() == T_INT)
      __ sll(dreg, dreg, 0);
#endif
  } else if (code == lir_idiv) {
    __ mflo(dreg);
  } else {
    ShouldNotReachHere();
  }
}
// Emit floating-point remainder (lir_frem) for float or double operands,
// using the MacroAssembler rem_s / rem_d helpers with a scratch FP
// register supplied in 'temp'.
void LIR_Assembler::arithmetic_frem(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  if (left->is_single_fpu()) {
    assert(right->is_single_fpu(),"right must be float");
    assert(result->is_single_fpu(), "dest must be float");
    assert(temp->is_single_fpu(), "dest must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = result->as_float_reg();
    FloatRegister tmp = temp->as_float_reg();

    switch (code) {
      case lir_frem:
        __ rem_s(res, lreg, rreg, tmp);
        break;
      default : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    assert(right->is_double_fpu(),"right must be double");
    assert(result->is_double_fpu(), "dest must be double");
    assert(temp->is_double_fpu(), "dest must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = result->as_double_reg();
    FloatRegister tmp = temp->as_double_reg();

    switch (code) {
      case lir_frem:
        __ rem_d(res, lreg, rreg, tmp);
        break;
      default : ShouldNotReachHere();
    }
  }
}
4124 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst,LIR_Op2 * op) {
4125 Register dstreg = dst->as_register();
4126 if (code == lir_cmp_fd2i) {
4127 if (left->is_single_fpu()) {
4128 FloatRegister leftreg = left->as_float_reg();
4129 FloatRegister rightreg = right->as_float_reg();
4131 Label done;
4132 // equal?
4133 __ c_eq_s(leftreg, rightreg);
4134 __ bc1t(done);
4135 __ delayed();
4136 __ move(dstreg, R0);
4137 // less?
4138 __ c_olt_s(leftreg, rightreg);
4139 __ bc1t(done);
4140 __ delayed();
4141 __ move(dstreg, -1);
4142 // great
4143 __ move(dstreg, 1);
4145 __ bind(done);
4146 } else {
4147 assert(left->is_double_fpu(), "Must double");
4148 FloatRegister leftreg = left->as_double_reg();
4149 FloatRegister rightreg = right->as_double_reg();
4151 Label done;
4152 // equal?
4153 __ c_eq_d(leftreg, rightreg);
4154 __ bc1t(done);
4155 __ delayed();
4156 __ move(dstreg, R0);
4157 // less?
4158 __ c_olt_d(leftreg, rightreg);
4159 __ bc1t(done);
4160 __ delayed();
4161 __ move(dstreg, -1);
4162 // great
4163 __ move(dstreg, 1);
4165 __ bind(done);
4166 }
4167 } else if (code == lir_ucmp_fd2i) {
4168 if (left->is_single_fpu()) {
4169 FloatRegister leftreg = left->as_float_reg();
4170 FloatRegister rightreg = right->as_float_reg();
4172 Label done;
4173 // equal?
4174 __ c_eq_s(leftreg, rightreg);
4175 __ bc1t(done);
4176 __ delayed();
4177 __ move(dstreg, R0);
4178 // less?
4179 __ c_ult_s(leftreg, rightreg);
4180 __ bc1t(done);
4181 __ delayed();
4182 __ move(dstreg, -1);
4183 // great
4184 __ move(dstreg, 1);
4186 __ bind(done);
4187 } else {
4188 assert(left->is_double_fpu(), "Must double");
4189 FloatRegister leftreg = left->as_double_reg();
4190 FloatRegister rightreg = right->as_double_reg();
4192 Label done;
4193 // equal?
4194 __ c_eq_d(leftreg, rightreg);
4195 __ bc1t(done);
4196 __ delayed();
4197 __ move(dstreg, R0);
4198 // less?
4199 __ c_ult_d(leftreg, rightreg);
4200 __ bc1t(done);
4201 __ delayed();
4202 __ move(dstreg, -1);
4203 // great
4204 __ move(dstreg, 1);
4206 __ bind(done);
4207 }
4208 } else {
4209 assert(code == lir_cmp_l2i, "check");
4210 Register l_lo, l_hi, r_lo, r_hi, d_lo, d_hi;
4211 l_lo = left->as_register_lo();
4212 l_hi = left->as_register_hi();
4213 r_lo = right->as_register_lo();
4214 r_hi = right->as_register_hi();
4216 Label done;
4217 #ifndef _LP64
4218 // less?
4219 __ slt(AT, l_hi, r_hi);
4220 __ bne(AT, R0, done);
4221 __ delayed();
4222 __ move(dstreg, -1);
4223 // great?
4224 __ slt(AT, r_hi, l_hi);
4225 __ bne(AT, R0, done);
4226 __ delayed();
4227 __ move(dstreg, 1);
4228 #endif
4230 // now compare low 32 bits
4231 // below?
4232 #ifndef _LP64
4233 __ sltu(AT, l_lo, r_lo);
4234 #else
4235 __ slt(AT, l_lo, r_lo);
4236 #endif
4237 __ bne(AT, R0, done);
4238 __ delayed();
4239 __ move(dstreg, -1);
4240 // above?
4241 #ifndef _LP64
4242 __ sltu(AT, r_lo, l_lo);
4243 #else
4244 __ slt(AT, r_lo, l_lo);
4245 #endif
4246 __ bne(AT, R0, done);
4247 __ delayed();
4248 __ move(dstreg, 1);
4249 // equal
4250 __ move(dstreg, R0);
4252 __ bind(done);
4253 }
4254 }
// Intentionally empty: no call-site alignment is emitted on this port
// (presumably not needed for how MIPS call sites are patched — compare
// the x86 version, which aligns for atomic displacement patching).
void LIR_Assembler::align_call(LIR_Code code) {
}
// Emit a direct Java call with the given relocation type, fill the branch
// delay slot, and record the call's debug/oop-map info at the return PC.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  //assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned");
  __ call(op->addr(), rtype);
  __ delayed()->nop();
  add_call_info(code_offset(), op->info());
}
// Emit an inline-cache call (ic_call loads the IC holder and emits the
// call sequence) and record debug info at the return PC.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
}
/* Currently, vtable-dispatch is only enabled for sparc platforms */
// C1 on this port never emits LIR vtable calls, so reaching here is a bug.
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}
// Emit the out-of-line static call stub for the most recent call site.
// The stub holds a patchable (method, entry) pair: Rmethod and the jump
// target are emitted as patchable_set48 sequences with placeholder values
// (NULL metadata / -1 address) that the runtime later patches when the
// call is resolved (see CompiledStaticCall::set_to_interpreted).
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }
  int start = __ offset();
  // Tie this stub to its call site for later lookup/patching.
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Placeholder metadata; allocate an index so patching can install the
  // resolved Method* later.
  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  //see set_to_interpreted
  __ patchable_set48(Rmethod, (long)o);

  // Placeholder jump target (-1), patched to the real entry on resolution.
  __ patchable_set48(AT, (long)-1);
  __ jr(AT);
  __ delayed()->nop();
  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
// Emit an athrow: exception oop in V0, throwing PC in V1.  Materializes
// the current PC into the exceptionPC register (with an internal-pc
// relocation so it survives code movement), records debug info, and calls
// the Runtime1 exception handler (nofpu variant when the method has no
// FPU code, to skip FPU state saving).
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register()== V0, "must match");
  assert(exceptionPC->as_register()== V1, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  long pc_for_athrow = (long)__ pc();
  int pc_for_athrow_offset = __ offset();
  Register epc = exceptionPC->as_register();
  __ relocate(relocInfo::internal_pc_type);
  __ li48(epc, pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler
  __ verify_not_null_oop(V0);
  // search an exception handler (eax: exception oop, edx: throwing pc)
  if (compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
            relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
            relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
}
// Emit the unwind path: jump to the method's shared unwind handler with
// the exception oop in FSR.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register()== FSR, "must match");
  __ b(_unwind_handler_entry);
  __ delayed()->nop();
}
// Emit a shift (shl/shr/ushr) where the shift amount is in a register.
// The tmp operand is unused on MIPS; linear scan must not allocate it.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // optimized version for linear scan:
  // * tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

#ifdef _LP64
  Register count_reg = count->as_register();
  Register value_reg;
  Register dest_reg;
  if (left->is_single_cpu()) {
    value_reg = left->as_register();
    dest_reg = dest->as_register();
  } else if (left->is_double_cpu()) {
    value_reg = left->as_register_lo();
    dest_reg = dest->as_register_lo();
  } else {
    ShouldNotReachHere();
  }
  assert_different_registers(count_reg, value_reg);
  switch (code) {
    case lir_shl:
      if (dest->type() == T_INT)
        __ sllv(dest_reg, value_reg, count_reg);
      else
        __ dsllv(dest_reg, value_reg, count_reg);
      break;
    // NOTE(review): a 64-bit arithmetic shift is used even for T_INT —
    // presumably ints are kept sign-extended in 64-bit registers here,
    // so the result is still correct; confirm against the calling convention.
    case lir_shr: __ dsrav(dest_reg, value_reg, count_reg); break;
    case lir_ushr:
#if 1
      /*
       Jin: in Java, ushift_right of an int requires a 32-bit UNSIGNED operation.
       However, dsrl shifts the (sign-extended) high 32 bits in as well.
       Thus, if the source register contains a negative int value,
       the result is incorrect.
       * DoubleCvt.java
       *
       * static int inp (int shift)
       * {
       *   return -1 >>> (32 - shift);
       * }
       *
       * 26 ushift_right [t0|I] [a4|I] [a6|I]
       * 0x00000055616d2a98: dsrl a6, t0, a4  <-- error
       */

      // java.math.MutableBigInteger::primitiveRightShift
      //
      // 108 ushift_right [a6|I] [a4|I] [a4|I]
      //   0x00000055646d2f70: dsll32 a4, a6, 0 \
      //   0x00000055646d2f74: dsrl32 a4, a4, 0 |- error!
      //   0x00000055646d2f78: dsrl a4, a4, a4 /
      if (left->type() == T_INT && dest->type() == T_INT) {
        // Zero the high 32 bits first so the logical shift cannot pull
        // sign-extension bits into the result.
        __ dsll32(AT, value_reg, 0); // Omit the high 32 bits
        __ dsrl32(AT, AT, 0);
        __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
        break;
      }
#endif
      __ dsrlv(dest_reg, value_reg, count_reg); break;
    default: ShouldNotReachHere();
  }
#else
  if (left->is_single_cpu()) {
    Register value_reg = left->as_register();
    Register count_reg = count->as_register();
    Register dest_reg = dest->as_register();
    assert_different_registers(count_reg, value_reg);

    switch (code) {
      case lir_shl: __ sllv(dest_reg, value_reg, count_reg); break;
      case lir_shr: __ srav(dest_reg, value_reg, count_reg); break;
      case lir_ushr: __ srlv(dest_reg, value_reg, count_reg); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // 64-bit shift synthesized from two 32-bit registers (hi:lo).
    Register creg = count->as_register();
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();

    __ andi(creg, creg, 0x3f);  // Java masks a long shift count to 6 bits
    switch (code) {
      case lir_shl:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dlo, lo);
          __ b(done);
          __ delayed();             // next emitted instruction fills the delay slot
          __ move(dhi, hi);

          // count >= 32: the low word shifts entirely into the high word
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);
          __ sllv(dhi, lo, AT);
          __ b(done);
          __ delayed();
          __ move(dlo, R0);

          // count < 32: shift both words and carry the spilled bits of lo into hi
          __ bind(normal);
          __ sllv(dhi, hi, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ srlv(AT, lo, AT);
          __ orr(dhi, dhi, AT);
          __ sllv(dlo, lo, creg);
          __ bind(done);
        }
        break;
      case lir_shr:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);

          // count >= 32: lo = hi >> (count-32), hi = sign of hi
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);
          __ srav(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ sra(dhi, hi, BitsPerWord - 1);

          // count < 32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);
          __ orr(dlo, dlo, AT);
          __ srav(dhi, hi, creg);
          __ bind(done);
        }
        break;
      case lir_ushr:
        {
          Label normal, done, notzero;

          // count == 0: plain copy
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);

          // count >= 32: lo = hi >>> (count-32), hi = 0
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addi(AT, creg, (-1) * BitsPerWord);
          __ srlv(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ move(dhi, R0);

          // count < 32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);
          __ orr(dlo, dlo, AT);
          __ srlv(dhi, hi, creg);
          __ bind(done);
        }
        break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif

}
4535 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
4536 if (dest->is_single_cpu()) {
4537 /* In WebClient,
4538 * virtual jboolean java.util.concurrent.atomic.AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl.compareAndSet
4539 *
4540 * 130 ushift_right [a4a4|J] [int:9|I] [a4|L]
4541 */
4542 Register value_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4543 Register dest_reg = dest->as_register();
4544 count = count & 0x1F; // Java spec
4546 switch (code) {
4547 #ifdef _LP64
4548 case lir_shl:
4549 if (dest->type() == T_INT)
4550 __ sll(dest_reg, value_reg, count);
4551 else
4552 __ dsll(dest_reg, value_reg, count);
4553 break;
4554 case lir_shr: __ dsra(dest_reg, value_reg, count); break;
4555 case lir_ushr:
4556 #if 1
4557 if (left->type() == T_INT && dest->type() == T_INT) {
4558 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4559 However, dsrl will shift in company with the highest 32 bits.
4560 Thus, if the source register contains a negative value,
4561 the resulti is incorrect.
4563 Example: in java.util.HashMap.get()
4565 68 ushift_right [t0|I] [int:20|I] [a4|I]
4566 dsrl a4, t0, 20
4568 t0: 0xFFFFFFFF87654321 (64bits for 0x87654321)
4570 ushift_right t0, 16 -> a4
4572 a4: 00000000 00008765 (right)
4573 a4: FFFFFFFF FFFF8765 (wrong)
4574 */
4575 __ dsll32(dest_reg, value_reg, 0); // Omit the high 32 bits
4576 __ dsrl32(dest_reg, dest_reg, count); // Unsigned right shift
4577 break;
4578 }
4579 #endif
4581 __ dsrl(dest_reg, value_reg, count);
4582 break;
4583 #else
4584 case lir_shl: __ sll(dest_reg, value_reg, count); break;
4585 case lir_shr: __ sra(dest_reg, value_reg, count); break;
4586 case lir_ushr: __ srl(dest_reg, value_reg, count); break;
4587 #endif
4588 default: ShouldNotReachHere();
4589 }
4591 } else if (dest->is_double_cpu()) {
4592 Register valuelo = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4593 Register destlo = dest->as_register_lo();
4594 count = count & 0x3f;
4595 #ifdef _LP64
4596 switch (code) {
4597 case lir_shl: __ dsll(destlo, valuelo, count); break;
4598 case lir_shr: __ dsra(destlo, valuelo, count); break;
4599 case lir_ushr: __ dsrl(destlo, valuelo, count); break;
4600 default: ShouldNotReachHere();
4601 }
4602 #else
4603 Register desthi = dest->as_register_hi();
4604 Register valuehi = left->as_register_hi();
4605 assert_different_registers(destlo, valuehi, desthi);
4606 switch (code) {
4607 case lir_shl:
4608 if (count==0) {
4609 __ move(destlo, valuelo);
4610 __ move(desthi, valuehi);
4611 } else if (count>=32) {
4612 __ sll(desthi, valuelo, count-32);
4613 __ move(destlo, R0);
4614 } else {
4615 __ srl(AT, valuelo, 32 - count);
4616 __ sll(destlo, valuelo, count);
4617 __ sll(desthi, valuehi, count);
4618 __ orr(desthi, desthi, AT);
4619 }
4620 break;
4622 case lir_shr:
4623 if (count==0) {
4624 __ move(destlo, valuelo);
4625 __ move(desthi, valuehi);
4626 } else if (count>=32) {
4627 __ sra(destlo, valuehi, count-32);
4628 __ sra(desthi, valuehi, 31);
4629 } else {
4630 __ sll(AT, valuehi, 32 - count);
4631 __ sra(desthi, valuehi, count);
4632 __ srl(destlo, valuelo, count);
4633 __ orr(destlo, destlo, AT);
4634 }
4635 break;
4637 case lir_ushr:
4638 if (count==0) {
4639 __ move(destlo, valuelo);
4640 __ move(desthi, valuehi);
4641 } else if (count>=32) {
4642 __ sra(destlo, valuehi, count-32);
4643 __ move(desthi, R0);
4644 } else {
4645 __ sll(AT, valuehi, 32 - count);
4646 __ srl(desthi, valuehi, count);
4647 __ srl(destlo, valuelo, count);
4648 __ orr(destlo, destlo, AT);
4649 }
4650 break;
4652 default: ShouldNotReachHere();
4653 }
4654 #endif
4655 } else {
4656 ShouldNotReachHere();
4657 }
4658 }
4660 void LIR_Assembler::store_parameter(Register r, int offset_from_esp_in_words) {
4661 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4662 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4663 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4664 __ st_ptr(r, SP, offset_from_sp_in_bytes);
4665 }
4668 void LIR_Assembler::store_parameter(jint c, int offset_from_esp_in_words) {
4669 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4670 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4671 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4672 __ move(AT, c);
4673 __ st_ptr(AT, SP, offset_from_sp_in_bytes);
4674 }
// Store the oop constant o into the caller-reserved outgoing argument
// area, offset_from_esp_in_words machine words above SP. An oop
// relocation is recorded so the GC can find/patch the embedded oop.
// Clobbers AT.
void LIR_Assembler::store_parameter(jobject o, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  int oop_index = __ oop_recorder()->find_index(o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  // The relocation must immediately precede the instructions that embed
  // the oop's bits.
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  __ lui(AT, Assembler::split_high((int)o));
  __ addiu(AT, AT, Assembler::split_low((int)o));
#else
  __ li48(AT, (long)o);
#endif

  __ st_ptr(AT, SP, offset_from_sp_in_bytes);
}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = T8;
#ifndef OPT_THREAD
  Register java_thread = T8;
#else
  Register java_thread = TREG;
#endif
  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // this is saved in the caller's reserved argument area
    //FIXME, maybe It will change something in the stack;
    // These are proper for the calling convention
    //store_parameter(length, 2);
    //store_parameter(dst_pos, 1);
    //store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    //store_parameter(src_pos, 3);
    //store_parameter(src, 4);
    assert(src == T0 && src_pos == A0, "mismatch in calling convention");
    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint

    // save all five operands so they can be reloaded after the call
    __ push(src);
    __ push(dst);
    __ push(src_pos);
    __ push(dst_pos);
    __ push(length);

    // save SP and align
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    // record last_Java_sp so the runtime can walk this frame
    __ st_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
#ifndef _LP64
    __ addi(SP, SP, (-5) * wordSize);
    __ move(AT, -(StackAlignmentInBytes));
    __ andr(SP, SP, AT);
    // push argument
    __ sw(length, SP, 4 * wordSize);
#else
    __ move(A4, length);
#endif
    // marshal arguments into the C calling convention registers
    __ move(A3, dst_pos);
    __ move(A2, dst);
    __ move(A1, src_pos);
    __ move(A0, src);
    // make call
    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    __ call(entry, relocInfo::runtime_call_type);
    __ delayed()->nop();
    // restore SP
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    __ ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
    __ super_pop(length);
    __ super_pop(dst_pos);
    __ super_pop(src_pos);
    __ super_pop(dst);
    __ super_pop(src);

    // V0 == 0 means the generic copy succeeded
    __ beq_far(V0, R0, *stub->continuation());
    __ delayed()->nop();

    // otherwise fall back to the slow-path stub (full System.arraycopy)
    __ b_far(*stub->entry());
    __ delayed()->nop();
    __ bind(*stub->continuation());
    return;
  }
  assert(default_type != NULL
         && default_type->is_array_klass()
         && default_type->is_loaded(),
         "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  switch (elem_size) {
    case 1 :shift_amount = 0; break;
    case 2 :shift_amount = 1; break;
    case 4 :shift_amount = 2; break;
    case 8 :shift_amount = 3; break;
    default:ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ beq_far(src, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ beq_far(dst, R0, *stub->entry());
    __ delayed()->nop();
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ bltz(src_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ bltz(dst_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ bltz(length, *stub->entry());
    __ delayed()->nop();
  }

  // bounds checks: pos + length must not exceed the array's length field
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(AT, src_pos, length);
    __ lw(tmp, src_length_addr);
    __ sltu(AT, tmp, AT);
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(AT, dst_pos, length);
    __ lw(tmp, dst_length_addr);
    __ sltu(AT, tmp, AT);
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }

  // klasses must match exactly; anything else goes to the slow path
  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedClassPointers) {
      __ lw(AT, src_klass_addr);
      __ lw(tmp, dst_klass_addr);
    } else {
      __ ld(AT, src_klass_addr); __ ld(tmp, dst_klass_addr);
    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly. For the object array
    // case, if no type check is needed then the dst type must match the
    // expected type and the src type is a subtype which we can't check. If
    // a type check is needed then at this point the classes are known to be
    // the same but again we don't know which type so we can't check them.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif
    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) {
        __ lw(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
      if (UseCompressedClassPointers) {
        __ lw(AT, src_klass_addr);
      } else {
        __ ld(AT, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
      if (UseCompressedClassPointers) {
        __ lw(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
      __ beq(src, dst, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // save the operands across the leaf call
  __ push(src);
  __ push(dst);
  __ push(src_pos);
  __ push(dst_pos);
  __ push(length);

  assert(A0 != A1 &&
         A0 != length &&
         A1 != length, "register checks");
  __ move(AT, dst_pos);
  // A2 = element count; for primitive copies the stub wants a byte count,
  // hence the shift (basic_type != T_OBJECT).
  if (shift_amount > 0 && basic_type != T_OBJECT) {
#ifndef _LP64
    __ sll(A2, length, shift_amount);
#else
    __ dsll(A2, length, shift_amount);
#endif
  } else {
    if (length!=A2)
      __ move(A2, length);
  }
  __ move(A3, src_pos );
  assert(A0 != dst_pos &&
         A0 != dst &&
         dst_pos != dst, "register checks");

  assert_different_registers(A0, dst_pos, dst);
  // A1 = address of first destination element
#ifndef _LP64
  __ sll(AT, AT, shift_amount);
#else
  __ dsll(AT, AT, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A1, dst, AT);

  // A0 = address of first source element
#ifndef _LP64
  __ sll(AT, A3, shift_amount);
#else
  __ dsll(AT, A3, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A0, src, AT);

  // oop copies need the store barrier, hence the separate entry point
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 3);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 3);
  }
  __ super_pop(length);
  __ super_pop(dst_pos);
  __ super_pop(src_pos);
  __ super_pop(dst);
  __ super_pop(src);

  __ bind(*stub->continuation());
}
// CRC32 intrinsic — not implemented on this port; aborts if reached.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  tty->print_cr("LIR_Assembler::emit_updatecrc32 unimplemented yet !");
  Unimplemented();
}
// Emit monitorenter/monitorexit. The fast path is inlined via
// MacroAssembler::lock_object/unlock_object; the slow path is the
// runtime stub attached to the op.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register(): op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    // fast locking disabled: always go to the runtime
    __ b_far(*op->stub()->entry());
    __ delayed()->nop();
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      //add_debug_info_for_null_check_here(op->info());
      add_debug_info_for_null_check(null_check_offset,op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // slow path re-enters here
  __ bind(*op->stub()->continuation());
}
// Update the MethodData call-profiling counters for the call at
// profiled_bci. For virtual/interface calls, also records per-receiver
// type counts in the VirtualCallData row table. Clobbers AT.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();

  __ mov_metadata(mdo, md->constant_encoding());

  // bump the overall call counter for this site
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ ld_ptr(AT, counter_addr);
  __ addi(AT, AT, DataLayout::counter_increment);
  __ st_ptr(AT, counter_addr);

  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static && //required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // row already holds this klass: just bump its count
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(AT, known_klass->constant_encoding());
          __ st_ptr(AT,recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }
    } else {
      // receiver type only known at runtime: emit code that searches the
      // row table for the receiver's klass
      //__ ld_ptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      __ load_klass(recv, recv);
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ bne(recv,AT,next_test);
        __ delayed()->nop();
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ ld_ptr(AT, data_addr);
        __ addi(AT, AT, DataLayout::counter_increment);
        __ st_ptr(AT, data_addr);
        __ b(update_done);
        __ delayed()->nop();
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ ld_ptr(AT, recv_addr);
        __ bne(AT, R0, next_test);
        __ delayed()->nop();
        __ st_ptr(recv, recv_addr);
        __ move(AT, DataLayout::counter_increment);
        __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
        // last row falls through to update_done anyway; skip the jump
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ b(update_done);
          __ delayed()->nop();
        }
        __ bind(next_test);
      }
      __ bind(update_done);
    }
  }
}
// Type profiling (JEP 8 profiling support) — not implemented on this port.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  // Newly added in OpenJDK 8
  Unimplemented();
}
// Delay-filling op (SPARC-style) — never emitted on MIPS C1.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
5128 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
5129 if (dst->is_single_cpu())
5130 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
5131 else if (dst->is_double_cpu())
5132 __ lea(dst->as_register_lo(), frame_map()->address_for_monitor_lock(monitor_no));
5133 }
// Align loop-entry (backward-branch) targets to a word boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
// dest = -left, for int, long, float and double operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ subu(dest->as_register(), R0, left->as_register());
  } else if (left->is_double_cpu()) {
#ifndef _LP64
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();
    assert(dlo != hi, "register checks");
    // Two's-complement negate of the 64-bit pair: ~x + 1, propagating
    // the carry out of the low word (captured in AT) into the high word.
    __ nor(dlo, R0, lo);
    __ addiu(dlo, dlo, 1);
    __ sltiu(AT, dlo, 1);  // AT = 1 iff the +1 wrapped (carry out)
    __ nor(dhi, R0, hi);
    __ addu(dhi, dhi, AT);
#else
    __ subu(dest->as_register_lo(), R0, left->as_register_lo());
#endif
  } else if (left->is_single_fpu()) {
    __ neg_s(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ neg_d(dest->as_double_reg(), left->as_double_reg());
  }else {
    ShouldNotReachHere();
  }
}
5169 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
5170 assert(addr->is_address() && dest->is_register(), "check");
5171 Register reg;
5172 reg = dest->as_pointer_register();
5173 __ lea(reg, as_Address(addr->as_address_ptr()));
5174 }
// Load the oop constant o into reg. For non-NULL oops an oop relocation
// is recorded immediately before the instructions embedding the bits,
// so the GC can find and update the embedded pointer.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    // This seems wrong as we do not emit relocInfo
    // for classes that are not loaded yet, i.e., they will be
    // never GC'd
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ relocate(rspec);
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  }
}
5205 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
5206 assert(!tmp->is_valid(), "don't need temporary");
5207 __ call(dest, relocInfo::runtime_call_type);
5208 __ delayed()->nop();
5209 if (info != NULL) {
5210 add_call_info_here(info);
5211 }
5212 }
/* by yyq 7/22/2009
 * It is unclear whether the register allocator places a long or a double in
 * two consecutive registers. If it does, the lw instructions below should be
 * removed and ld instructions used instead.
 */
// Move a volatile long between a register (cpu or fpu pair) and memory.
// On LP64 a single 64-bit access is used; on 32-bit the two halves are
// accessed separately. NOTE(review): the 32-bit path performs two 32-bit
// accesses, which is presumably not atomic — confirm whether callers
// tolerate this for volatile longs.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");
  if (info != NULL) {
    // the first access below may fault on a NULL base
    add_debug_info_for_null_check_here(info);
  }

  if(src->is_register() && dest->is_address()) {
    // store: register -> memory
    if(src->is_double_cpu()) {
#ifdef _LP64
      __ sd(src->as_register_lo(), as_Address(dest->as_address_ptr()));
#else
      __ sw(src->as_register_lo(), as_Address(dest->as_address_ptr()));
      __ sw(src->as_register_hi(), as_Address(dest->as_address_ptr()).base(),
            as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else if (src->is_double_fpu()) {
#ifdef _LP64
      __ sdc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
#else
      __ swc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
      __ swc1(src->as_fpu_hi(), as_Address(dest->as_address_ptr()).base(),
              as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_address() && dest->is_register()){
    // load: memory -> register
    if(dest->is_double_cpu()) {
#ifdef _LP64
      __ ld(dest->as_register_lo(), as_Address(src->as_address_ptr()));
#else
      __ lw(dest->as_register_lo(), as_Address(src->as_address_ptr()));
      __ lw(dest->as_register_hi(), as_Address(src->as_address_ptr()).base(),
            as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else if (dest->is_double_fpu()) {
#ifdef _LP64
      __ ldc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
#else
      __ lwc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
      __ lwc1(dest->as_fpu_hi(), as_Address(src->as_address_ptr()).base(),
              as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
#ifdef ASSERT
// emit run-time assertion — not implemented on this port (debug builds only)
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  tty->print_cr("LIR_Assembler::emit_assert unimplemented yet!");
  Unimplemented();
}
#endif
// Full memory barrier: the MIPS sync instruction orders all prior
// memory accesses before all subsequent ones.
void LIR_Assembler::membar() {
  __ sync();
}
// Acquire barrier — implemented conservatively as a full sync.
void LIR_Assembler::membar_acquire() {
  __ sync();
}
// Release barrier — implemented conservatively as a full sync.
void LIR_Assembler::membar_release() {
  __ sync();
}
// LoadLoad barrier.
// NOTE(review): emitted as a no-op here — presumably the target
// (Loongson) preserves load-load ordering; confirm against the
// processor's memory model before relying on this.
void LIR_Assembler::membar_loadload() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// StoreStore barrier.
// NOTE(review): emitted as a no-op here — presumably the target
// preserves store-store ordering; confirm against the processor's
// memory model before relying on this.
void LIR_Assembler::membar_storestore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// LoadStore barrier.
// NOTE(review): emitted as a no-op here — presumably the target
// preserves load-store ordering; confirm against the processor's
// memory model before relying on this.
void LIR_Assembler::membar_loadstore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
// StoreLoad barrier.
// NOTE(review): emitted as a no-op here. StoreLoad is the barrier most
// architectures require an explicit fence for — confirm the target's
// memory model really makes this safe, or whether a sync is needed.
void LIR_Assembler::membar_storeload() {
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
// Load the current JavaThread* into result_reg. With OPT_THREAD the
// thread pointer is cached in TREG; otherwise it is fetched via the
// MacroAssembler helper.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifndef OPT_THREAD
  __ get_thread(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()));
#else
  __ move(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()), TREG);
#endif
}
// Platform peephole optimization hook — intentionally empty on MIPS.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
// Atomic xchg/xadd (used by Unsafe intrinsics) — not implemented on this
// port; aborts if reached. The commented-out code below is the x86
// implementation kept as a porting reference.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  /* assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }*/
  ShouldNotReachHere();
}
5369 #undef __