Fri, 29 Apr 2016 00:06:10 +0800
Added MIPS 64-bit port.
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "gc_interface/collectedHeap.hpp"
37 #include "memory/barrierSet.hpp"
38 #include "memory/cardTableModRefBS.hpp"
39 #include "nativeInst_mips.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #define __ _masm->
44 static void select_different_registers(Register preserve,
45 Register extra,
46 Register &tmp1,
47 Register &tmp2) {
48 if (tmp1 == preserve) {
49 assert_different_registers(tmp1, tmp2, extra);
50 tmp1 = extra;
51 } else if (tmp2 == preserve) {
52 assert_different_registers(tmp1, tmp2, extra);
53 tmp2 = extra;
54 }
55 assert_different_registers(preserve, tmp1, tmp2);
56 }
60 static void select_different_registers(Register preserve,
61 Register extra,
62 Register &tmp1,
63 Register &tmp2,
64 Register &tmp3) {
65 if (tmp1 == preserve) {
66 assert_different_registers(tmp1, tmp2, tmp3, extra);
67 tmp1 = extra;
68 } else if (tmp2 == preserve) {
69 tmp2 = extra;
70 } else if (tmp3 == preserve) {
71 assert_different_registers(tmp1, tmp2, tmp3, extra);
72 tmp3 = extra;
73 }
74 assert_different_registers(preserve, tmp1, tmp2, tmp3);
75 }
77 // need add method Assembler::is_simm16 in assembler_gs2.hpp
78 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
79 if (opr->is_constant()) {
80 LIR_Const* constant = opr->as_constant_ptr();
81 switch (constant->type()) {
82 case T_INT: {
83 jint value = constant->as_jint();
84 return Assembler::is_simm16(value);
85 }
86 default:
87 return false;
88 }
89 }
90 return false;
91 }
//FIXME, which register should be used?
// Operand holding the incoming receiver ('this') oop; this MIPS port
// places it in T0.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::_t0_oop_opr;
}
97 /*
98 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
99 return receiverOpr();
100 }*/
// Operand holding the OSR buffer pointer on entry; it arrives in the
// receiver register (see receiverOpr()).
LIR_Opr LIR_Assembler::osrBufferPointer() {
#ifdef _LP64
  // On 64-bit, wrap the register as a long operand (same register used
  // for both halves) so the full pointer width is described.
  Register r = receiverOpr()->as_register();
  return FrameMap::as_long_opr(r, r);
#else
  return FrameMap::as_opr(receiverOpr()->as_register());
#endif
}
111 //--------------fpu register translations-----------------------
112 // FIXME:I do not know what's to do for mips fpu
114 address LIR_Assembler::float_constant(float f) {
115 address const_addr = __ float_constant(f);
116 if (const_addr == NULL) {
117 bailout("const section overflow");
118 return __ code()->consts()->start();
119 } else {
120 return const_addr;
121 }
122 }
125 address LIR_Assembler::double_constant(double d) {
126 address const_addr = __ double_constant(d);
127 if (const_addr == NULL) {
128 bailout("const section overflow");
129 return __ code()->consts()->start();
130 } else {
131 return const_addr;
132 }
133 }
// x87-style FPU control-word management has no MIPS equivalent; these
// trap loudly via Unimplemented() rather than silently doing nothing,
// so any unexpected use is caught.
void LIR_Assembler::reset_FPU() {
  Unimplemented();
}

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}
//FIXME.
// The following model the x87 FPU register stack, which does not exist
// on MIPS (flat FPU register file); they are deliberate no-ops here.
void LIR_Assembler::fpop() {
  // do nothing
}

void LIR_Assembler::fxch(int i) {
  // do nothing
}

void LIR_Assembler::fld(int i) {
  // do nothing
}

void LIR_Assembler::ffree(int i) {
  // do nothing
}
// Emit a MIPS break instruction (break code 17) as a debugging trap.
void LIR_Assembler::breakpoint() {
  __ brk(17);
}
//FIXME, opr can not be float?
// Push 'opr' onto the native stack. Handles single-word GPR, double-word
// GPR pairs, stack slots and object/int constants; any other operand
// kind (notably floats — see FIXME) is a fatal error.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // Push high half first, then low half.
    __ push_reg(opr->as_register_hi());
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Pop the top stack word into 'opr'. Only single-word GPR operands are
// supported; anything else asserts.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop(opr->as_register());
  } else {
    assert(false, "Must be single word register or floating-point register");
  }
}
// Convert a LIR_Address to a native Address (base register + displacement).
// NOTE(review): only base and displacement are used — any index/scale in
// 'addr' is dropped here; confirm callers never pass indexed forms.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
#ifndef _LP64
  Register reg = addr->base()->as_register();
#else
  //FIXME aoqi
  // On 64-bit the base may be described as a double-cpu (long) operand;
  // in that case take its low register.
  Register reg = addr->base()->is_single_cpu() ? addr->base()->as_register() : addr->base()->as_register_lo();
#endif
  // now we need this for parameter pass
  return Address(reg, addr->disp());
}
// Address of the low word of a two-word (long/double) value: same as the
// plain address, since the low word lives at the base displacement.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
// Address of the high word of a two-word (long/double) value: the base
// displacement plus half a long (one 32-bit word).
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp() + longSize / 2);
}
//void LIR_Assembler::osr_entry(IRScope* scope, int number_of_locks, Label* continuation, int osr_bci) {
// Emit the on-stack-replacement entry point: record the OSR entry offset,
// build the compiled frame, and copy the monitors (lock + object pairs)
// from the OSR buffer into the compiled activation's monitor slots.
void LIR_Assembler::osr_entry() {
  // assert(scope->is_top_scope(), "inlined OSR not yet implemented");
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // S7: interpreter locals pointer
  // V1: interpreter locks pointer
  // RA: return address
  // T0: OSR buffer

  // build frame
  // ciMethod* m = scope->method();
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // T0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  // note: we do osr only if the expression stack at the loop beginning is empty,
  // in which case the spill area is empty too and we don't have to setup
  // spilled locals
  //
  // copy monitors
  // V1: pointer to locks
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset (in the OSR buffer) of the first monitor; monitors are laid
    // out after the locals, last monitor first.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - (i * BasicObjectLock::size()) * BytesPerWord;
#ifdef ASSERT
      // Verify the buffer actually holds a locked object in this slot.
      {
        Label L;
        //__ lw(AT, V1, slot_offset * BytesPerWord + BasicObjectLock::obj_offset_in_bytes());
        __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the BasicLock and then the object pointer into the compiled
      // frame's monitor slot i.
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_object(i));
    }
  }
}
// Emit the inline-cache check at the method's verified entry: compare the
// receiver's klass against the inline cache klass (IC_Klass). Returns the
// code offset of the check.
// NOTE(review): unlike the x86 port this aligns AFTER the check rather
// than padding before it — confirm this is intentional for MIPS.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;

  /*const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedOops;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }*/

  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  __ align(CodeEntryAlignment);
  return offset;
}
// Load a to-be-patched oop constant into 'reg': emit a PatchingStub plus
// a relocatable load of a NULL placeholder that the runtime later patches
// with the resolved oop.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  int oop_index = __ oop_recorder()->allocate_oop_index(o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  // Materialize the (placeholder) address as lui/addiu so the patcher
  // can rewrite both halves.
  __ lui(reg, Assembler::split_high((int)o));
  __ addiu(reg, reg, Assembler::split_low((int)o));
#else
  //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi
  // li48 emits a fixed-length sequence so the patcher finds a predictable
  // instruction pattern.
  __ li48(reg, (long)o);
#endif
  // patching_epilog(patch, LIR_Op1::patch_normal, noreg, info);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// Emit code to unlock the object held in monitor slot 'monitor_no'.
// If 'exception' names a valid register, its value is live across the
// unlock and is spilled/reloaded around it.
void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register unused, int monitor_no, Register exception) {
  if (exception->is_valid()) {
    // preserve exception
    // note: the monitor_exit runtime call is a leaf routine
    // and cannot block => no GC can happen
    // The slow case (MonitorAccessStub) uses the first two stack slots
    // ([SP+0] and [SP+4]), therefore we store the exception at [SP+2*wordSize]
    __ st_ptr(exception, SP, 2 * wordSize);
  }

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  // compute pointer to BasicLock
  //Address lock_addr = frame_map()->address_for_monitor_lock_index(monitor_no);
  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ lea(lock_reg, lock_addr);
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
  // temporary fix: must be created after exceptionhandler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(NOREG, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ b_far(*slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());

  if (exception->is_valid()) {
    // restore exception
    __ ld_ptr(exception, SP, 2 * wordSize);
  }
}
// This specifies the SP decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
  // Subtract two words from the FrameMap size to account for the return
  // address and the frame link, which are stored separately.
  // return (frame_map()->framesize() - 2) * BytesPerWord; // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2 * VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
// Emit the method's exception-handler stub. Returns the stub's code
// offset, or -1 on code-buffer overflow (compilation bails out).
// NOTE(review): the actual dispatch into Runtime1::handle_exception is
// not wired up yet — the stub only verifies the exception oop and traps
// (should_not_reach_here); see the FIXME below.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  // Lazy deopt bug 4932387. If last instruction is a call then we
  // need an area to patch where we won't overwrite the exception
  // handler. This means we need 5 bytes. Could use a fat_nop
  // but since this never gets executed it doesn't really make
  // much difference.
  //
  // Pad with one call's worth of nops (plus one) so patching a call over
  // the end of the method cannot clobber the handler.
  for (int i = 0; i < (NativeCall::instruction_size / BytesPerInstWord + 1); i++) {
    __ nop();
  }

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space
    bailout("exception handler overflow");
    return -1;
  }

  //compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
  // if the method does not have an exception handler, then there is
  // no reason to search for one
  //if (compilation()->has_exception_handlers() || JvmtiExport::can_post_exceptions()) {
  // the exception oop and pc are in V0 and V1
  // no other registers need to be preserved, so invalidate them
  // check that there is really an exception
  // __ verify_not_null_oop(V0);

  // search an exception handler (V0: exception oop, V1: throwing pc)
  // __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
  // relocInfo::runtime_call_type);
  // __ delayed()->nop();
  // if the call returns here, then the exception handler for particular
  // exception doesn't exist -> unwind activation and forward exception to caller
  // }
  int offset = code_offset();

  // the exception oop is in V0
  // no other registers need to be preserved, so invalidate them
  // check that there is really an exception
  __ verify_not_null_oop(V0);
  //FIXME:wuhui??
  //__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  //__ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();
  return offset;

  // unlock the receiver/klass if necessary
  // V0: exception
  // ciMethod* method = compilation()->method();
  // if (method->is_synchronized() && GenerateSynchronizationCode) {
  //#ifndef _LP64
  //by_css
  // monitorexit(FrameMap::_t0_oop_opr, FrameMap::_t6_opr, NOREG, 0, V0);
  //#else
  // monitorexit(FrameMap::_t0_oop_opr, FrameMap::_a6_opr, NOREG, 0, V0);
  //#endif
  // }

  // unwind activation and forward exception to caller
  // V0: exception
  // __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id),
  // relocInfo::runtime_call_type);
  // __ delayed()->nop();
  // __ end_a_stub();
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
// NOTE(review): the unwind logic is not yet implemented for MIPS — only
// the offset is recorded; the commented block below is the x86 reference
// implementation kept as a porting guide.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  /* // Fetch the exception from TLS and clear out exception related thread state
  __ get_thread(rsi);
  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rsi, rax); // Preserve the exception
  }
  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rbx, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rsi); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }
  */
  return offset;
}
// Emit the deoptimization-handler stub: a call into the shared
// deoptimization blob's unpack entry. Returns the stub's code offset,
// or -1 on code-buffer overflow (compilation bails out).
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }
  int offset = code_offset();

  // compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  __ call(SharedRuntime::deopt_blob()->unpack());
  __ delayed()->nop();  // branch delay slot

  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();
  return offset;
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR-entry and therefore, we generate a slow version for OSR's
//void LIR_Assembler::emit_string_compare(IRScope* scope) {
// Compare the receiver string (in T0) against the string in 'arg1',
// leaving the signed comparison result in V0 and returning from the
// method via return_op. Char pointers are formed as
// value + 2*offset + base_offset(T_CHAR).
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  // get two string objects in T0 & T1
  // receiver already in T0
  __ ld_ptr(T1, arg1->as_register());
  //__ ld_ptr(T2, T0, java_lang_String::value_offset_in_bytes()); //value, T_CHAR array
  __ load_heap_oop(T2, Address(T0, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T0, java_lang_String::offset_offset_in_bytes()); //offset
  __ shl(AT, 1);  // offset is in chars; scale to bytes
  __ add(T2, T2, AT);
  __ addi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T2 is the address of the first char in first string(T0)

  add_debug_info_for_null_check_here(info);
  //__ ld_ptr(T3, T1, java_lang_String::value_offset_in_bytes());
  __ load_heap_oop(T3, Address(T1, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T1, java_lang_String::offset_offset_in_bytes());
  __ shl(AT, 1);
  __ add(T3, T3, AT);
  __ addi(T3, T3, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T3 is the address of the first char in second string(T1)

#ifndef _LP64
  //by_css
  // compute minimum length (in T4) and difference of lengths (V0)
  Label L;
  __ lw(T4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw(T5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ subu(V0, T4, T5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move(T4, T5);  // T4 = min(len1, len2)
  __ bind(L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(T4, R0, LoopEnd);
  __ delayed();  // next instruction fills the branch delay slot

  __ addi(T2, T2, 2);

  // compare current character
  __ lhu(T5, T2, -2);
  __ lhu(T6, T3, 0);
  __ bne(T5, T6, haveResult);
  __ delayed();  // next instruction fills the branch delay slot

  __ addi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(T4, T4, -1);  // decrement remaining count in delay slot

  __ bind(haveResult);
  __ subu(V0, T5, T6);  // first differing chars decide the result

  __ bind(LoopEnd);
#else
  // compute minimum length (in A4) and difference of lengths (V0)
  Label L;
  __ lw(A4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw(A5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ dsubu(V0, A4, A5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move(A4, A5);  // A4 = min(len1, len2)
  __ bind(L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(A4, R0, LoopEnd);
  __ delayed();  // next instruction fills the branch delay slot

  __ daddi(T2, T2, 2);

  // compare current character
  __ lhu(A5, T2, -2);
  __ lhu(A6, T3, 0);
  __ bne(A5, A6, haveResult);
  __ delayed();  // next instruction fills the branch delay slot

  __ daddi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(A4, A4, -1);  // decrement remaining count in delay slot

  __ bind(haveResult);
  __ dsubu(V0, A5, A6);  // first differing chars decide the result

  __ bind(LoopEnd);
#endif
  return_op(FrameMap::_v0_opr);
}
// Emit the method epilogue: tear down the frame, perform the return
// safepoint poll (a load from the VM polling page), and jump to RA.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
  // Pop the stack before the safepoint code
  __ leave();
#ifndef _LP64
  //by aoqi
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  __ li48(AT, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, 0);
#else
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif

  __ jr(RA);
  __ delayed()->nop();  // branch delay slot
}
// Note: on Godson-2E a read-protected load targeting R0 does not raise
// the exception, so the poll loads into AT instead of R0 (jerome, 2006-11-25).
// Emit a safepoint poll: a load from the VM polling page; when the page
// is protected, the resulting fault is handled as a safepoint. Returns
// the code offset of the poll site.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  assert(info != NULL, "info must not be null for safepoint poll");
  int offset = __ offset();
  Register r = tmp->as_register();
#ifndef _LP64
  //by aoqi
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  //__ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ li48(r, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  //__ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ lw(AT, r, 0);
#else
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif
  return offset;
}
// Copy 'from_reg' into 'to_reg'; emits nothing when they are the same
// register.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ move(to_reg, from_reg);
}
715 void LIR_Assembler::swap_reg(Register a, Register b) {
716 __ xorr(a, a, b);
717 __ xorr(b, a, b);
718 __ xorr(a, a, b);
719 }
// Materialize the constant 'src' into the register operand 'dest'.
// Int/long constants go straight into GPRs (or via AT into FPRs);
// float/double constants are loaded from the constant table; object
// constants are either loaded directly or via a patching stub.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
      {
        jint con = c->as_jint();
        if (dest->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ move(dest->as_register(), con);
        } else {
          // Route the bits through AT into the FP register.
          assert(dest->is_single_fpu(), "wrong register kind");
          __ move(AT, con);
          __ mtc1(AT, dest->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
#ifndef _LP64
        // 32-bit: split the jlong into low/high words via pointer access.
        jlong con = c->as_jlong();
        jint* conhi = (jint*)&con + 1;
        jint* conlow = (jint*)&con;

        if (dest->is_double_cpu()) {
          __ move(dest->as_register_lo(), *conlow);
          __ move(dest->as_register_hi(), *conhi);
        } else {
          // assert(dest->is_double(), "wrong register kind");
          __ move(AT, *conlow);
          __ mtc1(AT, dest->as_double_reg());
          __ move(AT, *conhi);
          __ mtc1(AT, dest->as_double_reg() + 1);
        }
#else
        if (dest->is_double_cpu()) {
          __ li(dest->as_register_lo(), c->as_jlong());
        } else {
          __ li(dest->as_register(), c->as_jlong());
        }
#endif
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), dest->as_register());
        } else {
          jobject2reg_with_patching(dest->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        // Float constants live in the constant table; load through AT.
        address const_addr = float_constant(c->as_jfloat());
        assert(const_addr != NULL, "must create float constant in the constant table");

        if (dest->is_single_fpu()) {
          __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
          //by_css
          __ lui(AT, Assembler::split_high((int)const_addr));
          __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
          __ li48(AT, (long)const_addr);
#endif
          __ lwc1(dest->as_float_reg(), AT, 0);

        } else {
          // Float bits requested in a GPR.
          assert(dest->is_single_cpu(), "Must be a cpu register.");
          assert(dest->as_register() != AT, "AT can not be allocated.");

          __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
          //by_css
          __ lui(AT, Assembler::split_high((int)const_addr));
          __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
          __ li48(AT, (long)const_addr);
#endif
          __ lw(dest->as_register(), AT, 0);
        }
      }
      break;

    case T_DOUBLE:
      {
        // Double constants live in the constant table; load through AT.
        address const_addr = double_constant(c->as_jdouble());
        assert(const_addr != NULL, "must create double constant in the constant table");

        if (dest->is_double_fpu()) {
          __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
          //by_css
          __ lui(AT, Assembler::split_high((int)const_addr));
          __ addiu(AT, AT, Assembler::split_low((int)const_addr));
          __ lwc1(dest->as_double_reg(), AT, 0);
          __ lwc1(dest->as_double_reg() + 1, AT, 4);
#else
          __ li48(AT, (long)const_addr);
          __ ldc1(dest->as_double_reg(), AT, 0);
#endif
        } else {
          // Double bits requested in a GPR (pair on 32-bit).
          assert(dest->as_register_lo() != AT, "AT can not be allocated.");
          assert(dest->as_register_hi() != AT, "AT can not be allocated.");

          __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
          //by_css
          __ lui(AT, Assembler::split_high((int)const_addr));
          __ addiu(AT, AT, Assembler::split_low((int)const_addr));
          __ lw(dest->as_register_lo(), AT, 0);
          __ lw(dest->as_register_hi(), AT, 4);
#else
          __ li48(AT, (long)const_addr);
          __ ld(dest->as_register_lo(), AT, 0);
#endif
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}
// Store the constant 'src' into the stack slot operand 'dest',
// materializing non-zero values through the scratch register AT.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT: // fall through
    case T_FLOAT:
      // Both are 32-bit bit patterns; store via AT.
      __ move(AT, c->as_jint_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      if (c->as_jobject() == NULL) {
        // NULL oop: store the zero register directly.
        __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        // Record the oop and emit a relocatable materialization of its
        // address so the GC can find/patch it.
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
#else
        __ li48(AT, (long)c->as_jobject());
#endif
        __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
      break;

    case T_LONG: // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store the two halves separately.
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes));
#else
      __ move(AT, c->as_jlong_bits());
      __ sd(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes));
#endif
      break;
    default:
      ShouldNotReachHere();
  }
}
// Store the constant 'src' to the memory address operand 'dest'.
// Zero constants are stored directly from R0; other values go through
// the scratch register AT. 'null_check_here' tracks the offset of the
// instruction that may fault so an implicit null check can be recorded.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_LONG: // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store the two halves separately.
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, as_Address_hi(addr));
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, as_Address_lo(addr));
#else
      if (c->as_jlong_bits() != 0) {
        /* DoublePrint: -0.0
         * (gdb) print /x -9223372036854775808
         * $1 = 0x8000000000000000
         */
        // Compare the full bit pattern (not the numeric value) so that
        // -0.0 is not treated as zero.
        __ li64(AT, c->as_jlong_bits());
        __ sd(AT, as_Address_lo(addr));
      } else
        __ sd(R0, as_Address(addr));
#endif
      break;
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          // Narrow oop: a 32-bit zero store suffices.
          __ sw(R0, as_Address(addr));
        } else {
          __ st_ptr(R0, as_Address(addr));
        }
      } else {
        // Record the oop and emit a relocatable materialization of it.
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
        __ st_ptr(AT, as_Address(addr));
        null_check_here = code_offset();
#else
        //by_css
        __ li64(AT, (long)c->as_jobject());
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(AT);
          null_check_here = code_offset();
          __ sw(AT, as_Address(addr));
        } else {
          __ st_ptr(AT, as_Address(addr));
        }
#endif
      }
      break;
    case T_INT: // fall through
    case T_FLOAT:
      if (c->as_jint_bits() != 0) {
        __ move(AT, c->as_jint_bits());
        __ sw(AT, as_Address(addr));
      } else
        __ sw(R0, as_Address(addr));
      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      if (c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sb(AT, as_Address(addr));
      } else
        __ sb(R0, as_Address(addr));
      break;
    case T_CHAR: // fall through
    case T_SHORT:
      if (c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sh(AT, as_Address(addr));
      } else
        __ sh(R0, as_Address(addr));
      break;
    default: ShouldNotReachHere();
  };
  if (info != NULL) add_debug_info_for_null_check(null_check_here, info);
}
// Register-to-register move.  Handles three shapes: FPU->FPU, GPR->GPR
// (including the LP64 long/object special cases), and GPR<->FPU transfers
// via mtc1/mfc1.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  if (dest->is_float_kind() && src->is_float_kind()) {
    // float to float moves
    if (dest->is_single_fpu()) {
      assert(src->is_single_fpu(), "must both be float");
      __ mov_s(dest->as_float_reg(), src->as_float_reg());
    } else {
      assert(src->is_double_fpu(), "must bothe be double");
      __ mov_d( dest->as_double_reg(),src->as_double_reg());
    }
  } else if (!dest->is_float_kind() && !src->is_float_kind()) {
    // int to int moves
    if (dest->is_single_cpu()) {
#ifdef _LP64
      // LONG -> single-cpu destination (e.g. LONG -> OBJECT) is legal on
      // 64-bit; copied from the x86 port.
      if (src->type() == T_LONG) {
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
#endif
      assert(src->is_single_cpu(), "must match");
      if (dest->type() == T_INT) {
        // T_INT destination: 32-bit move (sign-extends on 64-bit).
        __ move_u32(dest->as_register(), src->as_register());
      }
      else
        move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      // An OBJECT/ARRAY source moving into a long destination can occur;
      // verify the oop, do a plain pointer-width move.
      if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
#endif
      // f_* = from (source) registers, t_* = to (destination) registers.
      // NOTE(review): f_hi/t_hi stay uninitialized on the single-cpu source
      // path; on LP64 only f_lo/t_lo are used below, but on 32-bit that
      // path would read them — presumably it cannot occur there; confirm.
      Register f_lo;
      Register f_hi;
      Register t_lo;
      Register t_hi;

      if (src->is_single_cpu())
      {
        f_lo = src->as_register();
        t_lo = dest->as_register_lo();
      }
      else
      {
        f_lo = src->as_register_lo();
        f_hi = src->as_register_hi();
        t_lo = dest->as_register_lo();
        t_hi = dest->as_register_hi();
        assert(f_hi == f_lo, "must be same");
        assert(t_hi == t_lo, "must be same");
      }
#ifdef _LP64
      move_regs(f_lo, t_lo);
#else
      assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");

      // Order the two word moves so neither source word is clobbered
      // before it is read; full overlap degenerates to a swap.
      if (f_lo == t_hi && f_hi == t_lo) {
        swap_reg(f_lo, f_hi);
      } else if (f_hi == t_lo) {
        assert(f_lo != t_hi, "overwriting register");
        move_regs(f_hi, t_hi);
        move_regs(f_lo, t_lo);
      } else {
        assert(f_hi != t_lo, "overwriting register");
        move_regs(f_lo, t_lo);
        move_regs(f_hi, t_hi);
      }
#endif // LP64
    }
  } else {
    // float to int or int to float moves
    if (dest->is_double_cpu()) {
      assert(src->is_double_fpu(), "must match");
      __ mfc1(dest->as_register_lo(), src->as_double_reg());
#ifndef _LP64
      // 32-bit: the second half of the double lives in the paired FPR.
      __ mfc1(dest->as_register_hi(), src->as_double_reg() + 1);
#endif
    } else if (dest->is_single_cpu()) {
      assert(src->is_single_fpu(), "must match");
      __ mfc1(dest->as_register(), src->as_float_reg());
    } else if (dest->is_double_fpu()) {
      assert(src->is_double_cpu(), "must match");
      __ mtc1(src->as_register_lo(), dest->as_double_reg());
#ifndef _LP64
      __ mtc1(src->as_register_hi(), dest->as_double_reg() + 1);
#endif
    } else if (dest->is_single_fpu()) {
      assert(src->is_single_cpu(), "must match");
      __ mtc1(src->as_register(), dest->as_float_reg());
    }
  }
}
// Spill a register operand 'src' into the stack slot described by 'dest'.
// 'type' selects the store width; pop_fpu_stack is unused on MIPS (no x87
// FPU stack).
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type,bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
    }
#ifdef _LP64
    // 64-bit: a T_INT occupies only 32 bits of the slot.
    if (type == T_INT)
      __ sw(src->as_register(),dst);
    else
#endif
      __ st_ptr(src->as_register(),dst);
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ st_ptr(src->as_register_lo(),dstLO);
    // 32-bit only: the high word lives in a second register.
    NOT_LP64(__ st_ptr(src->as_register_hi(),dstHI));
  }else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ swc1(src->as_float_reg(), dst_addr);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
#ifndef _LP64
    // 32-bit: store the double as two word stores from the FPR pair.
    __ swc1(src->as_double_reg(), dst_addr);
    __ swc1(src->as_double_reg() + 1, dst_addr.base(), dst_addr.disp() + 4);
#else
    __ sdc1(src->as_double_reg(), dst_addr);
#endif
  } else {
    ShouldNotReachHere();
  }
}
// Store register operand 'src' to the memory address described by 'dest'.
// If patch_code != lir_patch_none a PatchingStub is created and the
// displacement is always materialized with lui/addiu so the patcher can
// rewrite it later.  'wide' forces an uncompressed oop store.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info,bool pop_fpu_stack, bool wide, bool/*unaliged*/) {
  LIR_Address* to_addr = dest->as_address_ptr();
  // The base may be allocated as a single- or double-cpu operand.
  Register dest_reg = to_addr->base()->is_single_cpu()? to_addr->base()->as_register() : to_addr->base()->as_register_lo();
  PatchingStub* patch = NULL;
  bool needs_patching = (patch_code != lir_patch_none);
  Register disp_reg = NOREG;
  int disp_value = to_addr->disp();
  // The patch template starts at "new PatchingStub(...)" below; during
  // patching T9 is clobbered and not restored, which is why S7 (not T9)
  // is used to hold the compressed oop here.
  Register compressed_src = S7;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Compress into S7 so the original oop register stays intact.
      __ move(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!src->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal,
           "patching doesn't match register");
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  if (needs_patching) {
    // Always materialize the displacement as lui+addiu so the patcher has
    // a fixed-shape instruction pair to rewrite.
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    // Displacement does not fit a 16-bit immediate: high part into AT,
    // low part goes into the store's immediate field below.
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }
  // NOTE(review): 'offset' is recorded but never used afterwards in this
  // function (debug info was added above) — looks like a leftover.
  int offset = code_offset();

  switch(type) {
    case T_DOUBLE:
      assert(src->is_double_fpu(), "just check");
      if (disp_reg == noreg) {
#ifndef _LP64
        __ swc1(src->as_double_reg(), dest_reg, disp_value);
        __ swc1(src->as_double_reg()+1, dest_reg, disp_value+4);
#else
        __ sdc1(src->as_double_reg(), dest_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, 0);
        __ swc1(src->as_double_reg()+1, AT, 4);
#else
        __ sdc1(src->as_double_reg(), AT, 0);
#endif
      } else {
        // AT holds base + split_high(disp); low part is the immediate.
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ swc1(src->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ sdc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
      break;

    case T_FLOAT:
      if (disp_reg == noreg) {
        __ swc1(src->as_float_reg(), dest_reg, disp_value);
      } else if(needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(from_lo, AT, 0);
      } else {
        __ st_ptr(from_lo, as_Address_lo(to_addr));
      }
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      // Order the two word stores so that neither source register is an
      // address component that the other store still needs.
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          NOT_LP64(__ st_ptr(from_hi, AT, longSize/2);)
          __ st_ptr(from_lo, AT, 0);
        } else {
          __ st_ptr(from_hi, as_Address_hi(to_addr));
          __ st_ptr(from_lo, as_Address_lo(to_addr));
        }
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(from_lo, AT, 0);
          __ st_ptr(from_hi, AT, longSize/2);
        } else {
          __ st_ptr(from_lo, as_Address_lo(to_addr));
          __ st_ptr(from_hi, as_Address_hi(to_addr));
        }
      }
#endif
      break;
    }
    case T_ARRAY:
    case T_OBJECT:
#ifdef _LP64
      // NOTE: the break is inside the #ifdef, so on 32-bit builds this case
      // falls through to T_INT (sw), which matches the 32-bit pointer size.
      if (UseCompressedOops && !wide) {
        if (disp_reg == noreg) {
          __ sw(compressed_src, dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ st_ptr(src->as_register(), dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
#endif
    case T_ADDRESS:
#ifdef _LP64
      // Same #ifdef-fallthrough structure as T_OBJECT above.
      if (disp_reg == noreg) {
        __ st_ptr(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;
#endif
    case T_INT:
      if (disp_reg == noreg) {
        __ sw(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_CHAR:
    case T_SHORT:
      if (disp_reg == noreg) {
        __ sh(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_BYTE:
    case T_BOOLEAN:
      assert(src->is_single_cpu(), "just check");

      if (disp_reg == noreg) {
        __ sb(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    // NOTE(review): passes base()->as_register() although dest_reg above
    // allows a double-cpu base via as_register_lo() — confirm the base is
    // always single-cpu when patching is needed.
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
// Load a stack slot 'src' into the register operand 'dest'.  Width is
// chosen from 'type' (T_INT gets a 32-bit load on 64-bit builds).
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (type == T_INT)
      __ lw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    else
#endif
      __ ld_ptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    // A double-cpu destination may be fed from either a single- or a
    // double-stack source (e.g. a [stack|L] operand produced by the
    // register allocator), so dispatch on the source's stack kind instead
    // of assuming double_stack_ix() is valid.
    Address src_addr_LO;
    if (src->is_single_stack())
      src_addr_LO = frame_map()->address_for_slot(src->single_stack_ix(),lo_word_offset_in_bytes);
    else if (src->is_double_stack())
      src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    else
      ShouldNotReachHere();
#else
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
#endif
#ifdef _LP64
    if (src->type() == T_INT)
      __ lw(dest->as_register_lo(), src_addr_LO);
    else
#endif
      __ ld_ptr(dest->as_register_lo(), src_addr_LO);
    // 32-bit only: the high word goes into the second register.
    NOT_LP64(__ ld_ptr(dest->as_register_hi(), src_addr_HI));
  }else if (dest->is_single_fpu()) {
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lwc1(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(),lo_word_offset_in_bytes);
#ifndef _LP64
    // 32-bit: load the double as two word loads into the FPR pair.
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ lwc1(dest->as_double_reg(), src_addr_LO);
    __ lwc1(dest->as_double_reg()+1, src_addr_HI);
#else
    __ ldc1(dest->as_double_reg(), src_addr_LO);
#endif
  } else {
    ShouldNotReachHere();
  }
}
// Copy one stack slot to another through the scratch register AT.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    // A T_OBJECT single-stack slot must be copied at full pointer width:
    // a 32-bit lw/sw pair truncates the oop (observed as a crash with
    // -Xcomp; see history for the original failure).
    // NOTE(review): __ ld/__ sd are 64-bit instructions — presumably this
    // branch is 64-bit only in practice; confirm for 32-bit builds.
    if (type == T_OBJECT )
    {
      __ ld(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ sd(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
    }
    else
    {
      __ lw(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
    }
  } else if (src->is_double_stack()) {
#ifndef _LP64
    // 32-bit: copy the two words separately.
    __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
    __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
    __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix(),4));
    __ sw(AT, frame_map()->address_for_slot(dest ->double_stack_ix(),4));
#else
    __ ld_ptr(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
    __ st_ptr(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
#endif
  } else {
    ShouldNotReachHere();
  }
}
// Load from the memory address described by 'src' into register operand
// 'dest'.  If patching is needed, the displacement is materialized with a
// fixed lui/addiu pair (the instruction at the recorded offset must be a
// MoveMemReg shape so the patcher can rewrite it).  'offset' tracks the
// faulting load for the implicit null-check debug info.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  // The base may be allocated as a single- or double-cpu operand.
  Register src_reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  // When patching, the displacement must always be the lui+addiu pair so
  // the patcher has a fixed shape to rewrite.
  if (needs_patching) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  switch(type) {
    case T_BOOLEAN:
    case T_BYTE: {
      if (disp_reg == noreg) {
        __ lb(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        // Re-record: the faulting instruction is the load, not the add.
        offset = code_offset();
        __ lb(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lb(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_CHAR: {
      // Unsigned 16-bit load for Java char.
      if (disp_reg == noreg) {
        __ lhu(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_SHORT: {
      // Signed 16-bit load.
      if (disp_reg == noreg) {
        __ lh(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        // 32-bit load of the narrow oop; decoded after the switch.
        if (disp_reg == noreg) {
          __ lw(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
        }

      } else {
        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
    case T_ADDRESS:
      if (disp_reg == noreg) {
        __ ld_ptr(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, 0);
      } else {
        __ dadd(AT, src_reg, disp_reg);
        offset = code_offset();
        __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;
    case T_INT: {
      if (disp_reg == noreg) {
        __ lw(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        __ ld_ptr(to_lo, AT, 0);
      } else {
        __ ld_ptr(to_lo, as_Address_lo(addr));
      }
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      // Order the two word loads so the address components are not
      // clobbered before both loads have issued.
      if ((base == to_lo && index == to_hi) ||(base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ lw(to_lo, Address(to_hi));
        __ lw(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_hi, AT, longSize/2);
          __ lw(to_lo, AT, 0);
        } else {
          __ lw(to_hi, as_Address_hi(addr));
          __ lw(to_lo, as_Address_lo(addr));
        }
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_lo, AT, 0);
          __ lw(to_hi, AT, longSize/2);
        } else {
          __ lw(to_lo, as_Address_lo(addr));
          __ lw(to_hi, as_Address_hi(addr));
        }
      }
#endif
    }
    break;

    case T_FLOAT: {
      if (disp_reg == noreg) {
        __ lwc1(dest->as_float_reg(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_DOUBLE: {
      if (disp_reg == noreg) {
#ifndef _LP64
        // 32-bit: load the double as two word loads into the FPR pair.
        __ lwc1(dest->as_double_reg(), src_reg, disp_value);
        __ lwc1(dest->as_double_reg()+1, src_reg, disp_value+4);
#else
        __ ldc1(dest->as_double_reg(), src_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ lwc1(dest->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ ldc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    patching_epilog(patch, patch_code, src_reg, info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}
1726 void LIR_Assembler::prefetchr(LIR_Opr src) {
1727 LIR_Address* addr = src->as_address_ptr();
1728 Address from_addr = as_Address(addr);
1729 }
// Write-prefetch hint.  Intentionally empty: no prefetch instruction is
// emitted on this MIPS port.
void LIR_Assembler::prefetchw(LIR_Opr src) {
}
1735 NEEDS_CLEANUP; // This could be static?
1736 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1737 int elem_size = type2aelembytes(type);
1738 switch (elem_size) {
1739 case 1: return Address::times_1;
1740 case 2: return Address::times_2;
1741 case 4: return Address::times_4;
1742 case 8: return Address::times_8;
1743 }
1744 ShouldNotReachHere();
1745 return Address::no_scale;
1746 }
1749 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1750 switch (op->code()) {
1751 case lir_frem:
1752 arithmetic_frem(
1753 op->code(),
1754 op->in_opr1(),
1755 op->in_opr2(),
1756 op->in_opr3(),
1757 op->result_opr(),
1758 op->info());
1759 break;
1761 case lir_idiv:
1762 case lir_irem:
1763 arithmetic_idiv(
1764 op->code(),
1765 op->in_opr1(),
1766 op->in_opr2(),
1767 op->in_opr3(),
1768 op->result_opr(),
1769 op->info());
1770 break;
1771 default: ShouldNotReachHere(); break;
1772 }
1773 }
1775 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1776 LIR_Opr opr1 = op->left();
1777 LIR_Opr opr2 = op->right();
1778 LIR_Condition condition = op->cond();
1779 #ifdef ASSERT
1780 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1781 if (op->block() != NULL) _branch_target_blocks.append(op->block());
1782 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1783 #endif
1784 if (op->cond() == lir_cond_always) {
1785 if(op->label()==NULL) //by liaob1
1786 __ b(*op->label());
1787 else
1788 __ b_far(*op->label());
1789 __ delayed()->nop();
1790 return;
1791 }
1792 if (opr1->is_single_cpu()) {
1793 Register reg_op1 = opr1->as_register();
1794 if (opr2->is_single_cpu()) {
1795 #ifdef OPT_RANGECHECK
1796 assert(!op->check(), "just check");
1797 #endif
1798 Register reg_op2 = opr2->as_register();
1799 switch (condition) {
1800 case lir_cond_equal:
1801 __ beq(reg_op1, reg_op2, *op->label());
1802 break;
1803 case lir_cond_notEqual:
1804 if(op->label()==NULL)
1805 __ bne(reg_op1, reg_op2, *op->label());//liaobin1
1806 else
1807 __ bne_far(reg_op1, reg_op2, *op->label());//liaobin1
1808 break;
1809 case lir_cond_less:
1810 // AT = 1 TRUE
1811 __ slt(AT, reg_op1, reg_op2);
1812 __ bne_far(AT, R0, *op->label());
1813 break;
1814 case lir_cond_lessEqual:
1815 // AT = 0 TRUE
1816 __ slt(AT, reg_op2, reg_op1);
1817 __ beq_far(AT, R0, *op->label());
1818 break;
1819 case lir_cond_belowEqual:
1820 // AT = 0 TRUE
1821 __ sltu(AT, reg_op2, reg_op1);
1822 __ beq(AT, R0, *op->label());
1823 break;
1824 case lir_cond_greaterEqual:
1825 // AT = 0 TRUE
1826 __ slt(AT, reg_op1, reg_op2);
1827 __ beq_far(AT, R0, *op->label());
1828 break;
1829 case lir_cond_aboveEqual:
1830 // AT = 0 TRUE
1831 __ sltu(AT, reg_op1, reg_op2);
1832 __ beq_far(AT, R0, *op->label());
1833 break;
1834 case lir_cond_greater:
1835 // AT = 1 TRUE
1836 __ slt(AT, reg_op2, reg_op1);
1837 __ bne_far(AT, R0, *op->label());
1838 break;
1839 default: ShouldNotReachHere();
1840 }
1841 } else if (opr2->is_constant()) {
1842 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
1843 bool is_object = false;
1844 if (opr2->pointer()->as_constant()->type() == T_INT) {
1845 temp_value = (jint)(opr2->as_jint());
1846 } else if (opr2->pointer()->as_constant()->type() == T_LONG) {
1847 temp_value = (jlong)(opr2->as_jlong());
1848 } else if (opr2->pointer()->as_constant()->type() == T_OBJECT) {
1849 is_object = true;
1850 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_jobject());
1851 } else {
1852 ShouldNotReachHere();
1853 }
1855 switch (condition) {
1856 case lir_cond_equal:
1857 #ifdef OPT_RANGECHECK
1858 assert(!op->check(), "just check");
1859 #endif
1860 if (temp_value) {
1861 if (is_object) {
1862 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1863 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1864 __ relocate(rspec);
1865 }
1866 __ li(AT, temp_value);
1867 __ beq_far(reg_op1, AT, *op->label());
1868 } else {
1869 __ beq_far(reg_op1, R0, *op->label());
1870 }
1871 break;
1873 case lir_cond_notEqual:
1874 #ifdef OPT_RANGECHECK
1875 assert(!op->check(), "just check");
1876 #endif
1877 if (temp_value) {
1878 if (is_object) {
1879 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1880 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1881 __ relocate(rspec);
1882 }
1883 __ li(AT, temp_value);
1884 __ bne_far(reg_op1, AT, *op->label());
1885 } else {
1886 __ bne_far(reg_op1, R0, *op->label());
1887 }
1888 break;
1890 case lir_cond_less:
1891 #ifdef OPT_RANGECHECK
1892 assert(!op->check(), "just check");
1893 #endif
1894 // AT = 1 TRUE
1895 if (Assembler::is_simm16(temp_value)) {
1896 __ slti(AT, reg_op1, temp_value);
1897 } else {
1898 __ move(AT, temp_value);
1899 __ slt(AT, reg_op1, AT);
1900 }
1901 __ bne_far(AT, R0, *op->label());
1902 break;
1904 case lir_cond_lessEqual:
1905 #ifdef OPT_RANGECHECK
1906 assert(!op->check(), "just check");
1907 #endif
1908 // AT = 0 TRUE
1909 __ li(AT, temp_value);
1910 __ slt(AT, AT, reg_op1);
1911 __ beq(AT, R0, *op->label());
1912 break;
1914 case lir_cond_belowEqual:
1915 // AT = 0 TRUE
1916 #ifdef OPT_RANGECHECK
1917 if (op->check()) {
1918 __ li(AT, temp_value);
1919 add_debug_info_for_range_check_here(op->info(), temp_value);
1920 __ tgeu(AT, reg_op1, 29);
1921 } else {
1922 #endif
1923 __ li(AT, temp_value);
1924 __ sltu(AT, AT, reg_op1);
1925 __ beq(AT, R0, *op->label());
1926 #ifdef OPT_RANGECHECK
1927 }
1928 #endif
1929 break;
1931 case lir_cond_greaterEqual:
1932 #ifdef OPT_RANGECHECK
1933 assert(!op->check(), "just check");
1934 #endif
1935 // AT = 0 TRUE
1936 if (Assembler::is_simm16(temp_value)) {
1937 __ slti(AT, reg_op1, temp_value);
1938 } else {
1939 __ li(AT, temp_value);
1940 __ slt(AT, reg_op1, AT);
1941 }
1942 __ beq(AT, R0, *op->label());
1943 break;
1945 case lir_cond_aboveEqual:
1946 #ifdef OPT_RANGECHECK
1947 assert(!op->check(), "just check");
1948 #endif
1949 // AT = 0 TRUE
1950 if (Assembler::is_simm16(temp_value)) {
1951 __ sltiu(AT, reg_op1, temp_value);
1952 } else {
1953 __ li(AT, temp_value);
1954 __ sltu(AT, reg_op1, AT);
1955 }
1956 __ beq(AT, R0, *op->label());
1957 break;
1959 case lir_cond_greater:
1960 #ifdef OPT_RANGECHECK
1961 assert(!op->check(), "just check");
1962 #endif
1963 // AT = 1 TRUE
1964 __ li(AT, temp_value);
1965 __ slt(AT, AT, reg_op1);
1966 __ bne_far(AT, R0, *op->label());
1967 break;
1969 default: ShouldNotReachHere();
1970 }
1972 } else {
1973 if (opr2->is_address()) {
1974 //FIXME. aoqi lw or ld_ptr?
1975 if (op->type() == T_INT)
1976 __ lw(AT, as_Address(opr2->pointer()->as_address()));
1977 else
1978 __ ld_ptr(AT, as_Address(opr2->pointer()->as_address()));
1979 } else if (opr2->is_stack()) {
1980 //FIXME. aoqi
1981 __ ld_ptr(AT, frame_map()->address_for_slot(opr2->single_stack_ix()));
1982 } else {
1983 ShouldNotReachHere();
1984 }
1985 switch (condition) {
1986 case lir_cond_equal:
1987 #ifdef OPT_RANGECHECK
1988 assert(!op->check(), "just check");
1989 #endif
1990 __ beq(reg_op1, AT, *op->label());
1991 break;
1992 case lir_cond_notEqual:
1993 #ifdef OPT_RANGECHECK
1994 assert(!op->check(), "just check");
1995 #endif
1996 __ bne_far(reg_op1, AT, *op->label());
1997 break;
1998 case lir_cond_less:
1999 #ifdef OPT_RANGECHECK
2000 assert(!op->check(), "just check");
2001 #endif
2002 // AT = 1 TRUE
2003 __ slt(AT, reg_op1, AT);
2004 __ bne_far(AT, R0, *op->label());
2005 break;
2006 case lir_cond_lessEqual:
2007 #ifdef OPT_RANGECHECK
2008 assert(!op->check(), "just check");
2009 #endif
2010 // AT = 0 TRUE
2011 __ slt(AT, AT, reg_op1);
2012 __ beq(AT, R0, *op->label());
2013 break;
2014 case lir_cond_belowEqual:
2015 #ifdef OPT_RANGECHECK
2016 assert(!op->check(), "just check");
2017 #endif
2018 // AT = 0 TRUE
2019 __ sltu(AT, AT, reg_op1);
2020 __ beq(AT, R0, *op->label());
2021 break;
2022 case lir_cond_greaterEqual:
2023 #ifdef OPT_RANGECHECK
2024 assert(!op->check(), "just check");
2025 #endif
2026 // AT = 0 TRUE
2027 __ slt(AT, reg_op1, AT);
2028 __ beq(AT, R0, *op->label());
2029 break;
2030 case lir_cond_aboveEqual:
2031 // AT = 0 TRUE
2032 #ifdef OPT_RANGECHECK
2033 if (op->check()) {
2034 add_debug_info_for_range_check_here(op->info(), opr1->rinfo());
2035 __ tgeu(reg_op1, AT, 29);
2036 } else {
2037 #endif
2038 __ sltu(AT, reg_op1, AT);
2039 __ beq_far(AT, R0, *op->label());
2040 #ifdef OPT_RANGECHECK
2041 }
2042 #endif
2043 break;
2044 case lir_cond_greater:
2045 #ifdef OPT_RANGECHECK
2046 assert(!op->check(), "just check");
2047 #endif
2048 // AT = 1 TRUE
2049 __ slt(AT, AT, reg_op1);
2050 __ bne_far(AT, R0, *op->label());
2051 break;
2052 default: ShouldNotReachHere();
2053 }
2054 }
2055 #ifdef OPT_RANGECHECK
2056 if (!op->check())
2057 #endif
2058 __ delayed()->nop();
2060 } else if(opr1->is_address() || opr1->is_stack()) {
2061 #ifdef OPT_RANGECHECK
2062 assert(!op->check(), "just check");
2063 #endif
2064 if (opr2->is_constant()) {
2065 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
2066 if (opr2->as_constant_ptr()->type() == T_INT) {
2067 temp_value = (jint)opr2->as_constant_ptr()->as_jint();
2068 } else if (opr2->as_constant_ptr()->type() == T_OBJECT) {
2069 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_constant_ptr()->as_jobject());
2070 } else {
2071 ShouldNotReachHere();
2072 }
2074 if (Assembler::is_simm16(temp_value)) {
2075 if (opr1->is_address()) {
2076 __ lw(AT, as_Address(opr1->pointer()->as_address()));
2077 } else {
2078 __ lw(AT, frame_map()->address_for_slot(opr1->single_stack_ix()));
2079 }
2081 switch(condition) {
2083 case lir_cond_equal:
2084 __ addi(AT, AT, -(int)temp_value);
2085 __ beq(AT, R0, *op->label());
2086 break;
2087 case lir_cond_notEqual:
2088 __ addi(AT, AT, -(int)temp_value);
2089 __ bne_far(AT, R0, *op->label());
2090 break;
2091 case lir_cond_less:
2092 // AT = 1 TRUE
2093 __ slti(AT, AT, temp_value);
2094 __ bne_far(AT, R0, *op->label());
2095 break;
2096 case lir_cond_lessEqual:
2097 // AT = 0 TRUE
2098 __ addi(AT, AT, -temp_value);
2099 __ slt(AT, R0, AT);
2100 __ beq(AT, R0, *op->label());
2101 break;
2102 case lir_cond_belowEqual:
2103 // AT = 0 TRUE
2104 __ addiu(AT, AT, -temp_value);
2105 __ sltu(AT, R0, AT);
2106 __ beq(AT, R0, *op->label());
2107 break;
2108 case lir_cond_greaterEqual:
2109 // AT = 0 TRUE
2110 __ slti(AT, AT, temp_value);
2111 __ beq(AT, R0, *op->label());
2112 break;
2113 case lir_cond_aboveEqual:
2114 // AT = 0 TRUE
2115 __ sltiu(AT, AT, temp_value);
2116 __ beq(AT, R0, *op->label());
2117 break;
2118 case lir_cond_greater:
2119 // AT = 1 TRUE
2120 __ addi(AT, AT, -temp_value);
2121 __ slt(AT, R0, AT);
2122 __ bne_far(AT, R0, *op->label());
2123 break;
2125 default:
2126 Unimplemented();
2127 }
2128 } else {
2129 Unimplemented();
2130 }
2131 } else {
2132 Unimplemented();
2133 }
2134 __ delayed()->nop();
2136 } else if(opr1->is_double_cpu()) {
2137 #ifdef OPT_RANGECHECK
2138 assert(!op->check(), "just check");
2139 #endif
2140 Register opr1_lo = opr1->as_register_lo();
2141 Register opr1_hi = opr1->as_register_hi();
2143 if (opr2->is_double_cpu()) {
2144 Register opr2_lo = opr2->as_register_lo();
2145 Register opr2_hi = opr2->as_register_hi();
2146 switch (condition) {
2147 case lir_cond_equal: {
2148 Label L;
2149 #ifndef _LP64
2150 __ bne(opr1_lo, opr2_lo, L);
2151 __ delayed()->nop();
2152 __ beq(opr1_hi, opr2_hi, *op->label());
2153 #else
2154 /* static jobject java.lang.Long.toString(jlong)
2156 10 move [t0t0|J] [a4a4|J]
2157 12 move [lng:-9223372036854775808|J] [a6a6|J]
2158 14 branch [EQ] [a4a4|J] [a6a6|J] [B1]
2159 0x000000555e8532e4: bne a4, a6, 0x000000555e8532e4 <-- error
2160 0x000000555e8532e8: sll zero, zero, 0
2161 */
2162 // __ beq(opr1_lo, opr2_lo, *op->label());
2163 __ beq(opr1_lo, opr2_lo, *op->label());
2164 #endif
2165 __ delayed()->nop();
2166 __ bind(L);
2167 }
2168 break;
2170 case lir_cond_notEqual:
2171 if (op->label()==NULL)
2172 __ bne(opr1_lo, opr2_lo, *op->label());//by liaobin2
2173 else
2174 __ bne_far(opr1_lo, opr2_lo, *op->label());//by liaobin2
2175 __ delayed()->nop();
2176 if (op->label()==NULL)
2177 NOT_LP64(__ bne(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2178 else
2179 NOT_LP64(__ bne_far(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2180 NOT_LP64(__ delayed()->nop());
2181 break;
2183 case lir_cond_less: {
2184 #ifdef _LP64
2185 __ slt(AT, opr1_lo, opr2_lo);
2186 __ bne_far(AT, R0, *op->label());
2187 __ delayed()->nop();
2188 #else
2189 Label L;
2191 // if hi less then jump
2192 __ slt(AT, opr1_hi, opr2_hi);
2193 __ bne(AT, R0, *op->label());
2194 __ delayed()->nop();
2196 // if hi great then fail
2197 __ bne(opr1_hi, opr2_hi, L);
2198 __ delayed();
2200 // now just comp lo as unsigned
2201 __ sltu(AT, opr1_lo, opr2_lo);
2202 __ bne_far(AT, R0, *op->label());
2203 __ delayed()->nop();
2205 __ bind(L);
2206 #endif
2207 }
2208 break;
2210 case lir_cond_lessEqual: {
2211 #ifdef _LP64
2212 __ slt(AT, opr2_lo, opr1_lo);
2213 __ beq_far(AT, R0, *op->label());
2214 __ delayed()->nop();
2215 #else
2216 Label L;
2218 // if hi great then fail
2219 __ slt(AT, opr2_hi, opr1_hi);
2220 __ bne(AT, R0, L);
2221 __ delayed()->nop();
2223 // if hi less then jump
2224 if(op->label()==NULL)
2225 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin4
2226 else
2227 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin4
2228 __ delayed();
2230 // now just comp lo as unsigned
2231 __ sltu(AT, opr2_lo, opr1_lo);
2232 __ beq(AT, R0, *op->label());
2233 __ delayed()->nop();
2235 __ bind(L);
2236 #endif
2237 }
2238 break;
2240 case lir_cond_belowEqual: {
2241 #ifdef _LP64
2242 __ sltu(AT, opr2_lo, opr1_lo);
2243 __ beq(AT, R0, *op->label());
2244 __ delayed()->nop();
2245 #else
2246 Label L;
2248 // if hi great then fail
2249 __ sltu(AT, opr2_hi, opr1_hi);
2250 __ bne_far(AT, R0, L);
2251 __ delayed()->nop();
2253 // if hi less then jump
2254 if(op->label()==NULL)
2255 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin5
2256 else
2257 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin5
2258 __ delayed();
2260 // now just comp lo as unsigned
2261 __ sltu(AT, opr2_lo, opr1_lo);
2262 __ beq(AT, R0, *op->label());
2263 __ delayed()->nop();
2265 __ bind(L);
2266 #endif
2267 }
2268 break;
2270 case lir_cond_greaterEqual: {
2271 #ifdef _LP64
2272 __ slt(AT, opr1_lo, opr2_lo);
2273 __ beq_far(AT, R0, *op->label());
2274 __ delayed()->nop();
2275 #else
2276 Label L;
2278 // if hi less then fail
2279 __ slt(AT, opr1_hi, opr2_hi);
2280 __ bne_far(AT, R0, L);
2281 __ delayed()->nop();
2283 // if hi great then jump
2284 if(op->label()==NULL)
2285 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin6
2286 else
2287 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin6
2288 __ delayed();
2290 // now just comp lo as unsigned
2291 __ sltu(AT, opr1_lo, opr2_lo);
2292 __ beq(AT, R0, *op->label());
2293 __ delayed()->nop();
2295 __ bind(L);
2296 #endif
2297 }
2298 break;
2300 case lir_cond_aboveEqual: {
2301 #ifdef _LP64
2302 __ sltu(AT, opr1_lo, opr2_lo);
2303 __ beq_far(AT, R0, *op->label());
2304 __ delayed()->nop();
2305 #else
2306 Label L;
2308 // if hi less then fail
2309 __ sltu(AT, opr1_hi, opr2_hi);
2310 __ bne(AT, R0, L);
2311 __ delayed()->nop();
2313 // if hi great then jump
2314 if(op->label()==NULL)
2315 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin7
2316 else
2317 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin7
2318 __ delayed();
2320 // now just comp lo as unsigned
2321 __ sltu(AT, opr1_lo, opr2_lo);
2322 __ beq(AT, R0, *op->label());
2323 __ delayed()->nop();
2325 __ bind(L);
2326 #endif
2327 }
2328 break;
2330 case lir_cond_greater: {
2331 #ifdef _LP64
2332 __ slt(AT, opr2_lo, opr1_lo);
2333 __ bne_far(AT, R0, *op->label());
2334 __ delayed()->nop();
2335 #else
2336 Label L;
2338 // if hi great then jump
2339 __ slt(AT, opr2_hi, opr1_hi);
2340 __ bne(AT, R0, *op->label());
2341 __ delayed()->nop();
2343 // if hi less then fail
2344 __ bne(opr2_hi, opr1_hi, L);
2345 __ delayed();
2347 // now just comp lo as unsigned
2348 __ sltu(AT, opr2_lo, opr1_lo);
2349 __ bne(AT, R0, *op->label());
2350 __ delayed()->nop();
2352 __ bind(L);
2353 #endif
2354 }
2355 break;
2357 default: ShouldNotReachHere();
2358 }
2360 } else if(opr2->is_constant()) {
2361 jlong lv = opr2->as_jlong();
2362 #ifndef _LP64
2363 jint iv_lo = (jint)lv;
2364 jint iv_hi = (jint)(lv>>32);
2365 bool is_zero = (lv==0);
2366 #endif
2368 switch (condition) {
2369 case lir_cond_equal:
2370 #ifdef _LP64
2371 __ li(T8, lv);
2372 __ beq(opr1_lo, T8, *op->label());
2373 __ delayed()->nop();
2374 #else
2375 if (is_zero) {
2376 __ orr(AT, opr1_lo, opr1_hi);
2377 __ beq(AT, R0, *op->label());
2378 __ delayed()->nop();
2379 } else {
2380 Label L;
2381 __ move(T8, iv_lo);
2382 __ bne(opr1_lo, T8, L);
2383 __ delayed();
2384 __ move(T8, iv_hi);
2385 __ beq(opr1_hi, T8, *op->label());
2386 __ delayed()->nop();
2387 __ bind(L);
2388 }
2389 #endif
2390 break;
2392 case lir_cond_notEqual:
2393 #ifdef _LP64
2394 __ li(T8, lv);
2395 __ bne(opr1_lo, T8, *op->label());
2396 __ delayed()->nop();
2397 #else
2398 if (is_zero) {
2399 __ orr(AT, opr1_lo, opr1_hi);
2400 __ bne(AT, R0, *op->label());
2401 __ delayed()->nop();
2402 } else {
2403 __ move(T8, iv_lo);
2404 __ bne(opr1_lo, T8, *op->label());
2405 __ delayed();
2406 __ move(T8, iv_hi);
2407 __ bne(opr1_hi, T8, *op->label());
2408 __ delayed()->nop();
2409 }
2410 #endif
2411 break;
2413 case lir_cond_less:
2414 #ifdef _LP64
2415 __ li(T8, lv);
2416 __ slt(AT, opr1_lo, T8);
2417 __ bne_far(AT, R0, *op->label());
2418 __ delayed()->nop();
2419 #else
2420 if (is_zero) {
2421 __ bltz(opr1_hi, *op->label());
2422 __ bltz(opr1_lo, *op->label());
2423 __ delayed()->nop();
2424 } else {
2425 Label L;
2427 // if hi less then jump
2428 __ move(T8, iv_hi);
2429 __ slt(AT, opr1_hi, T8);
2430 __ bne_far(AT, R0, *op->label());
2431 __ delayed()->nop();
2433 // if hi great then fail
2434 __ bne(opr1_hi, T8, L);
2435 __ delayed();
2437 // now just comp lo as unsigned
2438 if (Assembler::is_simm16(iv_lo)) {
2439 __ sltiu(AT, opr1_lo, iv_lo);
2440 } else {
2441 __ move(T8, iv_lo);
2442 __ sltu(AT, opr1_lo, T8);
2443 }
2444 __ bne(AT, R0, *op->label());
2445 __ delayed()->nop();
2447 __ bind(L);
2448 }
2449 #endif
2450 break;
2452 case lir_cond_lessEqual:
2453 #ifdef _LP64
2454 __ li(T8, lv);
2455 __ slt(AT, T8, opr1_lo);
2456 __ beq(AT, R0, *op->label());
2457 __ delayed()->nop();
2458 #else
2459 if (is_zero) {
2460 __ bltz(opr1_hi, *op->label());
2461 __ delayed()->nop();
2462 __ orr(AT, opr1_hi, opr1_lo);
2463 __ beq(AT, R0, *op->label());
2464 __ delayed();
2465 } else {
2466 Label L;
2468 // if hi great then fail
2469 __ move(T8, iv_hi);
2470 __ slt(AT, T8, opr1_hi);
2471 __ bne(AT, R0, L);
2472 __ delayed()->nop();
2474 // if hi less then jump
2475 __ bne(T8, opr1_hi, *op->label());
2476 __ delayed();
2478 // now just comp lo as unsigned
2479 __ move(T8, iv_lo);
2480 __ sltu(AT, T8, opr1_lo);
2481 __ beq(AT, R0, *op->label());
2482 __ delayed()->nop();
2484 __ bind(L);
2485 }
2486 #endif
2487 break;
2489 case lir_cond_belowEqual:
2490 #ifdef _LP64
2491 __ li(T8, lv);
2492 __ sltu(AT, T8, opr1_lo);
2493 __ beq(AT, R0, *op->label());
2494 __ delayed()->nop();
2495 #else
2496 if (is_zero) {
2497 __ orr(AT, opr1_hi, opr1_lo);
2498 __ beq(AT, R0, *op->label());
2499 __ delayed()->nop();
2500 } else {
2501 Label L;
2503 // if hi great then fail
2504 __ move(T8, iv_hi);
2505 __ sltu(AT, T8, opr1_hi);
2506 __ bne(AT, R0, L);
2507 __ delayed()->nop();
2509 // if hi less then jump
2510 __ bne(T8, opr1_hi, *op->label());
2511 __ delayed();
2513 // now just comp lo as unsigned
2514 __ move(T8, iv_lo);
2515 __ sltu(AT, T8, opr1_lo);
2516 __ beq(AT, R0, *op->label());
2517 __ delayed()->nop();
2519 __ bind(L);
2520 }
2521 #endif
2522 break;
2524 case lir_cond_greaterEqual:
2525 #ifdef _LP64
2526 __ li(T8, lv);
2527 __ slt(AT, opr1_lo, T8);
2528 __ beq(AT, R0, *op->label());
2529 __ delayed()->nop();
2530 #else
2531 if (is_zero) {
2532 __ bgez(opr1_hi, *op->label());
2533 __ delayed()->nop();
2534 } else {
2535 Label L;
2537 // if hi less then fail
2538 __ move(T8, iv_hi);
2539 __ slt(AT, opr1_hi, T8);
2540 __ bne(AT, R0, L);
2541 __ delayed()->nop();
2543 // if hi great then jump
2544 __ bne(T8, opr1_hi, *op->label());
2545 __ delayed();
2547 // now just comp lo as unsigned
2548 if (Assembler::is_simm16(iv_lo)) {
2549 __ sltiu(AT, opr1_lo, iv_lo);
2550 } else {
2551 __ move(T8, iv_lo);
2552 __ sltu(AT, opr1_lo, T8);
2553 }
2554 __ beq(AT, R0, *op->label());
2555 __ delayed()->nop();
2557 __ bind(L);
2558 }
2559 #endif
2560 break;
2562 case lir_cond_aboveEqual:
2563 #ifdef _LP64
2564 __ li(T8, lv);
2565 __ sltu(AT, opr1_lo, T8);
2566 __ beq(AT, R0, *op->label());
2567 __ delayed()->nop();
2568 #else
2569 if (is_zero) {
2570 if(op->label()==NULL) //by liaob2
2571 __ b(*op->label());
2572 else
2573 __ b_far(*op->label());
2574 __ delayed()->nop();
2575 } else {
2576 Label L;
2578 // if hi less then fail
2579 __ move(T8, iv_hi);
2580 __ sltu(AT, opr1_hi, T8);
2581 __ bne(AT, R0, L);
2582 __ delayed()->nop();
2584 // if hi great then jump
2585 __ bne(T8, opr1_hi, *op->label());
2586 __ delayed();
2588 // now just comp lo as unsigned
2589 if (Assembler::is_simm16(iv_lo)) {
2590 __ sltiu(AT, opr1_lo, iv_lo);
2591 } else {
2592 __ move(T8, iv_lo);
2593 __ sltu(AT, opr1_lo, T8);
2594 }
2595 __ beq(AT, R0, *op->label());
2596 __ delayed()->nop();
2598 __ bind(L);
2599 }
2600 #endif
2601 break;
2603 case lir_cond_greater:
2604 #ifdef _LP64
2605 __ li(T8, lv);
2606 __ slt(AT, T8, opr1_lo);
2607 __ bne_far(AT, R0, *op->label());
2608 __ delayed()->nop();
2609 #else
2610 if (is_zero) {
2611 Label L;
2612 __ bgtz(opr1_hi, *op->label());
2613 __ delayed()->nop();
2614 __ bne(opr1_hi, R0, L);
2615 __ delayed()->nop();
2616 __ bne(opr1_lo, R0, *op->label());
2617 __ delayed()->nop();
2618 __ bind(L);
2619 } else {
2620 Label L;
2622 // if hi great then jump
2623 __ move(T8, iv_hi);
2624 __ slt(AT, T8, opr1_hi);
2625 __ bne(AT, R0, *op->label());
2626 __ delayed()->nop();
2628 // if hi less then fail
2629 __ bne(T8, opr1_hi, L);
2630 __ delayed();
2632 // now just comp lo as unsigned
2633 __ move(T8, iv_lo);
2634 __ sltu(AT, T8, opr1_lo);
2635 __ bne(AT, R0, *op->label());
2636 __ delayed()->nop();
2638 __ bind(L);
2639 }
2640 #endif
2641 break;
2643 default:
2644 ShouldNotReachHere();
2645 }
2646 } else {
2647 Unimplemented();
2648 }
2649 } else if (opr1->is_single_fpu()) {
2650 #ifdef OPT_RANGECHECK
2651 assert(!op->check(), "just check");
2652 #endif
2653 assert(opr2->is_single_fpu(), "change the code");
2655 FloatRegister reg_op1 = opr1->as_float_reg();
2656 FloatRegister reg_op2 = opr2->as_float_reg();
2657 // bool un_ls
2658 bool un_jump = (op->ublock()->label()==op->label());
2660 Label& L = *op->label();
2662 switch (condition) {
2663 case lir_cond_equal:
2664 if (un_jump)
2665 __ c_ueq_s(reg_op1, reg_op2);
2666 else
2667 __ c_eq_s(reg_op1, reg_op2);
2668 __ bc1t(L);
2670 break;
2672 case lir_cond_notEqual:
2673 if (un_jump)
2674 __ c_eq_s(reg_op1, reg_op2);
2675 else
2676 __ c_ueq_s(reg_op1, reg_op2);
2677 __ bc1f(L);
2679 break;
2681 case lir_cond_less:
2682 if (un_jump)
2683 __ c_ult_s(reg_op1, reg_op2);
2684 else
2685 __ c_olt_s(reg_op1, reg_op2);
2686 __ bc1t(L);
2688 break;
2690 case lir_cond_lessEqual:
2691 case lir_cond_belowEqual:
2692 if (un_jump)
2693 __ c_ule_s(reg_op1, reg_op2);
2694 else
2695 __ c_ole_s(reg_op1, reg_op2);
2696 __ bc1t(L);
2698 break;
2700 case lir_cond_greaterEqual:
2701 case lir_cond_aboveEqual:
2702 if (un_jump)
2703 __ c_olt_s(reg_op1, reg_op2);
2704 else
2705 __ c_ult_s(reg_op1, reg_op2);
2706 __ bc1f(L);
2708 break;
2710 case lir_cond_greater:
2711 if (un_jump)
2712 __ c_ole_s(reg_op1, reg_op2);
2713 else
2714 __ c_ule_s(reg_op1, reg_op2);
2715 __ bc1f(L);
2717 break;
2719 default:
2720 ShouldNotReachHere();
2721 }
2722 __ delayed()->nop();
2723 } else if (opr1->is_double_fpu()) {
2724 #ifdef OPT_RANGECHECK
2725 assert(!op->check(), "just check");
2726 #endif
2727 assert(opr2->is_double_fpu(), "change the code");
2729 FloatRegister reg_op1 = opr1->as_double_reg();
2730 FloatRegister reg_op2 = opr2->as_double_reg();
2731 bool un_jump = (op->ublock()->label()==op->label());
2732 Label& L = *op->label();
2734 switch (condition) {
2735 case lir_cond_equal:
2736 if (un_jump)
2737 __ c_ueq_d(reg_op1, reg_op2);
2738 else
2739 __ c_eq_d(reg_op1, reg_op2);
2740 __ bc1t(L);
2742 break;
2744 case lir_cond_notEqual:
2745 if (un_jump)
2746 __ c_eq_d(reg_op1, reg_op2);
2747 else
2748 __ c_ueq_d(reg_op1, reg_op2);
2749 __ bc1f(L);
2751 break;
2753 case lir_cond_less:
2754 if (un_jump)
2755 __ c_ult_d(reg_op1, reg_op2);
2756 else
2757 __ c_olt_d(reg_op1, reg_op2);
2758 __ bc1t(L);
2760 break;
2762 case lir_cond_lessEqual:
2763 case lir_cond_belowEqual:
2764 if (un_jump)
2765 __ c_ule_d(reg_op1, reg_op2);
2766 else
2767 __ c_ole_d(reg_op1, reg_op2);
2768 __ bc1t(L);
2770 break;
2772 case lir_cond_greaterEqual:
2773 case lir_cond_aboveEqual:
2774 if (un_jump)
2775 __ c_olt_d(reg_op1, reg_op2);
2776 else
2777 __ c_ult_d(reg_op1, reg_op2);
2778 __ bc1f(L);
2780 break;
2782 case lir_cond_greater:
2783 if (un_jump)
2784 __ c_ole_d(reg_op1, reg_op2);
2785 else
2786 __ c_ule_d(reg_op1, reg_op2);
2787 __ bc1f(L);
2789 break;
2791 default:
2792 ShouldNotReachHere();
2793 }
2794 __ delayed()->nop();
2795 } else {
2796 Unimplemented();
2797 }
2798 }
// Emit code for a LIR convert operation: primitive-to-primitive conversions
// (i2l/l2i, the narrowing int casts, and all int/long <-> float/double
// conversions) on MIPS.
//
// For float/double -> int/long (_f2i, _d2i, _f2l, _d2l) the code first tries
// the hardware trunc instruction and then inspects the FCSR (FP control
// register 31): if the 0x10000 bit is set the truncation was invalid
// (NaN/overflow), so the SharedRuntime conversion helper is called instead to
// get the Java-specified result.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr value = op->in_opr();  // alias of src below -- both are op->in_opr()
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();
  Bytecodes::Code code = op->bytecode();

  switch (code) {
    case Bytecodes::_i2l:
      // Copy the int into the low register; on 32-bit, fill the high
      // register with the sign (arithmetic shift right by 31).
      move_regs(src->as_register(), dest->as_register_lo());
      NOT_LP64(__ sra (dest->as_register_hi(), dest->as_register_lo(), 31));
      break;

    case Bytecodes::_l2i:
#ifndef _LP64
      move_regs (src->as_register_lo(), dest->as_register());
#else
      // Shift left by 32 then arithmetic right by 32: keeps the low 32 bits,
      // sign-extended, which is the canonical int representation on MIPS64.
      __ dsll32(dest->as_register(), src->as_register_lo(), 0);
      __ dsra32(dest->as_register(), dest->as_register(), 0);
#endif
      break;

    case Bytecodes::_i2b:
#ifndef _LP64
      move_regs (src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
#else
      // dsll32/dsra32 shift by 32+24 = 56 each way: keep the low byte,
      // sign-extended.
      __ dsll32(dest->as_register(), src->as_register(), 24);
      __ dsra32(dest->as_register(), dest->as_register(), 24);
#endif
      break;

    case Bytecodes::_i2c:
      // Java char is unsigned 16-bit: a mask is sufficient.
      __ andi(dest->as_register(), src->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
#ifndef _LP64
      move_regs (src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
#else
      // Shift by 32+16 = 48 each way: keep the low half-word, sign-extended.
      __ dsll32(dest->as_register(), src->as_register(), 16);
      __ dsra32(dest->as_register(), dest->as_register(), 16);
#endif
      break;

    case Bytecodes::_f2d:
      __ cvt_d_s(dest->as_double_reg(), src->as_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvt_s_d(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f: {
      FloatRegister df = dest->as_float_reg();
      if(src->is_single_cpu()) {
        // Move the int bits into the FPU, then convert word -> single.
        __ mtc1(src->as_register(), df);
        __ cvt_s_w(df, df);
      } else if (src->is_stack()) {
        // Source lives in a stack slot: load it through AT first.
        Address src_addr = src->is_single_stack()
          ? frame_map()->address_for_slot(src->single_stack_ix())
          : frame_map()->address_for_slot(src->double_stack_ix());
        __ lw(AT, src_addr);
        __ mtc1(AT, df);
        __ cvt_s_w(df, df);
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_i2d: {
      FloatRegister dd = dest->as_double_reg();
      if (src->is_single_cpu()) {
        // Move the int bits into the FPU, then convert word -> double.
        __ mtc1(src->as_register(), dd);
        __ cvt_d_w(dd, dd);
      } else if (src->is_stack()) {
        // (value is the same operand as src, see declarations above)
        Address src_addr = src->is_single_stack()
          ? frame_map()->address_for_slot(value->single_stack_ix())
          : frame_map()->address_for_slot(value->double_stack_ix());
        __ lw(AT, src_addr);
        __ mtc1(AT, dd);
        __ cvt_d_w(dd, dd);
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister fval = src->as_float_reg();
      Register dreg = dest->as_register();

      Label L;
      // NaN converts to 0: compare unordered with itself, and if NaN,
      // skip to L with dreg already zeroed (the move sits in the delay slot).
      __ c_un_s(fval, fval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dreg, R0);

      __ trunc_w_s(F30, fval);

      /* Call SharedRuntime:f2i() to do valid convention */
      // Read FCSR; bit 16 (0x10000) reports an invalid truncation
      // (out-of-range value). If clear, take the truncated result from F30
      // (moved in the delay slot) and we are done.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->mfc1(dreg, F30);

      // Invalid: let the runtime helper produce the Java-specified result.
      __ mov_s(F12, fval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      __ move(dreg, V0);
      __ bind(L);
      break;
    }
    case Bytecodes::_d2i: {
      FloatRegister dval = src->as_double_reg();
      Register dreg = dest->as_register();

      Label L;
#ifndef _LP64
      // 32-bit only: explicit NaN pre-check (NaN -> 0), as in _f2i.
      __ c_un_d(dval, dval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dreg, R0);
#endif

      __ trunc_w_d(F30, dval);
      // Same FCSR invalid-operation check as in _f2i; fall back to the
      // SharedRuntime::d2i helper when the hardware truncation was invalid.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->mfc1(dreg, F30);

      __ mov_d(F12, dval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
      __ move(dreg, V0);
      __ bind(L);
      break;
    }
    case Bytecodes::_l2f: {
      FloatRegister ldf = dest->as_float_reg();
      if (src->is_double_cpu()) {
#ifndef _LP64
        // 32-bit: move both halves into an even/odd FPU register pair.
        __ mtc1(src->as_register_lo(), ldf);
        __ mtc1(src->as_register_hi(), ldf + 1);
        __ cvt_s_l(ldf, ldf);
#else
        __ dmtc1(src->as_register_lo(), ldf);
        __ cvt_s_l(ldf, ldf);
#endif
      } else if (src->is_double_stack()) {
        // (value is the same operand as src, see declarations above)
        Address src_addr=frame_map()->address_for_slot(value->double_stack_ix());
#ifndef _LP64
        // Load the two 32-bit halves of the long separately.
        __ lw(AT, src_addr);
        __ mtc1(AT, ldf);
        __ lw(AT, src_addr.base(), src_addr.disp() + 4);
        __ mtc1(AT, ldf + 1);
        __ cvt_s_l(ldf, ldf);
#else
        __ ld(AT, src_addr);
        __ dmtc1(AT, ldf);
        __ cvt_s_l(ldf, ldf);
#endif
      } else {
        Unimplemented();
      }
      break;
    }
    case Bytecodes::_l2d: {
      FloatRegister ldd = dest->as_double_reg();
      if (src->is_double_cpu()) {
#ifndef _LP64
        // 32-bit: move both halves into an even/odd FPU register pair.
        __ mtc1(src->as_register_lo(), ldd);
        __ mtc1(src->as_register_hi(), ldd + 1);
        __ cvt_d_l(ldd, ldd);
#else
        __ dmtc1(src->as_register_lo(), ldd);
        __ cvt_d_l(ldd, ldd);
#endif
      } else if (src->is_double_stack()) {
        Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
#ifndef _LP64
        // Load the two 32-bit halves of the long separately.
        __ lw(AT, src_addr);
        __ mtc1(AT, ldd);
        __ lw(AT, src_addr.base(), src_addr.disp() + 4);
        __ mtc1(AT, ldd + 1);
        __ cvt_d_l(ldd, ldd);
#else
        __ ld(AT, src_addr);
        __ dmtc1(AT, ldd);
        __ cvt_d_l(ldd, ldd);
#endif
      } else {
        Unimplemented();
      }
      break;
    }

    case Bytecodes::_f2l: {
      FloatRegister fval = src->as_float_reg();
      Register dlo = dest->as_register_lo();
      Register dhi = dest->as_register_hi();

      Label L;
      // NaN converts to 0L: zero the (32-bit) high half up front, and zero
      // the low half in the branch delay slot of the NaN check.
      __ move(dhi, R0);
      __ c_un_s(fval, fval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dlo, R0);

      __ trunc_l_s(F30, fval);
#ifdef _LP64
      // Same FCSR invalid-operation check as _f2i/_d2i; fall back to
      // SharedRuntime::f2l when the hardware truncation was invalid.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->dmfc1(dlo, F30);

      __ mov_s(F12, fval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      __ move(dlo, V0);
#else
      __ mfc1(dlo, F30);
#endif
      NOT_LP64(__ mfc1(dhi, F31));
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l: {
      FloatRegister dval = src->as_double_reg();
      Register dlo = dest->as_register_lo();
      Register dhi = dest->as_register_hi();

      Label L;
      // NaN converts to 0L, as in _f2l.
      __ move(dhi, R0);
      __ c_un_d(dval, dval); //NaN?
      __ bc1t(L);
      __ delayed();
      __ move(dlo, R0);

      __ trunc_l_d(F30, dval);
#ifdef _LP64
      // FCSR invalid-operation check; fall back to SharedRuntime::d2l.
      __ cfc1(AT, 31);
      __ li(T9, 0x10000);
      __ andr(AT, AT, T9);
      __ beq(AT, R0, L);
      __ delayed()->dmfc1(dlo, F30);

      __ mov_d(F12, dval);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
      __ move(dlo, V0);
#else
      __ mfc1(dlo, F30);
      __ mfc1(dhi, F31);
#endif
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}
3061 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
3062 if (op->init_check()) {
3063 add_debug_info_for_null_check_here(op->stub()->info());
3064 __ lw(AT,Address(op->klass()->as_register(),
3065 InstanceKlass::init_state_offset()));
3066 __ addi(AT, AT, -InstanceKlass::fully_initialized);
3067 __ bne_far(AT, R0,*op->stub()->entry());
3068 __ delayed()->nop();
3069 }
3070 __ allocate_object(
3071 op->obj()->as_register(),
3072 op->tmp1()->as_register(),
3073 op->tmp2()->as_register(),
3074 op->header_size(),
3075 op->object_size(),
3076 op->klass()->as_register(),
3077 *op->stub()->entry());
3079 __ bind(*op->stub()->continuation());
3080 }
3082 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
3083 if (UseSlowPath ||
3084 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
3085 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
3086 __ b_far(*op->stub()->entry());
3087 __ delayed()->nop();
3088 } else {
3089 Register len = op->len()->as_register();
3090 Register tmp1 = op->tmp1()->as_register();
3091 Register tmp2 = op->tmp2()->as_register();
3092 Register tmp3 = op->tmp3()->as_register();
3093 __ allocate_array(op->obj()->as_register(),
3094 len,
3095 tmp1,
3096 tmp2,
3097 tmp3,
3098 arrayOopDesc::header_size(op->type()),
3099 array_element_size(op->type()),
3100 op->klass()->as_register(),
3101 *op->stub()->entry());
3102 }
3103 __ bind(*op->stub()->continuation());
3104 }
3108 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
3109 LIR_Code code = op->code();
3110 // if (code == lir_store_check) {
3111 Register value = op->object()->as_register();
3112 Register array = op->array()->as_register();
3113 Register k_RInfo = op->tmp1()->as_register();
3114 Register klass_RInfo = op->tmp2()->as_register();
3115 Register tmp = op->tmp3()->as_register();
3117 CodeStub* stub = op->stub();
3118 //check if it needs to be profiled
3119 ciMethodData* md;
3120 ciProfileData* data;
3121 if (op->should_profile()) {
3122 ciMethod* method = op->profiled_method();
3123 assert(method != NULL, "Should have method");
3124 int bci = op->profiled_bci();
3125 md = method->method_data_or_null();
3126 assert(md != NULL, "Sanity");
3127 data = md->bci_to_data(bci);
3128 assert(data != NULL, "need data for type check");
3129 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
3130 }
3131 Label profile_cast_success, profile_cast_failure, done;
3132 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
3133 Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
3134 //__ cmpptr(value, (int32_t)NULL_WORD);
3135 if(op->should_profile()) {
3136 Label not_null;
3137 __ bne(value, R0, not_null);
3138 __ delayed()->nop();
3140 // __ jcc(Assembler::notEqual, profile_done);
3141 // __ bne(obj, R0, profile_done);
3142 //__ delayed()->nop();
3144 // Object is null; update methodDataOop
3145 //ciMethodData* md = method->method_data();
3146 //if (md == NULL) {
3147 // bailout("out of memory building methodDataOop");
3148 // return;
3149 // }
3150 // ciProfileData* data = md->bci_to_data(bci);
3151 //assert(data != NULL, "need data for checkcast");
3152 // assert(data->is_BitData(), "need BitData for checkcast");
3153 Register mdo = klass_RInfo;
3154 int oop_index = __ oop_recorder()->find_index(md->constant_encoding());
3155 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3156 __ relocate(rspec);
3157 #ifndef _LP64
3158 //by_css
3159 __ lui(mdo, Assembler::split_high((int)md->constant_encoding()));
3160 __ addiu(mdo, mdo, Assembler::split_low((int)md->consant_encoding()));
3161 #else
3162 __ li48(mdo, (long)md->constant_encoding());
3163 #endif
3165 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3166 //FIXME, it very ineffictive to replace orl with 3 mips instruction @jerome, 12/27,06
3167 //__ orl(data_addr, BitData::null_flag_constant());
3168 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3169 __ lw(AT, data_addr);
3170 __ ori(AT, AT, header_bits);
3171 __ sw(AT,data_addr);
3172 __ b(done);
3173 __ delayed()->nop();
3174 __ bind(not_null);
3175 } else {
3176 __ beq(value, R0, done);
3177 __ delayed()->nop();
3178 }
3179 //__ verify_oop(obj);
3180 add_debug_info_for_null_check_here(op->info_for_exception());
3181 __ load_klass(k_RInfo, array);
3182 __ load_klass(klass_RInfo, value);
3183 // get instance klass (it's already uncompressed)
3184 //__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
3185 __ daddi (k_RInfo, k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()));
3186 // perform the fast part of the checking logic
3187 //__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
3188 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
3189 //1899 __ push(klass_RInfo);
3190 //1900 __ push(k_RInfo);
3191 //1901 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3192 //1902 __ pop(klass_RInfo);
3193 //1903 __ pop(k_RInfo);
3194 //1904 // result is a boolean
3195 ///1905 __ cmpl(k_RInfo, 0);
3196 //1906 __ jcc(Assembler::equal, *failure_target);
3197 //1907 // fall through to the success case
3198 //1908
3199 //1909 if (op->should_profile()) {
3200 //1910 Register mdo = klass_RInfo, recv = k_RInfo;
3201 //1911 __ bind(profile_cast_success);
3202 //1912 __ mov_metadata(mdo, md->constant_encoding());
3203 //1913 __ load_klass(recv, value);
3204 //1914 Label update_done;
3205 //1915 type_profile_helper(mdo, md, data, recv, &done);
3206 //1916 __ jmpb(done);
3207 //1917
3208 //1918 __ bind(profile_cast_failure);
3209 //1919 __ mov_metadata(mdo, md->constant_encoding());
3210 //1920 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3211 //1921 __ subptr(counter_addr, DataLayout::counter_increment);
3212 //1922 __ jmp(*stub->entry());
3213 //1923 }
3214 //1925 __ bind(done);
3215 //1926 } else
3216 //1927 if (code == lir_checkcast) {
3217 //1928 Register obj = op->object()->as_register();
3218 //1929 Register dst = op->result_opr()->as_register();
3219 //1930 Label success;
3220 //1931 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
3221 //1932 __ bind(success);
3222 //1933 if (dst != obj) {
3223 //1934 __ mov(dst, obj);
3224 //1935 }
3225 //1936 } else
3226 //1937 if (code == lir_instanceof) {
3227 //1938 Register obj = op->object()->as_register();
3228 ///1939 Register dst = op->result_opr()->as_register();
3229 //1940 Label success, failure, done;
3230 //1941 emit_typecheck_helper(op, &success, &failure, &failure);
3231 ///1942 __ bind(failure);
3232 //1943 __ xorptr(dst, dst);
3233 //1944 __ jmpb(done);
3234 //1945 __ bind(success);
3235 //1946 __ movptr(dst, 1);
3236 //1947 __ bind(done);
3237 //1948 } else {
3238 //1949 ShouldNotReachHere();
3239 //1950 }
3240 //FIXME:wuhui.
3242 }
3245 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
3246 if (op->code() == lir_cas_long) {
3247 #ifdef _LP64
3248 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3249 Register newval = (op->new_value()->is_single_cpu() ? op->new_value()->as_register() : op->new_value()->as_register_lo());
3250 Register cmpval = (op->cmp_value()->is_single_cpu() ? op->cmp_value()->as_register() : op->cmp_value()->as_register_lo());
3251 assert(newval != NULL, "new val must be register");
3252 assert(cmpval != newval, "cmp and new values must be in different registers");
3253 assert(cmpval != addr, "cmp and addr must be in different registers");
3254 assert(newval != addr, "new value and addr must be in different registers");
3255 if (os::is_MP()) {}
3256 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3257 #else
3258 Register addr = op->addr()->as_register();
3259 if (os::is_MP()) {}
3260 __ cmpxchg8(op->new_value()->as_register_lo(),
3261 op->new_value()->as_register_hi(),
3262 addr,
3263 op->cmp_value()->as_register_lo(),
3264 op->cmp_value()->as_register_hi())
3265 #endif
3266 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
3267 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
3268 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3269 Register newval = op->new_value()->as_register();
3270 Register cmpval = op->cmp_value()->as_register();
3271 assert(newval != NULL, "new val must be register");
3272 assert(cmpval != newval, "cmp and new values must be in different registers");
3273 assert(cmpval != addr, "cmp and addr must be in different registers");
3274 assert(newval != addr, "new value and addr must be in different registers");
3275 if (op->code() == lir_cas_obj) {
3276 #ifdef _LP64
3277 if (UseCompressedOops) {
3278 Register tmp_reg = S7;
3279 __ push(cmpval);
3280 __ encode_heap_oop(cmpval);
3281 __ move(tmp_reg, newval);
3282 __ encode_heap_oop(tmp_reg);
3283 if (os::is_MP()) {}
3284 __ cmpxchg32(tmp_reg, addr, cmpval); // 32-bit test-and-set
3285 __ pop(cmpval);
3286 } else
3287 {
3288 if (os::is_MP()) {}
3289 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3290 }
3291 } else
3292 #endif
3293 {
3294 __ cmpxchg32(newval, addr, cmpval); // 32-bit test-and-set
3295 }
3296 } else {
3297 Unimplemented();
3298 }
3299 }
#ifndef MIPS64
// Conditional move is not implemented for the 32-bit MIPS port;
// reaching here is a fatal error.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  Unimplemented();
}
#endif
// Emit integer/float arithmetic (add/sub/mul, and FP div) for every operand
// combination: register-register, register-constant, FP register pairs, and
// memory (stack slot / address) destinations.
// NOTE: statement order below is the emitted instruction order; the nop pairs
// after mult/dmult pad the hi/lo result latency and must not be reordered.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info,bool pop_fpu_stack) {
  assert(info == NULL || ((code == lir_rem || code == lir_div || code == lir_sub) && right->is_double_cpu()), "info is only for ldiv/lrem");

  if (left->is_single_cpu()) {
    // left may not be equal to dest on mips.
    //assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_cpu_register()) {
      // cpu register - cpu register
      Register rreg, res;
      if (right->is_single_cpu()) {
        rreg = right->as_register();
#ifdef _LP64
        // A single-cpu operand may still produce a double-cpu (long) result.
        if(dest->is_double_cpu())
          res = dest->as_register_lo();
        else
#endif
          res = dest->as_register();
      } else if (right->is_double_cpu()) {
        assert(right->is_double_cpu(),"right must be long");
        rreg = right->as_register_lo();
        res = dest->as_register_lo();
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add:
#ifdef _LP64
          // 32-bit add must sign-extend the result on a 64-bit register file.
          if (dest->type() == T_INT)
            __ addu32(res, lreg, rreg);
          else
#endif
            __ addu(res, lreg, rreg);
          break;

        case lir_mul:
#ifndef _LP64
          //by aoqi
          __ mult(lreg, rreg);
#else
          __ dmult(lreg, rreg);
#endif
          // Two nops pad the multiplier pipeline before reading LO.
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           *    Example: java.net.URLClassLoader::string2int()
           *   a6: 0xcafebab
           *   s0: 16
           *
           *   104 mul [a6|I] [s0|I] [t0|I]
           0x00000055655e3728: dmult a6, s0
           0x00000055655e372c: sll zero, zero, 0
           0x00000055655e3730: sll zero, zero, 0
           0x00000055655e3734: mflo t0          <-- error
           *
           *   t0: 0xFFFFFFFFcafebab0  (Right)
           *   t0: 0x00000000cafebab0  (Wrong)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);  // sll with shamt 0 sign-extends the low 32 bits
#endif
          break;

        case lir_sub:
#ifdef _LP64
          if (dest->type() == T_INT)
            __ subu32(res, lreg, rreg);
          else
#endif
            __ subu(res, lreg, rreg);
          break;

        default:
          ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // cpu register - stack
      Unimplemented();
    } else if (right->is_constant()) {
      // cpu register - constant
      Register res = dest->as_register();
      jint c = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_mul_strictfp:
        case lir_mul:
          // Constant is materialized into AT; same mult/nop/nop/mflo
          // sequence as the register-register case above.
          __ move(AT, c);
#ifndef _LP64
          //by aoqi
          __ mult(lreg, AT);
#else
          __ dmult(lreg, AT);
#endif
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           *    Example: java.net.URLClassLoader::string2int()
           *   a6: 0xcafebab
           *   s0: 16
           *
           *   104 mul [a6|I] [s0|I] [t0|I]
           0x00000055655e3728: dmult a6, s0
           0x00000055655e372c: sll zero, zero, 0
           0x00000055655e3730: sll zero, zero, 0
           0x00000055655e3734: mflo t0          <-- error
           *
           *   t0: 0xFFFFFFFFcafebab0  (Right)
           *   t0: 0x00000000cafebab0  (Wrong)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_add:
          // Fold small constants into the immediate form when they fit simm16.
          if (Assembler::is_simm16(c)) {
            __ addiu(res, lreg, c);
          } else {
            __ move(AT, c);
            __ addu(res, lreg, AT);
          }
          break;

        case lir_sub:
          // sub by constant is emitted as add of the negated constant.
          if (Assembler::is_simm16(-c)) {
            __ addi(res, lreg, -c);
          } else {
            __ move(AT, c);
            __ subu(res, lreg, AT);
          }
          break;

        default:
          ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // Long arithmetic. On 32-bit this operates on hi/lo register pairs
    // with manual carry/borrow propagation through AT.
    Register op1_lo = left->as_register_lo();
    Register op1_hi = left->as_register_hi();
    Register op2_lo;
    Register op2_hi;
    Register dst_lo;
    Register dst_hi;

    if(dest->is_single_cpu())
    {
      dst_lo = dest->as_register();
    }
    else
    {
#ifdef _LP64
      dst_lo = dest->as_register_lo();
#else
      dst_lo = dest->as_register_lo();
      dst_hi = dest->as_register_hi();
#endif
    }
    if (right->is_constant()) {
      // Constant RHS is materialized into AT (hi half is zero on 32-bit).
      op2_lo = AT;
      op2_hi = R0;
#ifndef _LP64
      __ li(AT, right->as_constant_ptr()->as_jint());
#else
      __ li(AT, right->as_constant_ptr()->as_jlong_bits());
#endif
    } else if (right->is_double_cpu()) { // Double cpu
      assert(right->is_double_cpu(),"right must be long");
      assert(dest->is_double_cpu(), "dest must be long");
      op2_lo = right->as_register_lo();
      op2_hi = right->as_register_hi();
    } else {
#ifdef _LP64
      op2_lo = right->as_register();
#else
      ShouldNotReachHere();
#endif
    }

    NOT_LP64(assert_different_registers(op1_lo, op1_hi, op2_lo, op2_hi));
    // Jin: Why?
    // LP64_ONLY(assert_different_registers(op1_lo, op2_lo));

    switch (code) {
      case lir_add:
#ifndef _LP64
        //by aoqi
        // 64-bit add on 32-bit registers: low add, carry via sltu, high add.
        __ addu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, dst_lo, op2_lo);
        __ addu(dst_hi, op1_hi, op2_hi);
        __ addu(dst_hi, dst_hi, AT);
#else
        __ addu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_sub:
#ifndef _LP64
        //by aoqi
        // 64-bit sub on 32-bit registers: low sub, borrow via sltu, high sub.
        __ subu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, op1_lo, dst_lo);
        __ subu(dst_hi, op1_hi, op2_hi);
        __ subu(dst_hi, dst_hi, AT);
#else
        __ subu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_mul:
        {
#ifndef _LP64
          //by aoqi
          // 64x64 multiply from 32-bit halves:
          //   hi = op1_hi*op2_lo + op1_lo*op2_hi + carry(op1_lo*op2_lo)
          Label zero, quick, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

          //quick?  (both hi halves zero -> single multu suffices)
          __ orr(AT, op2_hi, op1_hi);
          __ beq(AT, R0, quick);
          __ delayed()->nop();

          __ multu(op2_lo, op1_hi);
          __ nop();
          __ nop();
          __ mflo(dst_hi);
          __ multu(op2_hi, op1_lo);
          __ nop();
          __ nop();
          __ mflo(AT);

          __ bind(quick);
          __ multu(op2_lo, op1_lo);
          __ addu(dst_hi, dst_hi, AT);
          __ nop();
          __ mflo(dst_lo);
          __ mfhi(AT);
          __ b(done);
          __ delayed()->addu(dst_hi, dst_hi, AT);

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#else
          // 64-bit: one multu covers the product; hi halves must be zero.
          Label zero, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);

#ifdef ASSERT
          //op1_hi, op2_hi should be 0
          {
            Label L;
            __ beq(op1_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
          {
            Label L;
            __ beq(op2_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
#endif

          __ multu(op2_lo, op1_lo);
          __ nop();
          __ nop();
          __ mflo(dst_lo);
          __ b(done);
          __ delayed()->nop();

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#endif  //_LP64
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    // Single-precision FP arithmetic; strictfp needs no special handling
    // here since MIPS single-precision ops are already IEEE-strict.
    assert(right->is_single_fpu(),"right must be float");
    assert(dest->is_single_fpu(), "dest must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = dest->as_float_reg();

    switch (code) {
      case lir_add:
        __ add_s(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_s(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        __ mul_s(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_s(res, lreg, rreg);
        break;
      default     : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    // Double-precision FP arithmetic.
    assert(right->is_double_fpu(),"right must be double");
    assert(dest->is_double_fpu(), "dest must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = dest->as_double_reg();

    switch (code) {
      case lir_add:
        __ add_d(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_d(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        // by yjl 9/13/2005
        __ mul_d(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_d(res, lreg, rreg);
        break;
  //    case lir_rem:
  //      __ rem_d(res, lreg, rreg);
  //      break;
      default     : ShouldNotReachHere();
    }
  }
  else if (left->is_single_stack() || left->is_address()) {
    // In-place read-modify-write on a stack slot or memory address,
    // staged through AT (and T8 for large constants on 64-bit).
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ add(AT, AT, rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dadd(AT, AT, rreg);
          __ sd(AT, laddr);
#endif
          break;
        case lir_sub:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ sub(AT,AT,rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dsub(AT,AT,rreg);
          __ sd(AT, laddr);
#endif
          break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
#ifndef _LP64
      jint c = right->as_constant_ptr()->as_jint();
#else
      jlong c = right->as_constant_ptr()->as_jlong_bits();
#endif
      switch (code) {
        case lir_add: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, c);
#else
          __ li(T8, c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        case lir_sub: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, -c);
#else
          __ li(T8, -c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
3741 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op *op) {
3742 //FIXME,lir_log, lir_log10,lir_abs,lir_sqrt,so many new lir instruction @jerome
3743 if (value->is_double_fpu()) {
3744 // assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
3745 switch(code) {
3746 case lir_log : //__ flog() ; break;
3747 case lir_log10 : //__ flog10() ;
3748 Unimplemented();
3749 break;
3750 case lir_abs : __ abs_d(dest->as_double_reg(), value->as_double_reg()) ; break;
3751 case lir_sqrt : __ sqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
3752 case lir_sin :
3753 // Should consider not saving ebx if not necessary
3754 __ trigfunc('s', 0);
3755 break;
3756 case lir_cos :
3757 // Should consider not saving ebx if not necessary
3758 // assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
3759 __ trigfunc('c', 0);
3760 break;
3761 case lir_tan :
3762 // Should consider not saving ebx if not necessary
3763 __ trigfunc('t', 0);
3764 break;
3765 default : ShouldNotReachHere();
3766 }
3767 } else {
3768 Unimplemented();
3769 }
3770 }
3772 //FIXME, if right is on the stack!
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // Emit bitwise and/or/xor for int (single-cpu) and long (double-cpu)
  // operands, with constant, stack and register RHS variants.
  if (left->is_single_cpu()) {
    Register dstreg = dst->as_register();
    Register reg = left->as_register();
    if (right->is_constant()) {
      // Constant RHS staged through AT.
      int val = right->as_constant_ptr()->as_jint();
      __ move(AT, val);
      switch (code) {
        case lir_logic_and:
          __ andr (dstreg, reg, AT);
          break;
        case lir_logic_or:
          __ orr(dstreg, reg, AT);
          break;
        case lir_logic_xor:
          __ xorr(dstreg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      // NOTE(review): this branch writes the result into `reg` (left) rather
      // than `dstreg`, unlike the other branches -- looks like x86 two-address
      // heritage; verify dst == left is guaranteed when RHS is on the stack.
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and:
          //FIXME. lw or ld_ptr?
          __ lw(AT, raddr);
          __ andr(reg, reg,AT);
          break;
        case lir_logic_or:
          __ lw(AT, raddr);
          __ orr(reg, reg, AT);
          break;
        case lir_logic_xor:
          __ lw(AT, raddr);
          __ xorr(reg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andr (dstreg, reg, rright); break;
        case lir_logic_or : __ orr  (dstreg, reg, rright); break;
        case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    // Long operands: hi/lo pairs on 32-bit, single registers on 64-bit.
    Register l_lo = left->as_register_lo();
    Register dst_lo = dst->as_register_lo();
#ifndef _LP64
    Register l_hi = left->as_register_hi();
    Register dst_hi = dst->as_register_hi();
#endif

    if (right->is_constant()) {
#ifndef _LP64
      // 32-bit: apply the operation to each half with its own constant.
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();

      switch (code) {
        case lir_logic_and:
          __ move(AT, r_lo);
          __ andr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ andr(dst_hi, l_hi, AT);
          break;

        case lir_logic_or:
          __ move(AT, r_lo);
          __ orr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ orr(dst_hi, l_hi, AT);
          break;

        case lir_logic_xor:
          __ move(AT, r_lo);
          __ xorr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ xorr(dst_hi, l_hi, AT);
          break;

        default: ShouldNotReachHere();
      }
#else
      // 64-bit: materialize the full jlong constant into AT.
      __ li(AT, right->as_constant_ptr()->as_jlong());

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, AT);
          break;

        case lir_logic_or:
          __ orr(dst_lo, l_lo, AT);
          break;

        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, AT);
          break;

        default: ShouldNotReachHere();
      }
#endif

    } else {
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }
  }
}
3901 //done here. aoqi. 12-12 22:25
3902 // we assume that eax and edx can be overwritten
// Emit integer division/remainder (lir_idiv / lir_irem).
// MIPS div does not raise a hardware exception on divide-by-zero, so a
// teq (trap-if-equal, code 0x7) against the divisor is emitted to trap
// manually; add_debug_info_for_div0 records that trap site.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    int divisor = right->as_constant_ptr()->as_jint();
    // Divisor is a nonzero compile-time constant, so the teq below can
    // never actually fire; it is kept for a uniform debug-info shape.
    assert(divisor!=0, "must be nonzero");
#ifndef _LP64
    __ move(AT, divisor);
    __ div(lreg, AT);
#else
    __ li(AT, divisor);
    __ ddiv(lreg, AT);
#endif
    int idivl_offset = code_offset();

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    __ teq(R0, AT, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  } else {
    Register rreg = right->as_register();
#ifndef _LP64
    __ div(lreg, rreg);
#else
    __ ddiv(lreg, rreg);
#endif

    int idivl_offset = code_offset();
    // Trap if the runtime divisor is zero.
    __ teq(R0, rreg, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  }

  // get the result
  if (code == lir_irem) {
    __ mfhi(dreg);  // remainder lives in HI
#ifdef _LP64
    // Re-sign-extend a 32-bit remainder on the 64-bit register file.
    if (result->type() == T_INT)
      __ sll(dreg, dreg, 0);
#endif
  } else if (code == lir_idiv) {
    __ mflo(dreg);  // quotient lives in LO
  } else {
    ShouldNotReachHere();
  }
}
3959 void LIR_Assembler::arithmetic_frem(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
3960 if (left->is_single_fpu()) {
3961 assert(right->is_single_fpu(),"right must be float");
3962 assert(result->is_single_fpu(), "dest must be float");
3963 assert(temp->is_single_fpu(), "dest must be float");
3965 FloatRegister lreg = left->as_float_reg();
3966 FloatRegister rreg = right->as_float_reg();
3967 FloatRegister res = result->as_float_reg();
3968 FloatRegister tmp = temp->as_float_reg();
3970 switch (code) {
3971 case lir_frem:
3972 __ rem_s(res, lreg, rreg, tmp);
3973 break;
3974 default : ShouldNotReachHere();
3975 }
3976 } else if (left->is_double_fpu()) {
3977 assert(right->is_double_fpu(),"right must be double");
3978 assert(result->is_double_fpu(), "dest must be double");
3979 assert(temp->is_double_fpu(), "dest must be double");
3981 FloatRegister lreg = left->as_double_reg();
3982 FloatRegister rreg = right->as_double_reg();
3983 FloatRegister res = result->as_double_reg();
3984 FloatRegister tmp = temp->as_double_reg();
3986 switch (code) {
3987 case lir_frem:
3988 __ rem_d(res, lreg, rreg, tmp);
3989 break;
3990 default : ShouldNotReachHere();
3991 }
3992 }
3993 }
// Emit a three-way compare producing -1/0/1 in dstreg:
//   lir_cmp_fd2i  - FP compare, NaN treated via ordered-less (c_olt)
//   lir_ucmp_fd2i - FP compare, NaN treated via unordered-less (c_ult)
//   lir_cmp_l2i   - long compare (hi/lo pair on 32-bit)
// Each test branches to `done` with the result placed in the branch delay
// slot (__ delayed(); __ move(...)); the move also executes on fall-through
// and is then overwritten by the next test's result.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst,LIR_Op2 * op) {
  Register dstreg = dst->as_register();
  if (code == lir_cmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_olt_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_olt_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    }
  } else if (code == lir_ucmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_ult_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_ult_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    Register l_lo, l_hi, r_lo, r_hi, d_lo, d_hi;
    l_lo = left->as_register_lo();
    l_hi = left->as_register_hi();
    r_lo = right->as_register_lo();
    r_hi = right->as_register_hi();

    Label done;
#ifndef _LP64
    // 32-bit: decide on the signed high halves first.
    // less?
    __ slt(AT, l_hi, r_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // great?
    __ slt(AT, r_hi, l_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
#endif

    // now compare low 32 bits
    // below?  (unsigned on 32-bit halves, signed on full 64-bit values)
#ifndef _LP64
    __ sltu(AT, l_lo, r_lo);
#else
    __ slt(AT, l_lo, r_lo);
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // above?
#ifndef _LP64
    __ sltu(AT, r_lo, l_lo);
#else
    __ slt(AT, r_lo, l_lo);
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
    // equal
    __ move(dstreg, R0);

    __ bind(done);
  }
}
// Intentionally empty: all MIPS instructions are word aligned, so no
// call-site padding is required. The commented-out body below is the
// x86/sparc displacement-alignment logic kept for reference.
void LIR_Assembler::align_call(LIR_Code code) {
//FIXME. aoqi, this right?
  // do nothing since all instructions are word aligned on sparc
  /*
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  */
}
// Emit a direct Java call with the given relocation type, fill the branch
// delay slot with a nop, and record debug/oop-map info at the return pc.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  //assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned");
  __ call(op->addr(), rtype);
  __ delayed()->nop();
  add_call_info(code_offset(), op->info());
}
// Emit an inline-cache call: load the IC sentinel (Universe::non_oop_word())
// into IC_Klass, then call through a virtual_call relocation. The IC stub
// later patches the sentinel with the real receiver klass, so the load
// sequence must match what the IC machinery expects to rewrite.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
//  int oop_index = __ oop_recorder()->allocate_oop_index((jobject)Universe::non_oop_word());
//  RelocationHolder rspec = oop_Relocation::spec(oop_index);
///  __ relocate(rspec);
#ifndef _LP64
//by_css
  // 32-bit: materialize the sentinel with a lui/addiu pair.
  __ lui(IC_Klass, Assembler::split_high((int)Universe::non_oop_word()));
  __ addiu(IC_Klass, IC_Klass, Assembler::split_low((int)Universe::non_oop_word()));
#else
  // 64-bit: fixed-length 48-bit immediate load (patchable).
  __ li48(IC_Klass, (long)Universe::non_oop_word());
#endif
  __ call(op->addr(), rh);
  __ delayed()->nop();
  //add_call_info(code_offset(), info);

  add_call_info(code_offset(), op->info());
  assert(!os::is_MP() ||
         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
}
4186 /* Currently, vtable-dispatch is only enabled for sparc platforms */
// Vtable dispatch is not used on this port (sparc-only feature);
// reaching here is a fatal error.
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}
// Emit the out-of-line static call stub for the preceding call site.
// Layout: static_stub relocation -> patchable metadata load into
// T7/Rmethod -> patchable jump target load into AT -> jr AT. Both loads
// start as placeholders (NULL / -1) and are patched when the call is
// bound to an interpreted entry; the instruction shapes must therefore
// stay exactly what NativeMovConstReg/NativeJump expect.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }
  int start = __ offset();
  /*
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  */
  __ relocate(static_stub_Relocation::spec(call_pc));
  jobject o=NULL;
  int oop_index = __ oop_recorder()->allocate_oop_index((jobject)o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
//see set_to_interpreted
#ifndef _LP64
  __ lui(T7, Assembler::split_high((int)o));
  __ addiu(T7, T7, Assembler::split_low((int)o));
#else
  __ li48(Rmethod, (long)o);
#endif
#ifndef _LP64
  __ lui(AT, Assembler::split_high((int)-1));
  __ addiu(AT, AT, Assembler::split_low((int)-1));
#else
  __ li48(AT, (long)-1);
#endif
  //assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  __ jr(AT);
  __ delayed()->nop();
  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
// Emit an athrow: materialize the throwing pc into the exceptionPC register
// (via an internal_pc relocation so it survives code relocation), record
// debug info at that pc, then call the Runtime1 exception handler entry.
// Fixed registers: exception oop in V0, throwing pc in V1.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register()== V0, "must match");
  assert(exceptionPC->as_register()== V1, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)

  info->add_register_oop(exceptionOop);
  //if (!unwind) {
    // get current pc information
    // pc is only needed if the method has an exception handler, the unwind code does not need it.
#ifndef _LP64
//by_css
  int pc_for_athrow  = (int)__ pc();
  int pc_for_athrow_offset = __ offset();
  Register epc = exceptionPC->as_register();
  //__ nop();
  // pc_for_athrow can not point to itself (relocInfo restriction), no need now
  __ relocate(relocInfo::internal_pc_type);
  __ lui(epc, Assembler::split_high(pc_for_athrow));
  __ addiu(epc, epc, Assembler::split_low(pc_for_athrow));
#else
  long pc_for_athrow  = (long)__ pc();
  int pc_for_athrow_offset = __ offset();
  Register epc = exceptionPC->as_register();
  //__ nop();
  // pc_for_athrow can not point to itself (relocInfo restriction), no need now
  __ relocate(relocInfo::internal_pc_type);
  __ li48(epc, pc_for_athrow);
#endif
  add_call_info(pc_for_athrow_offset, info); // for exception handler
  __ verify_not_null_oop(V0);
  // search an exception handler (eax: exception oop, edx: throwing pc)
  // nofpu variant skips FPU state save when the method has no FP code.
  if (compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
        relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
        relocInfo::runtime_call_type);
  }
//  } else {
//    __ call(Runtime1::entry_for(Runtime1::unwind_exception_id),
//        relocInfo::runtime_call_type);
//  }

  // enough room for two byte trap
  __ delayed()->nop();
}
// Jump to the method's shared unwind handler; exception oop must already
// be in FSR. Delay slot is filled with a nop.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop){
  assert(exceptionOop->as_register()== FSR, "must match");
  __ b(_unwind_handler_entry);
  __ delayed()->nop();
}
4292 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
4293 // optimized version for linear scan:
4294 // * tmp must be unused
4295 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
4297 #ifdef _LP64
4298 Register count_reg = count->as_register();
4299 Register value_reg;
4300 Register dest_reg;
4301 if (left->is_single_cpu()) {
4302 value_reg = left->as_register();
4303 dest_reg = dest->as_register();
4305 } else if (left->is_double_cpu()) {
4306 value_reg = left->as_register_lo();
4307 dest_reg = dest->as_register_lo();
4308 } else {
4309 ShouldNotReachHere();
4310 }
4311 assert_different_registers(count_reg, value_reg);
4312 switch (code) {
4313 case lir_shl:
4314 if (dest->type() == T_INT)
4315 __ sllv(dest_reg, value_reg, count_reg);
4316 else
4317 __ dsllv(dest_reg, value_reg, count_reg);
4318 break;
4319 //__ dsllv(dest_reg, value_reg, count_reg); break;
4320 case lir_shr: __ dsrav(dest_reg, value_reg, count_reg); break;
4321 case lir_ushr:
4322 #if 1
4323 /*
4324 Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4325 However, dsrl will shift in company with the highest 32 bits.
4326 Thus, if the source register contains a negative value,
4327 the resulti is incorrect.
4328 * DoubleCvt.java
4329 *
4330 * static int inp (int shift)
4331 * {
4332 * return -1 >>> (32 - shift);
4333 * }
4334 *
4335 * 26 ushift_right [t0|I] [a4|I] [a6|I]
4336 * 0x00000055616d2a98: dsrl a6, t0, a4 <-- error
4337 */
4339 // java.math.MutableBigInteger::primitiveRightShift
4340 //
4341 // 108 ushift_right [a6|I] [a4|I] [a4|I]
4342 // 0x00000055646d2f70: dsll32 a4, a6, 0 \
4343 // 0x00000055646d2f74: dsrl32 a4, a4, 0 |- error!
4344 // 0x00000055646d2f78: dsrl a4, a4, a4 /
4345 if (left->type() == T_INT && dest->type() == T_INT)
4346 {
4347 __ dsll32(AT, value_reg, 0); // Omit the high 32 bits
4348 __ dsrl32(AT, AT, 0);
4349 __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
4350 break;
4351 }
4352 #endif
4353 __ dsrlv(dest_reg, value_reg, count_reg); break;
4354 default: ShouldNotReachHere();
4355 }
4356 #else
4357 if (left->is_single_cpu()) {
4358 Register value_reg = left->as_register();
4359 Register count_reg = count->as_register();
4360 Register dest_reg = dest->as_register();
4361 assert_different_registers(count_reg, value_reg);
4363 switch (code) {
4364 case lir_shl: __ sllv(dest_reg, value_reg, count_reg); break;
4365 case lir_shr: __ srav(dest_reg, value_reg, count_reg); break;
4366 case lir_ushr: __ srlv(dest_reg, value_reg, count_reg); break;
4367 default: ShouldNotReachHere();
4368 }
4370 } else if (left->is_double_cpu()) {
4371 Register creg = count->as_register();
4372 Register lo = left->as_register_lo();
4373 Register hi = left->as_register_hi();
4374 Register dlo = dest->as_register_lo();
4375 Register dhi = dest->as_register_hi();
4377 __ andi(creg, creg, 0x3f);
4378 switch (code) {
4379 case lir_shl:
4380 {
4381 Label normal, done, notzero;
4383 //count=0
4384 __ bne(creg, R0, notzero);
4385 __ delayed()->nop();
4386 __ move(dlo, lo);
4387 __ b(done);
4388 __ delayed();
4389 __ move(dhi, hi);
4391 //count>=32
4392 __ bind(notzero);
4393 __ sltiu(AT, creg, BitsPerWord);
4394 __ bne(AT, R0, normal);
4395 __ delayed();
4396 __ addiu(AT, creg, (-1) * BitsPerWord);
4397 __ sllv(dhi, lo, AT);
4398 __ b(done);
4399 __ delayed();
4400 __ move(dlo, R0);
4402 //count<32
4403 __ bind(normal);
4404 __ sllv(dhi, hi, creg);
4405 __ move(AT, BitsPerWord);
4406 __ sub(AT, AT, creg);
4407 __ srlv(AT, lo, AT);
4408 __ orr(dhi, dhi, AT);
4409 __ sllv(dlo, lo, creg);
4410 __ bind(done);
4411 }
4412 break;
4413 case lir_shr:
4414 {
4415 Label normal, done, notzero;
4417 //count=0
4418 __ bne(creg, R0, notzero);
4419 __ delayed()->nop();
4420 __ move(dhi, hi);
4421 __ b(done);
4422 __ delayed();
4423 __ move(dlo, lo);
4425 //count>=32
4426 __ bind(notzero);
4427 __ sltiu(AT, creg, BitsPerWord);
4428 __ bne(AT, R0, normal);
4429 __ delayed();
4430 __ addiu(AT, creg, (-1) * BitsPerWord);
4431 __ srav(dlo, hi, AT);
4432 __ b(done);
4433 __ delayed();
4434 __ sra(dhi, hi, BitsPerWord - 1);
4436 //count<32
4437 __ bind(normal);
4438 __ srlv(dlo, lo, creg);
4439 __ move(AT, BitsPerWord);
4440 __ sub(AT, AT, creg);
4441 __ sllv(AT, hi, AT);
4442 __ orr(dlo, dlo, AT);
4443 __ srav(dhi, hi, creg);
4444 __ bind(done);
4445 }
4446 break;
4447 case lir_ushr:
4448 {
4449 Label normal, done, notzero;
4451 //count=zero
4452 __ bne(creg, R0, notzero);
4453 __ delayed()->nop();
4454 __ move(dhi, hi);
4455 __ b(done);
4456 __ delayed();
4457 __ move(dlo, lo);
4459 //count>=32
4460 __ bind(notzero);
4461 __ sltiu(AT, creg, BitsPerWord);
4462 __ bne(AT, R0, normal);
4463 __ delayed();
4464 __ addi(AT, creg, (-1) * BitsPerWord);
4465 __ srlv(dlo, hi, AT);
4466 __ b(done);
4467 __ delayed();
4468 __ move(dhi, R0);
4470 //count<32
4471 __ bind(normal);
4472 __ srlv(dlo, lo, creg);
4473 __ move(AT, BitsPerWord);
4474 __ sub(AT, AT, creg);
4475 __ sllv(AT, hi, AT);
4476 __ orr(dlo, dlo, AT);
4477 __ srlv(dhi, hi, creg);
4478 __ bind(done);
4479 }
4480 break;
4481 default: ShouldNotReachHere();
4482 }
4483 } else {
4484 ShouldNotReachHere();
4485 }
4486 #endif
4488 }
4490 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
4491 if (dest->is_single_cpu()) {
4492 /* In WebClient,
4493 * virtual jboolean java.util.concurrent.atomic.AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl.compareAndSet
4494 *
4495 * 130 ushift_right [a4a4|J] [int:9|I] [a4|L]
4496 */
4497 Register value_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4498 Register dest_reg = dest->as_register();
4499 count = count & 0x1F; // Java spec
4501 switch (code) {
4502 #ifdef _LP64
4503 case lir_shl:
4504 if (dest->type() == T_INT)
4505 __ sll(dest_reg, value_reg, count);
4506 else
4507 __ dsll(dest_reg, value_reg, count);
4508 break;
4509 case lir_shr: __ dsra(dest_reg, value_reg, count); break;
4510 case lir_ushr:
4511 #if 1
4512 if (left->type() == T_INT && dest->type() == T_INT)
4513 {
4514 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4515 However, dsrl will shift in company with the highest 32 bits.
4516 Thus, if the source register contains a negative value,
4517 the resulti is incorrect.
4519 Example: in java.util.HashMap.get()
4521 68 ushift_right [t0|I] [int:20|I] [a4|I]
4522 dsrl a4, t0, 20
4524 t0: 0xFFFFFFFF87654321 (64bits for 0x87654321)
4526 ushift_right t0, 16 -> a4
4528 a4: 00000000 00008765 (right)
4529 a4: FFFFFFFF FFFF8765 (wrong)
4530 */
4531 __ dsll32(dest_reg, value_reg, 0); // Omit the high 32 bits
4532 __ dsrl32(dest_reg, dest_reg, count); // Unsigned right shift
4533 break;
4534 }
4535 #endif
4537 __ dsrl(dest_reg, value_reg, count);
4538 break;
4539 #else
4540 case lir_shl: __ sll(dest_reg, value_reg, count); break;
4541 case lir_shr: __ sra(dest_reg, value_reg, count); break;
4542 case lir_ushr: __ srl(dest_reg, value_reg, count); break;
4543 #endif
4544 default: ShouldNotReachHere();
4545 }
4547 } else if (dest->is_double_cpu()) {
4548 Register valuelo = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4549 Register destlo = dest->as_register_lo();
4550 count = count & 0x3f;
4551 #ifdef _LP64
4552 switch (code) {
4553 case lir_shl: __ dsll(destlo, valuelo, count); break;
4554 case lir_shr: __ dsra(destlo, valuelo, count); break;
4555 case lir_ushr: __ dsrl(destlo, valuelo, count); break;
4556 default: ShouldNotReachHere();
4557 }
4558 #else
4559 Register desthi = dest->as_register_hi();
4560 Register valuehi = left->as_register_hi();
4561 assert_different_registers(destlo, valuehi, desthi);
4562 switch (code) {
4563 case lir_shl:
4564 if (count==0) {
4565 __ move(destlo, valuelo);
4566 __ move(desthi, valuehi);
4567 } else if (count>=32) {
4568 __ sll(desthi, valuelo, count-32);
4569 __ move(destlo, R0);
4570 } else {
4571 __ srl(AT, valuelo, 32 - count);
4572 __ sll(destlo, valuelo, count);
4573 __ sll(desthi, valuehi, count);
4574 __ orr(desthi, desthi, AT);
4575 }
4576 break;
4578 case lir_shr:
4579 if (count==0) {
4580 __ move(destlo, valuelo);
4581 __ move(desthi, valuehi);
4582 } else if (count>=32) {
4583 __ sra(destlo, valuehi, count-32);
4584 __ sra(desthi, valuehi, 31);
4585 } else {
4586 __ sll(AT, valuehi, 32 - count);
4587 __ sra(desthi, valuehi, count);
4588 __ srl(destlo, valuelo, count);
4589 __ orr(destlo, destlo, AT);
4590 }
4591 break;
4593 case lir_ushr:
4594 if (count==0) {
4595 __ move(destlo, valuelo);
4596 __ move(desthi, valuehi);
4597 } else if (count>=32) {
4598 __ sra(destlo, valuehi, count-32);
4599 __ move(desthi, R0);
4600 } else {
4601 __ sll(AT, valuehi, 32 - count);
4602 __ srl(desthi, valuehi, count);
4603 __ srl(destlo, valuelo, count);
4604 __ orr(destlo, destlo, AT);
4605 }
4606 break;
4608 default: ShouldNotReachHere();
4609 }
4610 #endif
4611 } else {
4612 ShouldNotReachHere();
4613 }
4614 }
4616 void LIR_Assembler::store_parameter(Register r, int offset_from_esp_in_words) {
4617 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4618 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4619 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4620 __ st_ptr(r, SP, offset_from_sp_in_bytes);
4621 }
4624 void LIR_Assembler::store_parameter(jint c, int offset_from_esp_in_words) {
4625 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4626 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4627 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4628 __ move(AT, c);
4629 __ st_ptr(AT, SP, offset_from_sp_in_bytes);
4630 }
// Store the oop constant o into the caller-reserved outgoing-argument area.
// Records an oop relocation immediately before the materialization sequence
// so the GC can find and update the embedded pointer. Clobbers AT.
void LIR_Assembler::store_parameter(jobject o, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  int oop_index = __ oop_recorder()->find_index(o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);  // must directly precede the instructions embedding o
#ifndef _LP64
  // 32-bit: build the pointer with a lui/addiu pair.
  __ lui(AT, Assembler::split_high((int)o));
  __ addiu(AT, AT, Assembler::split_low((int)o));
#else
  // 64-bit: li48 emits a fixed-length pattern the relocation can patch.
  __ li48(AT, (long)o);
#endif

  __ st_ptr(AT, SP, offset_from_sp_in_bytes);
}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = T8;
#ifndef OPT_THREAD
  Register java_thread = T8;
#else
  Register java_thread = TREG;
#endif
  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // this is saved in the caller's reserved argument area
    //FIXME, maybe It will change something in the stack;
    // These are proper for the calling convention
    //store_parameter(length, 2);
    //store_parameter(dst_pos, 1);
    //store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    //store_parameter(src_pos, 3);
    //store_parameter(src, 4);
    assert(src == T0 && src_pos == A0, "mismatch in calling convention");
    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint

    // Save the five arguments so they can be reloaded after the call.
    __ push(src);
    __ push(dst);
    __ push(src_pos);
    __ push(dst_pos);
    __ push(length);

    // save SP and align
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    // Record SP as last_Java_sp so the runtime can walk this frame.
    __ st_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
#ifndef _LP64
    // 32-bit: the 5th argument goes on the (re-aligned) stack.
    __ addi(SP, SP, (-5) * wordSize);
    __ move(AT, -(StackAlignmentInBytes));
    __ andr(SP, SP, AT);
    // push argument
    __ sw(length, SP, 4 * wordSize);
#else
    // 64-bit: all five arguments fit in registers (A0..A4).
    __ move(A4, length);
#endif
    // Move args last-to-first so src (T0) is not clobbered before it is read.
    __ move(A3, dst_pos);
    __ move(A2, dst);
    __ move(A1, src_pos);
    __ move(A0, src);
    // make call
    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    __ call(entry, relocInfo::runtime_call_type);
    __ delayed()->nop();
    // restore SP
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    __ ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
    __ super_pop(length);
    __ super_pop(dst_pos);
    __ super_pop(src_pos);
    __ super_pop(dst);
    __ super_pop(src);

    // Generic arraycopy returns 0 on success; otherwise fall into the stub.
    __ beq_far(V0, R0, *stub->continuation());
    __ delayed()->nop();

    __ b_far(*stub->entry());
    __ delayed()->nop();
    __ bind(*stub->continuation());
    return;
  }
  assert(default_type != NULL
         && default_type->is_array_klass()
         && default_type->is_loaded(),
         "must be true at this point");

  // Element size determines how positions scale to byte offsets.
  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  switch (elem_size) {
  case 1 :shift_amount = 0; break;
  case 2 :shift_amount = 1; break;
  case 4 :shift_amount = 2; break;
  case 8 :shift_amount = 3; break;
  default:ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ beq_far(src, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ beq_far(dst, R0, *stub->entry());
    __ delayed()->nop();
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ bltz(src_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ bltz(dst_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ bltz(length, *stub->entry());
    __ delayed()->nop();
  }

  // Bounds checks: take the stub if pos + length exceeds the array length.
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(AT, src_pos, length);
    __ lw(tmp, src_length_addr);
    __ sltu(AT, tmp, AT);               // AT = (src.length < src_pos + length)
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(AT, dst_pos, length);
    __ lw(tmp, dst_length_addr);
    __ sltu(AT, tmp, AT);               // AT = (dst.length < dst_pos + length)
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }

  // Exact klass match required; anything else goes to the slow stub.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedOops) {
      __ lw(AT, src_klass_addr);
      __ lw(tmp, dst_klass_addr);
    } else {
      __ ld(AT, src_klass_addr);
      __ ld(tmp, dst_klass_addr);
    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly. For the object array
    // case, if no type check is needed then the dst type must match the
    // expected type and the src type is so subtype which we can't check. If
    // a type check i needed then at this point the classes are known to be
    // the same but again which don't know which type so we can't check them.
    Label known_ok, halt;
    //FIXME:wuhui. not finished. __ mov_metadata(tmp, default_type->constant_encoding());
    // NOTE(review): with mov_metadata commented out, AT below is never loaded
    // with the expected klass before it is compared — confirm this assert
    // block is effectively disabled until the FIXME above is resolved.
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(AT);
      __ lw(tmp, dst_klass_addr);
    } else
#endif
    {
      __ ld(tmp, dst_klass_addr);
    }
    if (basic_type != T_OBJECT) {
      // Primitive copy: both src and dst klass must equal the expected klass.
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
      if (UseCompressedOops) {
        __ lw(tmp, src_klass_addr);
      } else {
        __ ld(tmp, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
      // Object copy: dst klass matches expected, or src == dst.
      if (UseCompressedOops) {
        __ lw(tmp, dst_klass_addr);
      } else {
        __ ld(tmp, dst_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
      __ beq(src, dst, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // Preserve the operands across the leaf call.
  __ push(src);
  __ push(dst);
  __ push(src_pos);
  __ push(dst_pos);
  __ push(length);

  // Set up the three leaf-call arguments:
  //   A0 = src element address, A1 = dst element address,
  //   A2 = length (in bytes for primitives, in elements for oops).
  assert(A0 != A1 &&
         A0 != length &&
         A1 != length, "register checks");
  __ move(AT, dst_pos);
  if (shift_amount > 0 && basic_type != T_OBJECT) {
#ifndef _LP64
    __ sll(A2, length, shift_amount);
#else
    __ dsll(A2, length, shift_amount);
#endif
  } else {
    if (length!=A2)
      __ move(A2, length);
  }
  __ move(A3, src_pos );
  assert(A0 != dst_pos &&
         A0 != dst &&
         dst_pos != dst, "register checks");

  assert_different_registers(A0, dst_pos, dst);
  // A1 = dst + header + dst_pos * elem_size
#ifndef _LP64
  __ sll(AT, AT, shift_amount);
#else
  __ dsll(AT, AT, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A1, dst, AT);

  // A0 = src + header + src_pos * elem_size
#ifndef _LP64
  __ sll(AT, A3, shift_amount);
#else
  __ dsll(AT, A3, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A0, src, AT);

  // Oop copies need store barriers, hence the separate runtime entry.
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 3);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 3);
  }
  __ super_pop(length);
  __ super_pop(dst_pos);
  __ super_pop(src_pos);
  __ super_pop(dst);
  __ super_pop(src);

  __ bind(*stub->continuation());
}
// CRC32 intrinsic is not implemented on MIPS; abort the VM if reached.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  tty->print_cr("LIR_Assembler::emit_updatecrc32 unimplemented yet !");
  Unimplemented();
}
// Emit the fast path of monitorenter/monitorexit. Slow cases branch to the
// operation's stub; the stub's continuation is bound at the end.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register(): op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    // Fast locking disabled: always take the slow path.
    // NOTE(review): unlike other branch sites in this file, this b_far is not
    // followed by delayed()->nop() — confirm b_far handles its own delay slot.
    __ b_far(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      //add_debug_info_for_null_check_here(op->info());
      add_debug_info_for_null_check(null_check_offset,op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
// Emit call profiling: bump the invocation counter in the MDO and, for
// invokevirtual/invokeinterface, record the receiver type (statically when
// the holder is known, otherwise with a dynamic row search). Clobbers AT.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();

  // Materialize the MDO pointer with an oop relocation so GC can patch it.
  int oop_index = __ oop_recorder()->find_index(md->constant_encoding());
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  // 32-bit: lui/addiu pair.
  __ lui(mdo, Assembler::split_high((int)md->constant_encoding()));
  __ addiu(mdo, mdo, Assembler::split_low((int)md->constant_encoding()));
#else
  __ li48(mdo, (long)md->constant_encoding());
#endif

  // Increment the per-bci call counter.
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ lw(AT, counter_addr);
  __ addi(AT,AT, DataLayout::counter_increment);
  __ sw(AT,counter_addr);

  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static && //required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // First pass: if the known klass already has a row, bump its count.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ lw(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ sw(AT, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          // Claim the empty row: store the klass (with relocation) and set
          // its counter to one increment.
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          int oop_index = __ oop_recorder()->find_index(known_klass->constant_encoding());
          RelocationHolder rspec = oop_Relocation::spec(oop_index);
          __ relocate(rspec);
#ifndef _LP64
          __ lui(AT, Assembler::split_high((int)known_klass->constant_encoding()));
          __ addiu(AT, AT, Assembler::split_low((int)known_klass->constant_encoding()));
#else
          __ li48(AT, (long)known_klass->constant_encoding());
#endif
          __ st_ptr(AT,recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ lw(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ sw(AT, data_addr);
          return;
        }
      }
    } else {
      // Receiver type unknown at compile time: emit a runtime search over
      // the receiver rows.
      //__ ld_ptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      __ load_klass(recv, recv);
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ bne(recv,AT,next_test);
        __ delayed()->nop();
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ lw(AT, data_addr);
        __ addi(AT, AT, DataLayout::counter_increment);
        __ sw(AT, data_addr);
        __ b(update_done);
        __ delayed()->nop();
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ ld_ptr(AT, recv_addr);
        __ bne(AT, R0, next_test);   // slot occupied — try the next one
        __ delayed()->nop();
        __ st_ptr(recv, recv_addr);
        __ move(AT,DataLayout::counter_increment);
        __ sw(AT,Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ b(update_done);
          __ delayed()->nop();
        }
        __ bind(next_test);
      }

      __ bind(update_done);
    }
  }
}
// Type profiling (JDK 8 MethodData type entries) is not implemented on MIPS.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  // Newly added in OpenJDK 8
  Unimplemented();
}
// LIR delay-slot ops are not used by this port.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
5102 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
5103 if (dst->is_single_cpu())
5104 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
5105 else if (dst->is_double_cpu())
5106 __ lea(dst->as_register_lo(), frame_map()->address_for_monitor_lock(monitor_no));
5107 }
// No alignment of backward branch targets is performed on this port.
void LIR_Assembler::align_backward_branch_target() {
}
// Arithmetic negation: dest = -left, for int, long, float and double operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ subu(dest->as_register(), R0, left->as_register());
  } else if (left->is_double_cpu()) {
#ifndef _LP64
    // 32-bit VM: negate the hi/lo pair as two's complement (~x + 1),
    // propagating the carry from the low word into the high word.
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();
    assert(dlo != hi, "register checks");
    __ nor(dlo, R0, lo);        // dlo = ~lo
    __ addiu(dlo, dlo, 1);      // dlo = ~lo + 1
    __ sltiu(AT, dlo, 1);       // AT = carry out (1 iff dlo wrapped to 0)
    __ nor(dhi, R0, hi);        // dhi = ~hi
    __ addu(dhi, dhi, AT);      // dhi = ~hi + carry
#else
    __ subu(dest->as_register_lo(), R0, left->as_register_lo());
#endif
  } else if (left->is_single_fpu()) {
    //for mips , does it required ?
    __ neg_s(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    //for mips , does it required ?
    __ neg_d(dest->as_double_reg(), left->as_double_reg());
  }else {
    ShouldNotReachHere();
  }
}
5143 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
5144 assert(addr->is_address() && dest->is_register(), "check");
5145 Register reg = dest->as_register();
5146 __ lea(dest->as_register(), as_Address(addr->as_address_ptr()));
5147 }
5150 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
5151 if (o == NULL) {
5152 // This seems wrong as we do not emit relocInfo
5153 // for classes that are not loaded yet, i.e., they will be
5154 // never GC'd
5155 #ifndef _LP64
5156 //by_css
5157 __ lui(reg, Assembler::split_high((int)o));
5158 __ addiu(reg, reg, Assembler::split_low((int)o));
5159 #else
5160 //__ li48(reg, (long)o);
5161 __ li(reg, (long)o);
5162 #endif
5163 } else {
5164 int oop_index = __ oop_recorder()->find_index(o);
5165 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5166 __ relocate(rspec);
5167 #ifndef _LP64
5168 //by_css
5169 __ lui(reg, Assembler::split_high((int)o));
5170 __ addiu(reg, reg, Assembler::split_low((int)o));
5171 #else
5172 //__ li48(reg, (long)o);
5173 __ li(reg, (long)o);
5174 #endif
5175 }
5176 }
// Emit a direct call into the runtime at 'dest'. If debug info is supplied,
// it must be recorded immediately after the call instruction so the PC maps
// to the right frame state.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
}
/* by yyq 7/22/2009
 * It is unclear whether the register allocator will place a long or double
 * in two consecutive registers. If it does, the lw instructions below
 * should be removed and ld instructions used instead.
 */
// Move a volatile long between a register (cpu or fpu pair) and memory.
// On LP64 this is a single 64-bit access; on 32-bit it is split into two
// 32-bit accesses (low word at disp, high word at disp + 4).
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");
  if (info != NULL) {
    // The first access below may fault on a NULL base; record debug info here.
    add_debug_info_for_null_check_here(info);
  }

  if(src->is_register() && dest->is_address()) {
    // register -> memory
    if(src->is_double_cpu()) {
#ifdef _LP64
      __ sd(src->as_register_lo(), as_Address(dest->as_address_ptr()));
#else
      __ sw(src->as_register_lo(), as_Address(dest->as_address_ptr()));
      __ sw(src->as_register_hi(), as_Address(dest->as_address_ptr()).base(),
            as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else if (src->is_double_fpu()) {
#ifdef _LP64
      __ sdc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
#else
      __ swc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
      __ swc1(src->as_fpu_hi(), as_Address(dest->as_address_ptr()).base(),
              as_Address(dest->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_address() && dest->is_register()){
    // memory -> register
    if(dest->is_double_cpu()) {
#ifdef _LP64
      __ ld(dest->as_register_lo(), as_Address(src->as_address_ptr()));
#else
      __ lw(dest->as_register_lo(), as_Address(src->as_address_ptr()));
      __ lw(dest->as_register_hi(), as_Address(src->as_address_ptr()).base(),
            as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else if (dest->is_double_fpu()) {
#ifdef _LP64
      __ ldc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
#else
      __ lwc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
      __ lwc1(dest->as_fpu_hi(), as_Address(src->as_address_ptr()).base(),
              as_Address(src->as_address_ptr()).disp() +4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
#ifdef ASSERT
// emit run-time assertion
// Runtime-assertion LIR ops are not implemented on MIPS; abort if reached.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  tty->print_cr("LIR_Assembler::emit_assert unimplemented yet!");
  Unimplemented();
}
#endif
// Full memory barrier: emit a MIPS sync instruction.
void LIR_Assembler::membar() {
  __ sync();
}
// Acquire barrier: implemented conservatively as a full sync.
void LIR_Assembler::membar_acquire() {
  __ sync();
}
// Release barrier: implemented conservatively as a full sync.
void LIR_Assembler::membar_release() {
  __ sync();
}
// LoadLoad barrier: intentionally emits nothing on this port.
void LIR_Assembler::membar_loadload() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// StoreStore barrier: intentionally emits nothing on this port.
void LIR_Assembler::membar_storestore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// LoadStore barrier: intentionally emits nothing on this port.
void LIR_Assembler::membar_loadstore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
// StoreLoad barrier: emits nothing.
// NOTE(review): StoreLoad is usually the ordering that requires a real
// barrier; confirm against the MIPS memory model whether a sync is needed
// here, as the other full-barrier entry points above do emit sync.
void LIR_Assembler::membar_storeload() {
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
// Load the current JavaThread pointer into result_reg. With OPT_THREAD the
// thread is cached in the dedicated TREG register; otherwise it is fetched
// via the macro assembler's get_thread helper.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifndef OPT_THREAD
  __ get_thread(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()));
#else
  __ move(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()), TREG);
#endif
}
// No peephole optimizations are performed on the LIR list for this port.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
// Atomic xchg/xadd (used by sun.misc.Unsafe intrinsics).
// NOTE(review): the entire body below is x86 code left commented out, so
// this function currently emits NO instructions — a silent no-op. Confirm
// the LIRGenerator never produces lir_xchg/lir_xadd on MIPS, or implement
// this with ll/sc sequences.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
/* assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }*/
}
5341 #undef __