Tue, 12 Jun 2018 13:58:17 +0800
#7157 Fix all cases where delayed() was omitted when filling a branch delay slot
Summary: enable check_delay and guarantee that delay_state is at_delay_slot when filling a delay slot
Reviewed-by: aoqi
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "gc_interface/collectedHeap.hpp"
37 #include "memory/barrierSet.hpp"
38 #include "memory/cardTableModRefBS.hpp"
39 #include "nativeInst_mips.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #define __ _masm->
44 static void select_different_registers(Register preserve,
45 Register extra,
46 Register &tmp1,
47 Register &tmp2) {
48 if (tmp1 == preserve) {
49 assert_different_registers(tmp1, tmp2, extra);
50 tmp1 = extra;
51 } else if (tmp2 == preserve) {
52 assert_different_registers(tmp1, tmp2, extra);
53 tmp2 = extra;
54 }
55 assert_different_registers(preserve, tmp1, tmp2);
56 }
60 static void select_different_registers(Register preserve,
61 Register extra,
62 Register &tmp1,
63 Register &tmp2,
64 Register &tmp3) {
65 if (tmp1 == preserve) {
66 assert_different_registers(tmp1, tmp2, tmp3, extra);
67 tmp1 = extra;
68 } else if (tmp2 == preserve) {
69 tmp2 = extra;
70 } else if (tmp3 == preserve) {
71 assert_different_registers(tmp1, tmp2, tmp3, extra);
72 tmp3 = extra;
73 }
74 assert_different_registers(preserve, tmp1, tmp2, tmp3);
75 }
77 // need add method Assembler::is_simm16 in assembler_gs2.hpp
78 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
79 if (opr->is_constant()) {
80 LIR_Const* constant = opr->as_constant_ptr();
81 switch (constant->type()) {
82 case T_INT: {
83 jint value = constant->as_jint();
84 return Assembler::is_simm16(value);
85 }
86 default:
87 return false;
88 }
89 }
90 return false;
91 }
93 //FIXME, which register should be used?
// Operand used to pass the receiver ('this') to calls: T0 on this port.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::_t0_oop_opr;
}
97 /*
98 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
99 return receiverOpr();
100 }*/
// Operand holding the OSR buffer pointer on entry to an OSR-compiled
// method.  It reuses the receiver register; on 64-bit it is typed as a
// long operand so the full pointer width is preserved.
LIR_Opr LIR_Assembler::osrBufferPointer() {
#ifdef _LP64
  Register r = receiverOpr()->as_register();
  return FrameMap::as_long_opr(r, r);
#else
  return FrameMap::as_opr(receiverOpr()->as_register());
#endif
}
111 //--------------fpu register translations-----------------------
112 // FIXME:I do not know what's to do for mips fpu
114 address LIR_Assembler::float_constant(float f) {
115 address const_addr = __ float_constant(f);
116 if (const_addr == NULL) {
117 bailout("const section overflow");
118 return __ code()->consts()->start();
119 } else {
120 return const_addr;
121 }
122 }
125 address LIR_Assembler::double_constant(double d) {
126 address const_addr = __ double_constant(d);
127 if (const_addr == NULL) {
128 bailout("const section overflow");
129 return __ code()->consts()->start();
130 } else {
131 return const_addr;
132 }
133 }
// x87-style FPU state reset; not implemented for this (MIPS) port.
void LIR_Assembler::reset_FPU() {
  Unimplemented();
}
// x87-style 24-bit precision mode; not implemented for this (MIPS) port.
void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}
148 //FIXME.
// x87 FPU-stack pop.  MIPS has a flat FPU register file, so this is a
// no-op on this port.
void LIR_Assembler::fpop() {
  // do nothing
}
// x87 FPU-stack exchange; no-op on MIPS (flat FPU register file).
void LIR_Assembler::fxch(int i) {
  // do nothing
}
// x87 FPU-stack load; no-op on MIPS (flat FPU register file).
void LIR_Assembler::fld(int i) {
  // do nothing
}
// x87 FPU-stack free; no-op on MIPS (flat FPU register file).
void LIR_Assembler::ffree(int i) {
  // do nothing
}
// Emits a breakpoint trap (MIPS 'break' instruction with code 17).
void LIR_Assembler::breakpoint() {
  __ brk(17);
}
165 //FIXME, opr can not be float?
// Pushes 'opr' onto the expression stack.  Supports CPU registers
// (single and double), stack slots and object/int constants; all other
// operand kinds (including floats — see FIXME above) are unsupported.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // hi pushed first so lo ends up at the lower stack address
    __ push_reg(opr->as_register_hi());
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Pops the top of stack into 'opr'.  Only single CPU registers are
// supported (the assert message mentions FP registers, but no FP path
// is implemented here).
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu() ) {
    __ pop(opr->as_register());
  } else {
    assert(false, "Must be single word register or floating-point register");
  }
}
// Converts a LIR address to a machine Address (base register + 32-bit
// displacement).  Note: index/scale components of 'addr' are ignored
// here — only base + disp is materialized.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
#ifndef _LP64
  Register reg = addr->base()->as_register();
#else
  //FIXME aoqi
  // On 64-bit the base may be a double-cpu (long) operand; use its lo half.
  Register reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
#endif
  // now we need this for parameter pass
  return Address(reg, addr->disp());
}
// Address of the low word of a two-word value: same as the plain address.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
// Address of the high word of a two-word value: base + disp + 4.
// NOTE(review): unlike as_Address() this always uses as_register() for
// the base, without the LP64 double-cpu handling — presumably only
// reached with single-cpu bases on 32-bit; verify before reusing.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp()+longSize/2);
}
220 //void LIR_Assembler::osr_entry(IRScope* scope, int number_of_locks, Label* continuation, int osr_bci) {
// Emits the OSR (on-stack replacement) entry point: records the entry
// offset, builds the compiled frame, and copies the monitors from the
// interpreter-provided OSR buffer into the compiled activation.
void LIR_Assembler::osr_entry() {
  //  assert(scope->is_top_scope(), "inlined OSR not yet implemented");
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // S7: interpreter locals pointer
  // V1: interpreter locks pointer
  // RA: return address
  // T0: OSR buffer
  // build frame
  // ciMethod* m = scope->method();
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // T0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  // note: we do osr only if the expression stack at the loop beginning is empty,
  // in which case the spill area is empty too and we don't have to setup
  // spilled locals
  //
  // copy monitors
  // V1: pointer to locks
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // offset of the last monitor's slot in the OSR buffer (they are
    // walked from the end of the locals area backwards)
    int monitor_offset = BytesPerWord * method()->max_locals()+
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset =monitor_offset - (i * BasicObjectLock::size())*BytesPerWord;
#ifdef ASSERT
      // sanity check: a monitor in the OSR buffer must hold a locked object
      {
        Label L;
        //__ lw(AT, V1, slot_offset * BytesPerWord + BasicObjectLock::obj_offset_in_bytes());
        __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // copy the lock word and the object reference into the compiled
      // frame's monitor slots
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(AT, OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes());
      __ st_ptr(AT, frame_map()->address_for_monitor_object(i));
    }
  }
}
296 int LIR_Assembler::check_icache() {
297 Register receiver = FrameMap::receiver_opr->as_register();
298 Register ic_klass = IC_Klass;
300 int offset = __ offset();
301 __ inline_cache_check(receiver, IC_Klass);
302 __ align(CodeEntryAlignment);
303 return offset;
306 }
// Loads a to-be-patched oop constant into 'reg'.  Emits a PatchingStub
// plus a fixed-shape constant-load sequence (so the patching code can
// later rewrite the embedded immediate with the real oop).
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  int oop_index = __ oop_recorder()->allocate_oop_index(o);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
#ifndef _LP64
  //by_css
  __ lui(reg, Assembler::split_high((int)o));
  __ addiu(reg, reg, Assembler::split_low((int)o));
#else
  //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi
  // __ li48(reg, (long)o);
  __ li48(reg, (long)o);
#endif
  // patching_epilog(patch, LIR_Op1::patch_normal, noreg, info);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
// Loads a to-be-patched Klass* constant into 'reg', analogous to
// jobject2reg_with_patching but using metadata relocation.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  __ li48(reg, (long)o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
337 // This specifies the esp decrement needed to build the frame
// Stack decrement needed to build the frame: the frame map size minus
// the two words (return address and link) already accounted for by the
// call sequence.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
  // return (frame_map()->framesize() - 2) * BytesPerWord; // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
// Emits the out-of-line exception handler stub and returns its code
// offset, or -1 if the stub section overflowed.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  // Lazy deopt bug 4932387. If last instruction is a call then we
  // need an area to patch where we won't overwrite the exception
  // handler. This means we need 5 bytes. Could use a fat_nop
  // but since this never gets executed it doesn't really make
  // much difference.
  //
  // reserve room for a patched native call plus one extra slot
  for (int i = 0; i < (NativeCall::instruction_size/BytesPerInstWord + 1) ; i++ ) {
    __ nop();
  }

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // no enough space
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in V0, and V1
  // no other registers need to be preserved, so invalidate them
  //__ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(V0);

  // search an exception handler (V0: exception oop, V1: throwing pc)
  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
387 // Emit the code to remove the frame from the stack in the exception
388 // unwind path.
// Emits the code that removes the frame from the stack on the
// exception unwind path: fetch and clear the pending exception from
// TLS, unlock the method's monitor if synchronized, fire the dtrace
// exit probe if enabled, then jump to the shared unwind stub.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = TREG;
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ ld_ptr(V0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_oop_offset()));
  __ st_ptr(R0, Address(thread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(V0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    // S0 is callee-saved, so the exception survives the unlock/probe calls
    __ move(S0, V0); // Preserve the exception (rbx is always callee-saved)
  }

  // Preform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::_v0_opr);
    stub = new MonitorExitStub(FrameMap::_v0_opr, true, 0);
    __ unlock_object(A0, A1, V0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ move(A0, thread);
    __ mov_metadata(A1, method()->constant_encoding());
    __ patchable_call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ move(V0, S0); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  // leave activation of nmethod
  __ remove_frame(initial_frame_size_in_bytes());

  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
// Emits the out-of-line deoptimization handler stub (a call into the
// deopt blob's unpack entry) and returns its code offset, or -1 if the
// stub section overflowed.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)

  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }
  int offset = code_offset();

  // compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  __ call(SharedRuntime::deopt_blob()->unpack());
  __ delayed()->nop();

  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
478 // Optimized Library calls
479 // This is the fast version of java.lang.String.compare; it has not
480 // OSR-entry and therefore, we generate a slow version for OSR's
481 //void LIR_Assembler::emit_string_compare(IRScope* scope) {
// Intrinsic for java.lang.String.compareTo: compares the string in T0
// (receiver) against the one loaded from arg1, leaving the result in
// V0, then returns from the method.  Note the delay-slot discipline:
// a bare '__ delayed()' marks the slot and the NEXT emitted
// instruction fills it (see the #7157 delay-slot fix at the top of
// this file).
// NOTE(review): arg0/dst/info parameters are mostly unused here; the
// receiver is assumed to already be in T0 — confirm against callers.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  // get two string object in T0&T1
  //receiver already in T0
  __ ld_ptr(T1, arg1->as_register());
  //__ ld_ptr(T2, T0, java_lang_String::value_offset_in_bytes()); //value, T_CHAR array
  __ load_heap_oop(T2, Address(T0, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T0, java_lang_String::offset_offset_in_bytes()); //offset
  __ shl(AT, 1);      // offset is in chars; scale to bytes
  __ add(T2, T2, AT);
  __ addi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T2 is the address of the first char in first string(T0)

  add_debug_info_for_null_check_here(info);
  //__ ld_ptr(T3, T1, java_lang_String::value_offset_in_bytes());
  __ load_heap_oop(T3, Address(T1, java_lang_String::value_offset_in_bytes()));
  __ ld_ptr(AT, T1, java_lang_String::offset_offset_in_bytes());
  __ shl(AT, 1);
  __ add(T3, T3, AT);
  __ addi(T3, T3, arrayOopDesc::base_offset_in_bytes(T_CHAR));
  // Now T3 is the address of the first char in second string(T1)

#ifndef _LP64
  //by_css
  // compute minimum length (in T4) and difference of lengths (V0)
  Label L;
  __ lw (T4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw (T5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ subu(V0, T4, T5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move (T4, T5);    // T4 = min(len1, len2)
  __ bind (L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(T4, R0, LoopEnd);
  __ delayed();        // delay slot filled by the addi below

  __ addi(T2, T2, 2);

  // compare current character
  __ lhu(T5, T2, -2);
  __ lhu(T6, T3, 0);
  __ bne(T5, T6, haveResult);
  __ delayed();        // delay slot filled by the addi below

  __ addi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(T4, T4, -1);  // decrement remaining count in the slot

  __ bind(haveResult);
  __ subu(V0, T5, T6);

  __ bind(LoopEnd);
#else
  // compute minimum length (in A4) and difference of lengths (V0)
  Label L;
  __ lw (A4, Address(T0, java_lang_String::count_offset_in_bytes()));
  // the length of the first string(T0)
  __ lw (A5, Address(T1, java_lang_String::count_offset_in_bytes()));
  // the length of the second string(T1)

  __ dsubu(V0, A4, A5);
  __ blez(V0, L);
  __ delayed()->nop();
  __ move (A4, A5);    // A4 = min(len1, len2)
  __ bind (L);

  Label Loop, haveResult, LoopEnd;
  __ bind(Loop);
  __ beq(A4, R0, LoopEnd);
  __ delayed();        // delay slot filled by the daddi below

  __ daddi(T2, T2, 2);

  // compare current character
  __ lhu(A5, T2, -2);
  __ lhu(A6, T3, 0);
  __ bne(A5, A6, haveResult);
  __ delayed();        // delay slot filled by the daddi below

  __ daddi(T3, T3, 2);

  __ b(Loop);
  __ delayed()->addi(A4, A4, -1);  // decrement remaining count in the slot

  __ bind(haveResult);
  __ dsubu(V0, A5, A6);

  __ bind(LoopEnd);
#endif
  // result (length diff or first char diff) is in V0; return
  return_op(FrameMap::_v0_opr);
}
// Emits the method epilogue: pops the frame, touches the safepoint
// polling page (with a poll_return relocation so the VM can identify
// the poll), and returns through RA.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
#ifndef _LP64
  //by aoqi
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()
    + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()
    + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  __ li48(AT, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, 0);
#else
  __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ relocate(relocInfo::poll_return_type);
  __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif

  __ pop(RA);
  __ jr(RA);
  __ delayed()->nop();
}
610 //read protect mem to R0 won't cause the exception only in godson-2e, So I modify R0 to AT .@jerome,11/25,2006
// Emits a safepoint poll: loads from the polling page (with a
// poll_type relocation and debug info attached) so a protected page
// traps the thread at a safepoint.  Returns the poll's code offset.
// The load target is AT rather than R0 — see the comment above about
// godson-2e not faulting on reads into R0.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  assert(info != NULL, "info must not be null for safepoint poll");
  int offset = __ offset();
  Register r = tmp->as_register();
#ifndef _LP64
  //by aoqi
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#else
#ifndef OPT_SAFEPOINT
  // do not know how to handle relocate yet. do not know li or li64 should be used neither. by aoqi. 20111207 FIXME.
  //__ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ li48(r, (intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  //__ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  __ lw(AT, r, 0);
#else
  __ lui(r, Assembler::split_high((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  __ lw(AT, r, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
#endif
#endif
  return offset;
}
640 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
641 if (from_reg != to_reg) __ move(to_reg, from_reg);
642 }
// Swaps the contents of registers a and b via the XOR trick (no
// scratch register needed).
// Precondition: a != b — if both name the same register the sequence
// zeroes it.  The only caller (reg2reg) asserts the registers differ.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xorr(a, a, b);
  __ xorr(b, a, b);
  __ xorr(a, a, b);
}
// Materializes the constant 'src' into the register operand 'dest'.
// Handles int/long/object/metadata immediates directly; float/double
// constants are placed in the constant table and loaded from there
// through AT (with an internal_pc relocation on the address load).
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_ADDRESS: {
      // not expected on this port yet
      assert(patch_code == lir_patch_none, "no patching handled here");
      Unimplemented();
      __ move(dest->as_register(), c->as_jint()); // FIXME
      break;
    }

    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ move(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
#ifndef _LP64
      // 32-bit: split the jlong into hi/lo words
      jlong con = c->as_jlong();
      jint* conhi = (jint*)&con + 1;
      jint* conlow = (jint*)&con;

      if (dest->is_double_cpu()) {
        __ move(dest->as_register_lo(), *conlow);
        __ move(dest->as_register_hi(), *conhi);
      } else {
        // assert(dest->is_double(), "wrong register kind");
        // move the two halves through AT into an FPU register pair
        __ move(AT, *conlow);
        __ mtc1(AT, dest->as_double_reg());
        __ move(AT, *conhi);
        __ mtc1(AT, dest->as_double_reg()+1);
      }
#else
      if (dest->is_double_cpu()) {
        __ li(dest->as_register_lo(), c->as_jlong());
      } else {
        __ li(dest->as_register(), c->as_jlong());
      }
#endif
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      address const_addr = float_constant(c->as_jfloat());
      assert (const_addr != NULL, "must create float constant in the constant table");

      if (dest->is_single_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lwc1(dest->as_float_reg(), AT, 0);

      } else {
        // float constant requested in a CPU register: load raw bits
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        assert(dest->as_register() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
#else
        __ li48(AT, (long)const_addr);
#endif
        __ lw(dest->as_register(), AT, 0);
      }
      break;
    }

    case T_DOUBLE: {
      address const_addr = double_constant(c->as_jdouble());
      assert (const_addr != NULL, "must create double constant in the constant table");

      if (dest->is_double_fpu()) {
        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        // double constant requested in CPU register(s): load raw bits
        assert(dest->as_register_lo() != AT, "AT can not be allocated.");
        assert(dest->as_register_hi() != AT, "AT can not be allocated.");

        __ relocate(relocInfo::internal_pc_type);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)const_addr));
        __ addiu(AT, AT, Assembler::split_low((int)const_addr));
        __ lw(dest->as_register_lo(), AT, 0);
        __ lw(dest->as_register_hi(), AT, 4);
#else
        __ li48(AT, (long)const_addr);
        __ ld(dest->as_register_lo(), AT, 0);
#endif
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
// Stores the constant 'src' into the stack slot operand 'dest',
// staging immediates through AT.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:  // 32-bit immediate into a single stack slot
      __ move(AT, c->as_jint_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_FLOAT:
      Unimplemented();
      break;

    case T_ADDRESS:
      Unimplemented();
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      if (c->as_jobject() == NULL) {
        __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        // non-null oop: record it and emit a relocatable constant load
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        //by_css
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
#else
        __ li48(AT, (long)c->as_jobject());
#endif
        __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
      break;
    case T_LONG:  // fall through
    case T_DOUBLE:
#ifndef _LP64
      // 32-bit: store the two halves separately
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
            lo_word_offset_in_bytes));
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
            hi_word_offset_in_bytes));
#else
      __ move(AT, c->as_jlong_bits());
      __ sd(AT, frame_map()->address_for_slot(dest->double_stack_ix(),
            lo_word_offset_in_bytes));
#endif
      break;
    default:
      ShouldNotReachHere();
  }
}
// Stores the constant 'src' to the memory address 'dest' as 'type',
// staging immediates through AT and using R0 directly for zeros.
// If 'info' is non-null, debug info for an implicit null check is
// recorded at the store that may fault.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_LONG: // fall through
    case T_DOUBLE:
#ifndef _LP64
      __ move(AT, c->as_jint_hi_bits());
      __ sw(AT, as_Address_hi(addr));
      __ move(AT, c->as_jint_lo_bits());
      __ sw(AT, as_Address_lo(addr));
#else
      if(c->as_jlong_bits() != 0) {
        /* DoublePrint: -0.0
         *   (gdb) print /x -9223372036854775808
         *   $1 = 0x8000000000000000
         */
        __ li64(AT, c->as_jlong_bits());
        __ sd(AT, as_Address_lo(addr));
      } else
        __ sd(R0, as_Address(addr));
#endif
      break;
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL){
        // null oop: narrow store when compressed oops are in use
        if (UseCompressedOops && !wide) {
          __ sw(R0, as_Address(addr));
        } else {
          __ st_ptr(R0, as_Address(addr));
        }
      } else {
        int oop_index = __ oop_recorder()->find_index(c->as_jobject());
        RelocationHolder rspec = oop_Relocation::spec(oop_index);
        __ relocate(rspec);
#ifndef _LP64
        __ lui(AT, Assembler::split_high((int)c->as_jobject()));
        __ addiu(AT, AT, Assembler::split_low((int)c->as_jobject()));
        __ st_ptr(AT, as_Address(addr));
        null_check_here = code_offset();
#else
        //by_css
        __ li64(AT, (long)c->as_jobject());
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(AT);
          null_check_here = code_offset();
          __ sw(AT, as_Address(addr));
        } else {
          __ st_ptr(AT, as_Address(addr));
        }
#endif
      }
      break;
    case T_INT: // fall through
    case T_FLOAT:
      if(c->as_jint_bits() != 0) {
        __ move(AT, c->as_jint_bits());
        __ sw(AT, as_Address(addr));
      } else
        __ sw(R0, as_Address(addr));
      break;
    case T_ADDRESS:
      __ move(AT, c->as_jint_bits());
      __ st_ptr(AT, as_Address(addr));
      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sb(AT, as_Address(addr));
      }
      else
        __ sb(R0, as_Address(addr));
      break;
    case T_CHAR: // fall through
    case T_SHORT:
      if(c->as_jint() != 0) {
        __ move(AT, c->as_jint());
        __ sh(AT, as_Address(addr));
      }
      else
        __ sh(R0, as_Address(addr));
      break;
    default: ShouldNotReachHere();
  };
  // attach debug info for the implicit null check at the faulting store
  if (info != NULL) add_debug_info_for_null_check(null_check_here, info);
}
// Moves between register operands: FPU<->FPU, CPU<->CPU (including
// the 32-bit double-cpu pair shuffle), and CPU<->FPU raw-bit
// transfers via mfc1/mtc1.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  if (dest->is_float_kind() && src->is_float_kind()) {
    // float to float moves
    if (dest->is_single_fpu()) {
      assert(src->is_single_fpu(), "must both be float");
      __ mov_s(dest->as_float_reg(), src->as_float_reg());
    } else {
      assert(src->is_double_fpu(), "must bothe be double");
      __ mov_d( dest->as_double_reg(),src->as_double_reg());
    }
  } else if (!dest->is_float_kind() && !src->is_float_kind()) {
    // int to int moves
    if (dest->is_single_cpu()) {
#ifdef _LP64
      //FIXME aoqi: copy from x86
      if (src->type() == T_LONG) {
        // Can do LONG -> OBJECT
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
#endif
      assert(src->is_single_cpu(), "must match");
      if (dest->type() == T_INT) {
        // 32-bit move so the upper bits are handled per move_u32's contract
        __ move_u32(dest->as_register(), src->as_register());
      } else
        move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
#endif
      // NOTE(review): when src is single_cpu, f_hi/t_hi stay
      // uninitialized and are read by the 32-bit asserts/shuffle below —
      // presumably that path is only reached with double-cpu sources on
      // 32-bit; verify.
      Register f_lo;
      Register f_hi;
      Register t_lo;
      Register t_hi;

      if (src->is_single_cpu()) {
        f_lo = src->as_register();
        t_lo = dest->as_register_lo();
      } else {
        f_lo = src->as_register_lo();
        f_hi = src->as_register_hi();
        t_lo = dest->as_register_lo();
        t_hi = dest->as_register_hi();
        assert(f_hi == f_lo, "must be same");
        assert(t_hi == t_lo, "must be same");
      }
#ifdef _LP64
      move_regs(f_lo, t_lo);
#else
      /*
      if (src->as_register_hi() != dest->as_register_lo()) {
        move_regs(src->as_register_lo(), dest->as_register_lo());
        move_regs(src->as_register_hi(), dest->as_register_hi());
      } else if (src->as_register_lo() != dest->as_register_hi()) {
        move_regs(src->as_register_hi(), dest->as_register_hi());
        move_regs(src->as_register_lo(), dest->as_register_lo());
      } else {
        swap_reg(src->as_register_lo(), src->as_register_hi());
      }
      */
      assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");

      // move the pair without clobbering a source half that is also a
      // destination half; a full criss-cross needs a swap
      if (f_lo == t_hi && f_hi == t_lo) {
        swap_reg(f_lo, f_hi);
      } else if (f_hi == t_lo) {
        assert(f_lo != t_hi, "overwriting register");
        move_regs(f_hi, t_hi);
        move_regs(f_lo, t_lo);
      } else {
        assert(f_hi != t_lo, "overwriting register");
        move_regs(f_lo, t_lo);
        move_regs(f_hi, t_hi);
      }
#endif // LP64
    }
  } else {
    // float to int or int to float moves (raw bit transfers)
    if (dest->is_double_cpu()) {
      assert(src->is_double_fpu(), "must match");
      __ mfc1(dest->as_register_lo(), src->as_double_reg());
#ifndef _LP64
      __ mfc1(dest->as_register_hi(), src->as_double_reg() + 1);
#endif
    } else if (dest->is_single_cpu()) {
      assert(src->is_single_fpu(), "must match");
      __ mfc1(dest->as_register(), src->as_float_reg());
    } else if (dest->is_double_fpu()) {
      assert(src->is_double_cpu(), "must match");
      __ mtc1(src->as_register_lo(), dest->as_double_reg());
#ifndef _LP64
      __ mtc1(src->as_register_hi(), dest->as_double_reg() + 1);
#endif
    } else if (dest->is_single_fpu()) {
      assert(src->is_single_cpu(), "must match");
      __ mtc1(src->as_register(), dest->as_float_reg());
    }
  }
}
1041 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type,bool pop_fpu_stack) {
1042 assert(src->is_register(), "should not call otherwise");
1043 assert(dest->is_stack(), "should not call otherwise");
1045 if (src->is_single_cpu()) {
1046 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
1047 if (type == T_OBJECT || type == T_ARRAY) {
1048 __ verify_oop(src->as_register());
1049 }
1050 #ifdef _LP64
1051 if (type == T_INT)
1052 __ sw(src->as_register(),dst);
1053 else
1054 #endif
1055 __ st_ptr(src->as_register(),dst);
1056 } else if (src->is_double_cpu()) {
1057 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
1058 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
1059 __ st_ptr(src->as_register_lo(),dstLO);
1060 NOT_LP64(__ st_ptr(src->as_register_hi(),dstHI));
1061 }else if (src->is_single_fpu()) {
1062 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1063 __ swc1(src->as_float_reg(), dst_addr);
1065 } else if (src->is_double_fpu()) {
1066 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1067 #ifndef _LP64
1068 __ swc1(src->as_double_reg(), dst_addr);
1069 __ swc1(src->as_double_reg() + 1, dst_addr.base(), dst_addr.disp() + 4);
1070 #else
1071 __ sdc1(src->as_double_reg(), dst_addr);
1072 #endif
1074 } else {
1075 ShouldNotReachHere();
1076 }
1077 }
1079 //FIXME
// Store a register value into memory (dest is a LIR_Address).
// When patch_code != lir_patch_none the field offset is not resolved yet:
// a PatchingStub is created and the displacement is materialized with a
// patchable lui/addiu pair in AT. 'wide' forces an uncompressed oop store
// even when UseCompressedOops. pop_fpu_stack and the unnamed "unaliged"
// flag are unused on this port.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool/*unaliged*/) {
  LIR_Address* to_addr = dest->as_address_ptr();
  //Register dest_reg = to_addr->base()->as_register();
  // FIXME aoqi
  Register dest_reg = to_addr->base()->is_single_cpu()? to_addr->base()->as_register() : to_addr->base()->as_register_lo();
  PatchingStub* patch = NULL;
  bool needs_patching = (patch_code != lir_patch_none);
  Register disp_reg = NOREG;
  int disp_value = to_addr->disp();
  /*
  the start position of patch template is labeled by "new PatchingStub(...)"
  during patch, T9 will be changed and not restore
  that's why we use S7 but not T9 as compressed_src here
  */
  Register compressed_src = S7;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // pre-compress the oop into S7; the store below writes S7
      __ move(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!src->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal,
           "patching doesn't match register");
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  // Materialize the displacement in AT when it must be patchable or does not
  // fit a 16-bit immediate; otherwise disp_value is used directly below.
  if (needs_patching) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }
  // NOTE(review): 'offset' is recorded but never used later in this method.
  int offset = code_offset();

  switch(type) {
    case T_DOUBLE:
      assert(src->is_double_fpu(), "just check");
      if (disp_reg == noreg) {
#ifndef _LP64
        __ swc1(src->as_double_reg(), dest_reg, disp_value);
        __ swc1(src->as_double_reg()+1, dest_reg, disp_value+4);
#else
        __ sdc1(src->as_double_reg(), dest_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, 0);
        __ swc1(src->as_double_reg()+1, AT, 4);
#else
        __ sdc1(src->as_double_reg(), AT, 0);
#endif
      } else {
        __ add(AT, dest_reg, disp_reg);
#ifndef _LP64
        __ swc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ swc1(src->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ sdc1(src->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
      break;

    case T_FLOAT:
      if (disp_reg == noreg) {
        __ swc1(src->as_float_reg(), dest_reg, disp_value);
      } else if(needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(from_lo, AT, 0);
      } else {
        __ st_ptr(from_lo, as_Address_lo(to_addr));
      }
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      // Order the two word stores so the address registers are not clobbered
      // before both halves have been written.
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          NOT_LP64(__ st_ptr(from_hi, AT, longSize/2);)
          __ st_ptr(from_lo, AT, 0);
        } else {
          __ st_ptr(from_hi, as_Address_hi(to_addr));
          __ st_ptr(from_lo, as_Address_lo(to_addr));
        }
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(from_lo, AT, 0);
          __ st_ptr(from_hi, AT, longSize/2);
        } else {
          __ st_ptr(from_lo, as_Address_lo(to_addr));
          __ st_ptr(from_hi, as_Address_hi(to_addr));
        }
      }
#endif
      break;
    }

    case T_ARRAY:
    case T_OBJECT:
#ifdef _LP64
      if (UseCompressedOops && !wide) {
        // store the 32-bit compressed oop prepared in S7 above
        if (disp_reg == noreg) {
          __ sw(compressed_src, dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ sw(compressed_src, AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ st_ptr(src->as_register(), dest_reg, disp_value);
        } else if (needs_patching) {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, 0);
        } else {
          __ add(AT, dest_reg, disp_reg);
          __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
#endif
      // On !_LP64 builds T_ARRAY/T_OBJECT fall through to T_ADDRESS and then
      // to the T_INT sw below — presumably intentional since pointers are
      // 32-bit there; TODO confirm.
    case T_ADDRESS:
#ifdef _LP64
      if (disp_reg == noreg) {
        __ st_ptr(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ st_ptr(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;
#endif
    case T_INT:
      if (disp_reg == noreg) {
        __ sw(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sw(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_CHAR:
    case T_SHORT:
      if (disp_reg == noreg) {
        __ sh(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sh(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    case T_BYTE:
    case T_BOOLEAN:
      assert(src->is_single_cpu(), "just check");

      if (disp_reg == noreg) {
        __ sb(src->as_register(), dest_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, 0);
      } else {
        __ add(AT, dest_reg, disp_reg);
        __ sb(src->as_register(), AT, Assembler::split_low(disp_value));
      }
      break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    // NOTE(review): this calls as_register() although dest_reg above also
    // allows as_register_lo() — confirm base is always single_cpu when
    // patching is needed.
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
// Load a value from a stack slot of the current frame into a register.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (type == T_INT)
      __ lw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    else
#endif
      __ ld_ptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    /* Historical failure this path works around:
       java.util.concurrent.locks.ReentrantReadWriteLock$Sync::tryAcquire

       88 move [stack:2|L] [a5a5|J]
       OpenJDK 64-Bit Client VM warning: c1_LIR.hpp, 397,
         assert(is_double_stack() && !is_virtual(),"type check")
       0x000000556197af8c: ld a5, 0x50(sp)

       i.e. the source may come in as either a single or a double stack slot,
       so both are accepted here. */
    Address src_addr_LO;
    if (src->is_single_stack())
      src_addr_LO = frame_map()->address_for_slot(src->single_stack_ix(), lo_word_offset_in_bytes);
    else if (src->is_double_stack())
      src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    else
      ShouldNotReachHere();
#else
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
#endif
#ifdef _LP64
    if (src->type() == T_INT)
      __ lw(dest->as_register_lo(), src_addr_LO);
    else
#endif
      __ ld_ptr(dest->as_register_lo(), src_addr_LO);
    // on 32-bit the high word goes into the second register of the pair
    NOT_LP64(__ ld_ptr(dest->as_register_hi(), src_addr_HI));
  } else if (dest->is_single_fpu()) {
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lwc1(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
#ifndef _LP64
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ lwc1(dest->as_double_reg(), src_addr_LO);
    __ lwc1(dest->as_double_reg()+1, src_addr_HI);
#else
    __ ldc1(dest->as_double_reg(), src_addr_LO);
#endif
  } else {
    ShouldNotReachHere();
    /*
    assert(dest->is_single_cpu(), "cannot be anything else but a single cpu");
    assert(type!= T_ILLEGAL, "Bad type in stack2reg")
    Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ lw(dest->as_register(), addr);
    */
  }
}
1366 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1367 if (src->is_single_stack()) {
1368 /*
1369 * 2012/5/23 Jin: YozoOffice(-Xcomp) corrupts in "New File -> word"
1370 *
1371 * [b.q.e.a.z::bw()]
1372 * move [stack:15|L] [stack:17|L]
1373 * 0x00000055584e7cf4: lw at, 0x78(sp) <--- error!
1374 * 0x00000055584e7cf8: sw at, 0x88(sp)
1375 */
1376 if (type == T_OBJECT )
1377 {
1378 __ ld(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
1379 __ sd(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
1380 }
1381 else
1382 {
1383 __ lw(AT, frame_map()->address_for_slot(src ->single_stack_ix()));
1384 __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
1385 }
1386 } else if (src->is_double_stack()) {
1387 #ifndef _LP64
1388 __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
1389 __ sw(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
1390 __ lw(AT, frame_map()->address_for_slot(src ->double_stack_ix(),4));
1391 __ sw(AT, frame_map()->address_for_slot(dest ->double_stack_ix(),4));
1392 #else
1393 __ ld_ptr(AT, frame_map()->address_for_slot(src ->double_stack_ix()));
1394 __ st_ptr(AT, frame_map()->address_for_slot(dest->double_stack_ix()));
1395 #endif
1396 } else {
1397 ShouldNotReachHere();
1398 }
1399 }
1401 // if patching needed, be sure the instruction at offset is a MoveMemReg
// Load a value from memory (src is a LIR_Address) into a register.
// A PatchingStub is created when the field offset still needs patching;
// 'wide' selects an uncompressed oop load even when UseCompressedOops.
// 'offset' tracks the pc of the actual load instruction so the implicit
// null-check debug info recorded at the end points at the faulting load.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();
  //Address from_addr = as_Address(addr);
  //Register src_reg = addr->base()->as_register();
  // FIXME aoqi
  Register src_reg = addr->base()->is_single_cpu()? addr->base()->as_register() : addr->base()->as_register_lo();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  // we must use lui&addiu, so the displacement stays patchable / fits
  if (needs_patching) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
    __ addiu(AT, AT, Assembler::split_low(disp_value));
  } else if (!Assembler::is_simm16(disp_value)) {
    disp_reg = AT;
    __ lui(AT, Assembler::split_high(disp_value));
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  switch(type) {
    case T_BOOLEAN:
    case T_BYTE: {
      // sign-extending byte load
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lb(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lb(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lb(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_CHAR: {
      // zero-extending halfword load (Java char is unsigned)
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lhu(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lhu(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_SHORT: {
      // sign-extending halfword load
      // assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lh(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lh(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_OBJECT:
    case T_ARRAY:
      // compressed oops are loaded as 32-bit (lwu) and decoded below
      if (UseCompressedOops && !wide) {
        if (disp_reg == noreg) {
          __ lwu(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;

    case T_ADDRESS:
      // a klass-field load with compressed class pointers is 32 bits wide
      // and decoded below; other addresses are full pointer loads
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        if (disp_reg == noreg) {
          __ lwu(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      } else {
        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;

    case T_INT: {
      //assert(to_reg.is_word(), "just check");
      if (disp_reg == noreg) {
        __ lw(dest->as_register(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        __ ld_ptr(to_lo, AT, 0);
      } else {
        __ ld_ptr(to_lo, as_Address_lo(addr));
      }
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      // Order the two word loads so the address registers are not
      // clobbered before both halves have been read.
      if ((base == to_lo && index == to_hi) ||(base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ lw(to_lo, Address(to_hi));
        __ lw(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_hi, AT, longSize/2);
          __ lw(to_lo, AT, 0);
        } else {
          __ lw(to_hi, as_Address_hi(addr));
          __ lw(to_lo, as_Address_lo(addr));
        }
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        if (needs_patching) {
          __ add(AT, src_reg, disp_reg);
          offset = code_offset();
          __ lw(to_lo, AT, 0);
          __ lw(to_hi, AT, longSize/2);
        } else {
          __ lw(to_lo, as_Address_lo(addr));
          __ lw(to_hi, as_Address_hi(addr));
        }
      }
#endif
    }
    break;

    case T_FLOAT: {
      //assert(to_reg.is_float(), "just check");
      if (disp_reg == noreg) {
        __ lwc1(dest->as_float_reg(), src_reg, disp_value);
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, 0);
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
        __ lwc1(dest->as_float_reg(), AT, Assembler::split_low(disp_value));
      }
    }
    break;

    case T_DOUBLE: {
      //assert(to_reg.is_double(), "just check");
      if (disp_reg == noreg) {
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), src_reg, disp_value);
        __ lwc1(dest->as_double_reg()+1, src_reg, disp_value+4);
#else
        __ ldc1(dest->as_double_reg(), src_reg, disp_value);
#endif
      } else if (needs_patching) {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, 0);
        __ lwc1(dest->as_double_reg()+1, AT, 4);
#else
        __ ldc1(dest->as_double_reg(), AT, 0);
#endif
      } else {
        __ add(AT, src_reg, disp_reg);
        offset = code_offset();
#ifndef _LP64
        __ lwc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
        __ lwc1(dest->as_double_reg()+1, AT, Assembler::split_low(disp_value) + 4);
#else
        __ ldc1(dest->as_double_reg(), AT, Assembler::split_low(disp_value));
#endif
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }

  if (needs_patching) {
    patching_epilog(patch, patch_code, src_reg, info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}
1680 void LIR_Assembler::prefetchr(LIR_Opr src) {
1681 LIR_Address* addr = src->as_address_ptr();
1682 Address from_addr = as_Address(addr);
1683 }
1686 void LIR_Assembler::prefetchw(LIR_Opr src) {
1687 }
1689 NEEDS_CLEANUP; // This could be static?
1690 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1691 int elem_size = type2aelembytes(type);
1692 switch (elem_size) {
1693 case 1: return Address::times_1;
1694 case 2: return Address::times_2;
1695 case 4: return Address::times_4;
1696 case 8: return Address::times_8;
1697 }
1698 ShouldNotReachHere();
1699 return Address::no_scale;
1700 }
1703 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1704 switch (op->code()) {
1705 case lir_frem:
1706 arithmetic_frem(
1707 op->code(),
1708 op->in_opr1(),
1709 op->in_opr2(),
1710 op->in_opr3(),
1711 op->result_opr(),
1712 op->info());
1713 break;
1715 case lir_idiv:
1716 case lir_irem:
1717 arithmetic_idiv(
1718 op->code(),
1719 op->in_opr1(),
1720 op->in_opr2(),
1721 op->in_opr3(),
1722 op->result_opr(),
1723 op->info());
1724 break;
1725 default: ShouldNotReachHere(); break;
1726 }
1727 }
1729 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1730 LIR_Opr opr1 = op->left();
1731 LIR_Opr opr2 = op->right();
1732 LIR_Condition condition = op->cond();
1733 #ifdef ASSERT
1734 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1735 if (op->block() != NULL) _branch_target_blocks.append(op->block());
1736 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1737 #endif
1738 if (op->cond() == lir_cond_always) {
1739 if(op->label()==NULL) //by liaob1
1740 __ b(*op->label());
1741 else
1742 __ b_far(*op->label());
1743 __ delayed()->nop();
1744 return;
1745 }
1746 if (opr1->is_single_cpu()) {
1747 Register reg_op1 = opr1->as_register();
1748 if (opr2->is_single_cpu()) {
1749 #ifdef OPT_RANGECHECK
1750 assert(!op->check(), "just check");
1751 #endif
1752 Register reg_op2 = opr2->as_register();
1753 switch (condition) {
1754 case lir_cond_equal:
1755 __ beq(reg_op1, reg_op2, *op->label());
1756 break;
1757 case lir_cond_notEqual:
1758 if(op->label()==NULL)
1759 __ bne(reg_op1, reg_op2, *op->label());//liaobin1
1760 else
1761 __ bne_far(reg_op1, reg_op2, *op->label());//liaobin1
1762 break;
1763 case lir_cond_less:
1764 // AT = 1 TRUE
1765 __ slt(AT, reg_op1, reg_op2);
1766 __ bne_far(AT, R0, *op->label());
1767 break;
1768 case lir_cond_lessEqual:
1769 // AT = 0 TRUE
1770 __ slt(AT, reg_op2, reg_op1);
1771 __ beq_far(AT, R0, *op->label());
1772 break;
1773 case lir_cond_belowEqual:
1774 // AT = 0 TRUE
1775 __ sltu(AT, reg_op2, reg_op1);
1776 __ beq(AT, R0, *op->label());
1777 break;
1778 case lir_cond_greaterEqual:
1779 // AT = 0 TRUE
1780 __ slt(AT, reg_op1, reg_op2);
1781 __ beq_far(AT, R0, *op->label());
1782 break;
1783 case lir_cond_aboveEqual:
1784 // AT = 0 TRUE
1785 __ sltu(AT, reg_op1, reg_op2);
1786 __ beq_far(AT, R0, *op->label());
1787 break;
1788 case lir_cond_greater:
1789 // AT = 1 TRUE
1790 __ slt(AT, reg_op2, reg_op1);
1791 __ bne_far(AT, R0, *op->label());
1792 break;
1793 default: ShouldNotReachHere();
1794 }
1795 } else if (opr2->is_constant()) {
1796 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
1797 bool is_object = false;
1798 if (opr2->pointer()->as_constant()->type() == T_INT) {
1799 temp_value = (jint)(opr2->as_jint());
1800 } else if (opr2->pointer()->as_constant()->type() == T_LONG) {
1801 temp_value = (jlong)(opr2->as_jlong());
1802 } else if (opr2->pointer()->as_constant()->type() == T_OBJECT) {
1803 is_object = true;
1804 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_jobject());
1805 } else {
1806 ShouldNotReachHere();
1807 }
1809 switch (condition) {
1810 case lir_cond_equal:
1811 #ifdef OPT_RANGECHECK
1812 assert(!op->check(), "just check");
1813 #endif
1814 if (temp_value) {
1815 if (is_object) {
1816 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1817 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1818 __ relocate(rspec);
1819 }
1820 __ li(AT, temp_value);
1821 __ beq_far(reg_op1, AT, *op->label());
1822 } else {
1823 __ beq_far(reg_op1, R0, *op->label());
1824 }
1825 break;
1827 case lir_cond_notEqual:
1828 #ifdef OPT_RANGECHECK
1829 assert(!op->check(), "just check");
1830 #endif
1831 if (temp_value) {
1832 if (is_object) {
1833 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)temp_value);
1834 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1835 __ relocate(rspec);
1836 }
1837 __ li(AT, temp_value);
1838 __ bne_far(reg_op1, AT, *op->label());
1839 } else {
1840 __ bne_far(reg_op1, R0, *op->label());
1841 }
1842 break;
1844 case lir_cond_less:
1845 #ifdef OPT_RANGECHECK
1846 assert(!op->check(), "just check");
1847 #endif
1848 // AT = 1 TRUE
1849 if (Assembler::is_simm16(temp_value)) {
1850 __ slti(AT, reg_op1, temp_value);
1851 } else {
1852 __ move(AT, temp_value);
1853 __ slt(AT, reg_op1, AT);
1854 }
1855 __ bne_far(AT, R0, *op->label());
1856 break;
1858 case lir_cond_lessEqual:
1859 #ifdef OPT_RANGECHECK
1860 assert(!op->check(), "just check");
1861 #endif
1862 // AT = 0 TRUE
1863 __ li(AT, temp_value);
1864 __ slt(AT, AT, reg_op1);
1865 __ beq(AT, R0, *op->label());
1866 break;
1868 case lir_cond_belowEqual:
1869 // AT = 0 TRUE
1870 #ifdef OPT_RANGECHECK
1871 if (op->check()) {
1872 __ li(AT, temp_value);
1873 add_debug_info_for_range_check_here(op->info(), temp_value);
1874 __ tgeu(AT, reg_op1, 29);
1875 } else {
1876 #endif
1877 __ li(AT, temp_value);
1878 __ sltu(AT, AT, reg_op1);
1879 __ beq(AT, R0, *op->label());
1880 #ifdef OPT_RANGECHECK
1881 }
1882 #endif
1883 break;
1885 case lir_cond_greaterEqual:
1886 #ifdef OPT_RANGECHECK
1887 assert(!op->check(), "just check");
1888 #endif
1889 // AT = 0 TRUE
1890 if (Assembler::is_simm16(temp_value)) {
1891 __ slti(AT, reg_op1, temp_value);
1892 } else {
1893 __ li(AT, temp_value);
1894 __ slt(AT, reg_op1, AT);
1895 }
1896 __ beq(AT, R0, *op->label());
1897 break;
1899 case lir_cond_aboveEqual:
1900 #ifdef OPT_RANGECHECK
1901 assert(!op->check(), "just check");
1902 #endif
1903 // AT = 0 TRUE
1904 if (Assembler::is_simm16(temp_value)) {
1905 __ sltiu(AT, reg_op1, temp_value);
1906 } else {
1907 __ li(AT, temp_value);
1908 __ sltu(AT, reg_op1, AT);
1909 }
1910 __ beq(AT, R0, *op->label());
1911 break;
1913 case lir_cond_greater:
1914 #ifdef OPT_RANGECHECK
1915 assert(!op->check(), "just check");
1916 #endif
1917 // AT = 1 TRUE
1918 __ li(AT, temp_value);
1919 __ slt(AT, AT, reg_op1);
1920 __ bne_far(AT, R0, *op->label());
1921 break;
1923 default: ShouldNotReachHere();
1924 }
1926 } else {
1927 if (opr2->is_address()) {
1928 //FIXME. aoqi lw or ld_ptr?
1929 if (op->type() == T_INT)
1930 __ lw(AT, as_Address(opr2->pointer()->as_address()));
1931 else
1932 __ ld_ptr(AT, as_Address(opr2->pointer()->as_address()));
1933 } else if (opr2->is_stack()) {
1934 //FIXME. aoqi
1935 __ ld_ptr(AT, frame_map()->address_for_slot(opr2->single_stack_ix()));
1936 } else {
1937 ShouldNotReachHere();
1938 }
1939 switch (condition) {
1940 case lir_cond_equal:
1941 #ifdef OPT_RANGECHECK
1942 assert(!op->check(), "just check");
1943 #endif
1944 __ beq(reg_op1, AT, *op->label());
1945 break;
1946 case lir_cond_notEqual:
1947 #ifdef OPT_RANGECHECK
1948 assert(!op->check(), "just check");
1949 #endif
1950 __ bne_far(reg_op1, AT, *op->label());
1951 break;
1952 case lir_cond_less:
1953 #ifdef OPT_RANGECHECK
1954 assert(!op->check(), "just check");
1955 #endif
1956 // AT = 1 TRUE
1957 __ slt(AT, reg_op1, AT);
1958 __ bne_far(AT, R0, *op->label());
1959 break;
1960 case lir_cond_lessEqual:
1961 #ifdef OPT_RANGECHECK
1962 assert(!op->check(), "just check");
1963 #endif
1964 // AT = 0 TRUE
1965 __ slt(AT, AT, reg_op1);
1966 __ beq(AT, R0, *op->label());
1967 break;
1968 case lir_cond_belowEqual:
1969 #ifdef OPT_RANGECHECK
1970 assert(!op->check(), "just check");
1971 #endif
1972 // AT = 0 TRUE
1973 __ sltu(AT, AT, reg_op1);
1974 __ beq(AT, R0, *op->label());
1975 break;
1976 case lir_cond_greaterEqual:
1977 #ifdef OPT_RANGECHECK
1978 assert(!op->check(), "just check");
1979 #endif
1980 // AT = 0 TRUE
1981 __ slt(AT, reg_op1, AT);
1982 __ beq(AT, R0, *op->label());
1983 break;
1984 case lir_cond_aboveEqual:
1985 // AT = 0 TRUE
1986 #ifdef OPT_RANGECHECK
1987 if (op->check()) {
1988 add_debug_info_for_range_check_here(op->info(), opr1->rinfo());
1989 __ tgeu(reg_op1, AT, 29);
1990 } else {
1991 #endif
1992 __ sltu(AT, reg_op1, AT);
1993 __ beq_far(AT, R0, *op->label());
1994 #ifdef OPT_RANGECHECK
1995 }
1996 #endif
1997 break;
1998 case lir_cond_greater:
1999 #ifdef OPT_RANGECHECK
2000 assert(!op->check(), "just check");
2001 #endif
2002 // AT = 1 TRUE
2003 __ slt(AT, AT, reg_op1);
2004 __ bne_far(AT, R0, *op->label());
2005 break;
2006 default: ShouldNotReachHere();
2007 }
2008 }
2009 #ifdef OPT_RANGECHECK
2010 if (!op->check())
2011 #endif
2012 __ delayed()->nop();
2014 } else if(opr1->is_address() || opr1->is_stack()) {
2015 #ifdef OPT_RANGECHECK
2016 assert(!op->check(), "just check");
2017 #endif
2018 if (opr2->is_constant()) {
2019 NOT_LP64(jint) LP64_ONLY(jlong) temp_value;
2020 if (opr2->as_constant_ptr()->type() == T_INT) {
2021 temp_value = (jint)opr2->as_constant_ptr()->as_jint();
2022 } else if (opr2->as_constant_ptr()->type() == T_OBJECT) {
2023 temp_value = NOT_LP64((jint)) LP64_ONLY((jlong))(opr2->as_constant_ptr()->as_jobject());
2024 } else {
2025 ShouldNotReachHere();
2026 }
2028 if (Assembler::is_simm16(temp_value)) {
2029 if (opr1->is_address()) {
2030 __ lw(AT, as_Address(opr1->pointer()->as_address()));
2031 } else {
2032 __ lw(AT, frame_map()->address_for_slot(opr1->single_stack_ix()));
2033 }
2035 switch(condition) {
2037 case lir_cond_equal:
2038 __ addi(AT, AT, -(int)temp_value);
2039 __ beq(AT, R0, *op->label());
2040 break;
2041 case lir_cond_notEqual:
2042 __ addi(AT, AT, -(int)temp_value);
2043 __ bne_far(AT, R0, *op->label());
2044 break;
2045 case lir_cond_less:
2046 // AT = 1 TRUE
2047 __ slti(AT, AT, temp_value);
2048 __ bne_far(AT, R0, *op->label());
2049 break;
2050 case lir_cond_lessEqual:
2051 // AT = 0 TRUE
2052 __ addi(AT, AT, -temp_value);
2053 __ slt(AT, R0, AT);
2054 __ beq(AT, R0, *op->label());
2055 break;
2056 case lir_cond_belowEqual:
2057 // AT = 0 TRUE
2058 __ addiu(AT, AT, -temp_value);
2059 __ sltu(AT, R0, AT);
2060 __ beq(AT, R0, *op->label());
2061 break;
2062 case lir_cond_greaterEqual:
2063 // AT = 0 TRUE
2064 __ slti(AT, AT, temp_value);
2065 __ beq(AT, R0, *op->label());
2066 break;
2067 case lir_cond_aboveEqual:
2068 // AT = 0 TRUE
2069 __ sltiu(AT, AT, temp_value);
2070 __ beq(AT, R0, *op->label());
2071 break;
2072 case lir_cond_greater:
2073 // AT = 1 TRUE
2074 __ addi(AT, AT, -temp_value);
2075 __ slt(AT, R0, AT);
2076 __ bne_far(AT, R0, *op->label());
2077 break;
2079 default:
2080 Unimplemented();
2081 }
2082 } else {
2083 Unimplemented();
2084 }
2085 } else {
2086 Unimplemented();
2087 }
2088 __ delayed()->nop();
2090 } else if(opr1->is_double_cpu()) {
2091 #ifdef OPT_RANGECHECK
2092 assert(!op->check(), "just check");
2093 #endif
2094 Register opr1_lo = opr1->as_register_lo();
2095 Register opr1_hi = opr1->as_register_hi();
2097 if (opr2->is_double_cpu()) {
2098 Register opr2_lo = opr2->as_register_lo();
2099 Register opr2_hi = opr2->as_register_hi();
2100 switch (condition) {
2101 case lir_cond_equal: {
2102 Label L;
2103 #ifndef _LP64
2104 __ bne(opr1_lo, opr2_lo, L);
2105 __ delayed()->nop();
2106 __ beq(opr1_hi, opr2_hi, *op->label());
2107 #else
2108 /* static jobject java.lang.Long.toString(jlong)
2110 10 move [t0t0|J] [a4a4|J]
2111 12 move [lng:-9223372036854775808|J] [a6a6|J]
2112 14 branch [EQ] [a4a4|J] [a6a6|J] [B1]
2113 0x000000555e8532e4: bne a4, a6, 0x000000555e8532e4 <-- error
2114 0x000000555e8532e8: sll zero, zero, 0
2115 */
2116 __ beq(opr1_lo, opr2_lo, *op->label());
2117 #endif
2118 __ delayed()->nop();
2119 __ bind(L);
2120 }
2121 break;
2123 case lir_cond_notEqual:
2124 if (op->label()==NULL)
2125 __ bne(opr1_lo, opr2_lo, *op->label());//by liaobin2
2126 else
2127 __ bne_far(opr1_lo, opr2_lo, *op->label());//by liaobin2
2128 __ delayed()->nop();
2129 if (op->label()==NULL)
2130 NOT_LP64(__ bne(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2131 else
2132 NOT_LP64(__ bne_far(opr1_hi, opr2_hi, *op->label()));//by liaobin3
2133 NOT_LP64(__ delayed()->nop());
2134 break;
2136 case lir_cond_less: {
2137 #ifdef _LP64
2138 __ slt(AT, opr1_lo, opr2_lo);
2139 __ bne_far(AT, R0, *op->label());
2140 __ delayed()->nop();
2141 #else
2142 Label L;
2144 // if hi less then jump
2145 __ slt(AT, opr1_hi, opr2_hi);
2146 __ bne(AT, R0, *op->label());
2147 __ delayed()->nop();
2149 // if hi great then fail
2150 __ bne(opr1_hi, opr2_hi, L);
2151 __ delayed();
2153 // now just comp lo as unsigned
2154 __ sltu(AT, opr1_lo, opr2_lo);
2155 __ bne_far(AT, R0, *op->label());
2156 __ delayed()->nop();
2158 __ bind(L);
2159 #endif
2160 }
2161 break;
2163 case lir_cond_lessEqual: {
2164 #ifdef _LP64
2165 __ slt(AT, opr2_lo, opr1_lo);
2166 __ beq_far(AT, R0, *op->label());
2167 __ delayed()->nop();
2168 #else
2169 Label L;
2171 // if hi great then fail
2172 __ slt(AT, opr2_hi, opr1_hi);
2173 __ bne(AT, R0, L);
2174 __ delayed()->nop();
2176 // if hi less then jump
2177 if(op->label()==NULL)
2178 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin4
2179 else
2180 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin4
2181 __ delayed();
2183 // now just comp lo as unsigned
2184 __ sltu(AT, opr2_lo, opr1_lo);
2185 __ beq(AT, R0, *op->label());
2186 __ delayed()->nop();
2188 __ bind(L);
2189 #endif
2190 }
2191 break;
2193 case lir_cond_belowEqual: {
2194 #ifdef _LP64
2195 __ sltu(AT, opr2_lo, opr1_lo);
2196 __ beq(AT, R0, *op->label());
2197 __ delayed()->nop();
2198 #else
2199 Label L;
2201 // if hi great then fail
2202 __ sltu(AT, opr2_hi, opr1_hi);
2203 __ bne_far(AT, R0, L);
2204 __ delayed()->nop();
2206 // if hi less then jump
2207 if(op->label()==NULL)
2208 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin5
2209 else
2210 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin5
2211 __ delayed();
2213 // now just comp lo as unsigned
2214 __ sltu(AT, opr2_lo, opr1_lo);
2215 __ beq(AT, R0, *op->label());
2216 __ delayed()->nop();
2218 __ bind(L);
2219 #endif
2220 }
2221 break;
2223 case lir_cond_greaterEqual: {
2224 #ifdef _LP64
2225 __ slt(AT, opr1_lo, opr2_lo);
2226 __ beq_far(AT, R0, *op->label());
2227 __ delayed()->nop();
2228 #else
2229 Label L;
2231 // if hi less then fail
2232 __ slt(AT, opr1_hi, opr2_hi);
2233 __ bne_far(AT, R0, L);
2234 __ delayed()->nop();
2236 // if hi great then jump
2237 if(op->label()==NULL)
2238 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin6
2239 else
2240 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin6
2241 __ delayed();
2243 // now just comp lo as unsigned
2244 __ sltu(AT, opr1_lo, opr2_lo);
2245 __ beq(AT, R0, *op->label());
2246 __ delayed()->nop();
2248 __ bind(L);
2249 #endif
2250 }
2251 break;
2253 case lir_cond_aboveEqual: {
2254 #ifdef _LP64
2255 __ sltu(AT, opr1_lo, opr2_lo);
2256 __ beq_far(AT, R0, *op->label());
2257 __ delayed()->nop();
2258 #else
2259 Label L;
2261 // if hi less then fail
2262 __ sltu(AT, opr1_hi, opr2_hi);
2263 __ bne(AT, R0, L);
2264 __ delayed()->nop();
2266 // if hi great then jump
2267 if(op->label()==NULL)
2268 __ bne(opr2_hi, opr1_hi, *op->label());//by liaobin7
2269 else
2270 __ bne_far(opr2_hi, opr1_hi, *op->label());//by liaobin7
2271 __ delayed();
2273 // now just comp lo as unsigned
2274 __ sltu(AT, opr1_lo, opr2_lo);
2275 __ beq(AT, R0, *op->label());
2276 __ delayed()->nop();
2278 __ bind(L);
2279 #endif
2280 }
2281 break;
2283 case lir_cond_greater: {
2284 #ifdef _LP64
2285 __ slt(AT, opr2_lo, opr1_lo);
2286 __ bne_far(AT, R0, *op->label());
2287 __ delayed()->nop();
2288 #else
2289 Label L;
2291 // if hi great then jump
2292 __ slt(AT, opr2_hi, opr1_hi);
2293 __ bne(AT, R0, *op->label());
2294 __ delayed()->nop();
2296 // if hi less then fail
2297 __ bne(opr2_hi, opr1_hi, L);
2298 __ delayed();
2300 // now just comp lo as unsigned
2301 __ sltu(AT, opr2_lo, opr1_lo);
2302 __ bne(AT, R0, *op->label());
2303 __ delayed()->nop();
2305 __ bind(L);
2306 #endif
2307 }
2308 break;
2310 default: ShouldNotReachHere();
2311 }
2313 } else if(opr2->is_constant()) {
2314 jlong lv = opr2->as_jlong();
2315 #ifndef _LP64
2316 jint iv_lo = (jint)lv;
2317 jint iv_hi = (jint)(lv>>32);
2318 bool is_zero = (lv==0);
2319 #endif
2321 switch (condition) {
2322 case lir_cond_equal:
2323 #ifdef _LP64
2324 __ li(T8, lv);
2325 __ beq(opr1_lo, T8, *op->label());
2326 __ delayed()->nop();
2327 #else
2328 if (is_zero) {
2329 __ orr(AT, opr1_lo, opr1_hi);
2330 __ beq(AT, R0, *op->label());
2331 __ delayed()->nop();
2332 } else {
2333 Label L;
2334 __ move(T8, iv_lo);
2335 __ bne(opr1_lo, T8, L);
2336 __ delayed();
2337 __ move(T8, iv_hi);
2338 __ beq(opr1_hi, T8, *op->label());
2339 __ delayed()->nop();
2340 __ bind(L);
2341 }
2342 #endif
2343 break;
2345 case lir_cond_notEqual:
2346 #ifdef _LP64
2347 __ li(T8, lv);
2348 __ bne(opr1_lo, T8, *op->label());
2349 __ delayed()->nop();
2350 #else
2351 if (is_zero) {
2352 __ orr(AT, opr1_lo, opr1_hi);
2353 __ bne(AT, R0, *op->label());
2354 __ delayed()->nop();
2355 } else {
2356 __ move(T8, iv_lo);
2357 __ bne(opr1_lo, T8, *op->label());
2358 __ delayed();
2359 __ move(T8, iv_hi);
2360 __ bne(opr1_hi, T8, *op->label());
2361 __ delayed()->nop();
2362 }
2363 #endif
2364 break;
2366 case lir_cond_less:
2367 #ifdef _LP64
2368 __ li(T8, lv);
2369 __ slt(AT, opr1_lo, T8);
2370 __ bne_far(AT, R0, *op->label());
2371 __ delayed()->nop();
2372 #else
2373 if (is_zero) {
2374 __ bltz(opr1_hi, *op->label());
2375 __ delayed()->nop();
2376 __ bltz(opr1_lo, *op->label());
2377 __ delayed()->nop();
2378 } else {
2379 Label L;
2381 // if hi less then jump
2382 __ move(T8, iv_hi);
2383 __ slt(AT, opr1_hi, T8);
2384 __ bne_far(AT, R0, *op->label());
2385 __ delayed()->nop();
2387 // if hi great then fail
2388 __ bne(opr1_hi, T8, L);
2389 __ delayed();
2391 // now just comp lo as unsigned
2392 if (Assembler::is_simm16(iv_lo)) {
2393 __ sltiu(AT, opr1_lo, iv_lo);
2394 } else {
2395 __ move(T8, iv_lo);
2396 __ sltu(AT, opr1_lo, T8);
2397 }
2398 __ bne(AT, R0, *op->label());
2399 __ delayed()->nop();
2401 __ bind(L);
2402 }
2403 #endif
2404 break;
2406 case lir_cond_lessEqual:
2407 #ifdef _LP64
2408 __ li(T8, lv);
2409 __ slt(AT, T8, opr1_lo);
2410 __ beq(AT, R0, *op->label());
2411 __ delayed()->nop();
2412 #else
2413 if (is_zero) {
2414 __ bltz(opr1_hi, *op->label());
2415 __ delayed()->nop();
2416 __ orr(AT, opr1_hi, opr1_lo);
2417 __ beq(AT, R0, *op->label());
2418 __ delayed();
2419 } else {
2420 Label L;
2422 // if hi great then fail
2423 __ move(T8, iv_hi);
2424 __ slt(AT, T8, opr1_hi);
2425 __ bne(AT, R0, L);
2426 __ delayed()->nop();
2428 // if hi less then jump
2429 __ bne(T8, opr1_hi, *op->label());
2430 __ delayed();
2432 // now just comp lo as unsigned
2433 __ move(T8, iv_lo);
2434 __ sltu(AT, T8, opr1_lo);
2435 __ beq(AT, R0, *op->label());
2436 __ delayed()->nop();
2438 __ bind(L);
2439 }
2440 #endif
2441 break;
2443 case lir_cond_belowEqual:
2444 #ifdef _LP64
2445 __ li(T8, lv);
2446 __ sltu(AT, T8, opr1_lo);
2447 __ beq(AT, R0, *op->label());
2448 __ delayed()->nop();
2449 #else
2450 if (is_zero) {
2451 __ orr(AT, opr1_hi, opr1_lo);
2452 __ beq(AT, R0, *op->label());
2453 __ delayed()->nop();
2454 } else {
2455 Label L;
2457 // if hi great then fail
2458 __ move(T8, iv_hi);
2459 __ sltu(AT, T8, opr1_hi);
2460 __ bne(AT, R0, L);
2461 __ delayed()->nop();
2463 // if hi less then jump
2464 __ bne(T8, opr1_hi, *op->label());
2465 __ delayed();
2467 // now just comp lo as unsigned
2468 __ move(T8, iv_lo);
2469 __ sltu(AT, T8, opr1_lo);
2470 __ beq(AT, R0, *op->label());
2471 __ delayed()->nop();
2473 __ bind(L);
2474 }
2475 #endif
2476 break;
2478 case lir_cond_greaterEqual:
2479 #ifdef _LP64
2480 __ li(T8, lv);
2481 __ slt(AT, opr1_lo, T8);
2482 __ beq(AT, R0, *op->label());
2483 __ delayed()->nop();
2484 #else
2485 if (is_zero) {
2486 __ bgez(opr1_hi, *op->label());
2487 __ delayed()->nop();
2488 } else {
2489 Label L;
2491 // if hi less then fail
2492 __ move(T8, iv_hi);
2493 __ slt(AT, opr1_hi, T8);
2494 __ bne(AT, R0, L);
2495 __ delayed()->nop();
2497 // if hi great then jump
2498 __ bne(T8, opr1_hi, *op->label());
2499 __ delayed();
2501 // now just comp lo as unsigned
2502 if (Assembler::is_simm16(iv_lo)) {
2503 __ sltiu(AT, opr1_lo, iv_lo);
2504 } else {
2505 __ move(T8, iv_lo);
2506 __ sltu(AT, opr1_lo, T8);
2507 }
2508 __ beq(AT, R0, *op->label());
2509 __ delayed()->nop();
2511 __ bind(L);
2512 }
2513 #endif
2514 break;
2516 case lir_cond_aboveEqual:
2517 #ifdef _LP64
2518 __ li(T8, lv);
2519 __ sltu(AT, opr1_lo, T8);
2520 __ beq(AT, R0, *op->label());
2521 __ delayed()->nop();
2522 #else
2523 if (is_zero) {
2524 if(op->label()==NULL) //by liaob2
2525 __ b(*op->label());
2526 else
2527 __ b_far(*op->label());
2528 __ delayed()->nop();
2529 } else {
2530 Label L;
2532 // if hi less then fail
2533 __ move(T8, iv_hi);
2534 __ sltu(AT, opr1_hi, T8);
2535 __ bne(AT, R0, L);
2536 __ delayed()->nop();
2538 // if hi great then jump
2539 __ bne(T8, opr1_hi, *op->label());
2540 __ delayed();
2542 // now just comp lo as unsigned
2543 if (Assembler::is_simm16(iv_lo)) {
2544 __ sltiu(AT, opr1_lo, iv_lo);
2545 } else {
2546 __ move(T8, iv_lo);
2547 __ sltu(AT, opr1_lo, T8);
2548 }
2549 __ beq(AT, R0, *op->label());
2550 __ delayed()->nop();
2552 __ bind(L);
2553 }
2554 #endif
2555 break;
2557 case lir_cond_greater:
2558 #ifdef _LP64
2559 __ li(T8, lv);
2560 __ slt(AT, T8, opr1_lo);
2561 __ bne_far(AT, R0, *op->label());
2562 __ delayed()->nop();
2563 #else
2564 if (is_zero) {
2565 Label L;
2566 __ bgtz(opr1_hi, *op->label());
2567 __ delayed()->nop();
2568 __ bne(opr1_hi, R0, L);
2569 __ delayed()->nop();
2570 __ bne(opr1_lo, R0, *op->label());
2571 __ delayed()->nop();
2572 __ bind(L);
2573 } else {
2574 Label L;
2576 // if hi great then jump
2577 __ move(T8, iv_hi);
2578 __ slt(AT, T8, opr1_hi);
2579 __ bne(AT, R0, *op->label());
2580 __ delayed()->nop();
2582 // if hi less then fail
2583 __ bne(T8, opr1_hi, L);
2584 __ delayed();
2586 // now just comp lo as unsigned
2587 __ move(T8, iv_lo);
2588 __ sltu(AT, T8, opr1_lo);
2589 __ bne(AT, R0, *op->label());
2590 __ delayed()->nop();
2592 __ bind(L);
2593 }
2594 #endif
2595 break;
2597 default:
2598 ShouldNotReachHere();
2599 }
2600 } else {
2601 Unimplemented();
2602 }
2603 } else if (opr1->is_single_fpu()) {
2604 #ifdef OPT_RANGECHECK
2605 assert(!op->check(), "just check");
2606 #endif
2607 assert(opr2->is_single_fpu(), "change the code");
2609 FloatRegister reg_op1 = opr1->as_float_reg();
2610 FloatRegister reg_op2 = opr2->as_float_reg();
2611 // bool un_ls
2612 bool un_jump = (op->ublock()->label()==op->label());
2614 Label& L = *op->label();
2616 switch (condition) {
2617 case lir_cond_equal:
2618 if (un_jump)
2619 __ c_ueq_s(reg_op1, reg_op2);
2620 else
2621 __ c_eq_s(reg_op1, reg_op2);
2622 __ bc1t(L);
2624 break;
2626 case lir_cond_notEqual:
2627 if (un_jump)
2628 __ c_eq_s(reg_op1, reg_op2);
2629 else
2630 __ c_ueq_s(reg_op1, reg_op2);
2631 __ bc1f(L);
2633 break;
2635 case lir_cond_less:
2636 if (un_jump)
2637 __ c_ult_s(reg_op1, reg_op2);
2638 else
2639 __ c_olt_s(reg_op1, reg_op2);
2640 __ bc1t(L);
2642 break;
2644 case lir_cond_lessEqual:
2645 case lir_cond_belowEqual:
2646 if (un_jump)
2647 __ c_ule_s(reg_op1, reg_op2);
2648 else
2649 __ c_ole_s(reg_op1, reg_op2);
2650 __ bc1t(L);
2652 break;
2654 case lir_cond_greaterEqual:
2655 case lir_cond_aboveEqual:
2656 if (un_jump)
2657 __ c_olt_s(reg_op1, reg_op2);
2658 else
2659 __ c_ult_s(reg_op1, reg_op2);
2660 __ bc1f(L);
2662 break;
2664 case lir_cond_greater:
2665 if (un_jump)
2666 __ c_ole_s(reg_op1, reg_op2);
2667 else
2668 __ c_ule_s(reg_op1, reg_op2);
2669 __ bc1f(L);
2671 break;
2673 default:
2674 ShouldNotReachHere();
2675 }
2676 __ delayed()->nop();
2677 } else if (opr1->is_double_fpu()) {
2678 #ifdef OPT_RANGECHECK
2679 assert(!op->check(), "just check");
2680 #endif
2681 assert(opr2->is_double_fpu(), "change the code");
2683 FloatRegister reg_op1 = opr1->as_double_reg();
2684 FloatRegister reg_op2 = opr2->as_double_reg();
2685 bool un_jump = (op->ublock()->label()==op->label());
2686 Label& L = *op->label();
2688 switch (condition) {
2689 case lir_cond_equal:
2690 if (un_jump)
2691 __ c_ueq_d(reg_op1, reg_op2);
2692 else
2693 __ c_eq_d(reg_op1, reg_op2);
2694 __ bc1t(L);
2696 break;
2698 case lir_cond_notEqual:
2699 if (un_jump)
2700 __ c_eq_d(reg_op1, reg_op2);
2701 else
2702 __ c_ueq_d(reg_op1, reg_op2);
2703 __ bc1f(L);
2705 break;
2707 case lir_cond_less:
2708 if (un_jump)
2709 __ c_ult_d(reg_op1, reg_op2);
2710 else
2711 __ c_olt_d(reg_op1, reg_op2);
2712 __ bc1t(L);
2714 break;
2716 case lir_cond_lessEqual:
2717 case lir_cond_belowEqual:
2718 if (un_jump)
2719 __ c_ule_d(reg_op1, reg_op2);
2720 else
2721 __ c_ole_d(reg_op1, reg_op2);
2722 __ bc1t(L);
2724 break;
2726 case lir_cond_greaterEqual:
2727 case lir_cond_aboveEqual:
2728 if (un_jump)
2729 __ c_olt_d(reg_op1, reg_op2);
2730 else
2731 __ c_ult_d(reg_op1, reg_op2);
2732 __ bc1f(L);
2734 break;
2736 case lir_cond_greater:
2737 if (un_jump)
2738 __ c_ole_d(reg_op1, reg_op2);
2739 else
2740 __ c_ule_d(reg_op1, reg_op2);
2741 __ bc1f(L);
2743 break;
2745 default:
2746 ShouldNotReachHere();
2747 }
2748 __ delayed()->nop();
2749 } else {
2750 Unimplemented();
2751 }
2752 }
2755 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
2756 LIR_Opr value = op->in_opr();
2757 LIR_Opr src = op->in_opr();
2758 LIR_Opr dest = op->result_opr();
2759 Bytecodes::Code code = op->bytecode();
2761 switch (code) {
2762 case Bytecodes::_i2l:
2763 move_regs(src->as_register(), dest->as_register_lo());
2764 NOT_LP64(__ sra (dest->as_register_hi(), dest->as_register_lo(), 31));
2765 break;
2767 case Bytecodes::_l2i:
2768 #ifndef _LP64
2769 move_regs (src->as_register_lo(), dest->as_register());
2770 #else
2771 __ dsll32(dest->as_register(), src->as_register_lo(), 0);
2772 __ dsra32(dest->as_register(), dest->as_register(), 0);
2773 #endif
2774 break;
2776 case Bytecodes::_i2b:
2777 #ifndef _LP64
2778 move_regs (src->as_register(), dest->as_register());
2779 __ sign_extend_byte(dest->as_register());
2780 #else
2781 __ dsll32(dest->as_register(), src->as_register(), 24);
2782 __ dsra32(dest->as_register(), dest->as_register(), 24);
2783 #endif
2784 break;
2786 case Bytecodes::_i2c:
2787 __ andi(dest->as_register(), src->as_register(), 0xFFFF);
2788 break;
2790 case Bytecodes::_i2s:
2791 #ifndef _LP64
2792 move_regs (src->as_register(), dest->as_register());
2793 __ sign_extend_short(dest->as_register());
2794 #else
2795 __ dsll32(dest->as_register(), src->as_register(), 16);
2796 __ dsra32(dest->as_register(), dest->as_register(), 16);
2797 #endif
2798 break;
2800 case Bytecodes::_f2d:
2801 __ cvt_d_s(dest->as_double_reg(), src->as_float_reg());
2802 break;
2804 case Bytecodes::_d2f:
2805 __ cvt_s_d(dest->as_float_reg(), src->as_double_reg());
2806 break;
2807 case Bytecodes::_i2f: {
2808 FloatRegister df = dest->as_float_reg();
2809 if(src->is_single_cpu()) {
2810 __ mtc1(src->as_register(), df);
2811 __ cvt_s_w(df, df);
2812 } else if (src->is_stack()) {
2813 Address src_addr = src->is_single_stack()
2814 ? frame_map()->address_for_slot(src->single_stack_ix())
2815 : frame_map()->address_for_slot(src->double_stack_ix());
2816 __ lw(AT, src_addr);
2817 __ mtc1(AT, df);
2818 __ cvt_s_w(df, df);
2819 } else {
2820 Unimplemented();
2821 }
2822 break;
2823 }
2824 case Bytecodes::_i2d: {
2825 FloatRegister dd = dest->as_double_reg();
2826 if (src->is_single_cpu()) {
2827 __ mtc1(src->as_register(), dd);
2828 __ cvt_d_w(dd, dd);
2829 } else if (src->is_stack()) {
2830 Address src_addr = src->is_single_stack()
2831 ? frame_map()->address_for_slot(value->single_stack_ix())
2832 : frame_map()->address_for_slot(value->double_stack_ix());
2833 __ lw(AT, src_addr);
2834 __ mtc1(AT, dd);
2835 __ cvt_d_w(dd, dd);
2836 } else {
2837 Unimplemented();
2838 }
2839 break;
2840 }
2841 case Bytecodes::_f2i: {
2842 FloatRegister fval = src->as_float_reg();
2843 Register dreg = dest->as_register();
2845 Label L;
2846 __ c_un_s(fval, fval); //NaN?
2847 __ bc1t(L);
2848 __ delayed();
2849 __ move(dreg, R0);
2851 __ trunc_w_s(F30, fval);
2853 /* Call SharedRuntime:f2i() to do valid convention */
2854 __ cfc1(AT, 31);
2855 __ li(T9, 0x10000);
2856 __ andr(AT, AT, T9);
2857 __ beq(AT, R0, L);
2858 __ delayed()->mfc1(dreg, F30);
2860 __ mov_s(F12, fval);
2861 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2862 __ move(dreg, V0);
2863 __ bind(L);
2864 break;
2865 }
2866 case Bytecodes::_d2i: {
2867 FloatRegister dval = src->as_double_reg();
2868 Register dreg = dest->as_register();
2870 Label L;
2871 #ifndef _LP64
2872 __ c_un_d(dval, dval); //NaN?
2873 __ bc1t(L);
2874 __ delayed();
2875 __ move(dreg, R0);
2876 #endif
2878 __ trunc_w_d(F30, dval);
2879 __ cfc1(AT, 31);
2880 __ li(T9, 0x10000);
2881 __ andr(AT, AT, T9);
2882 __ beq(AT, R0, L);
2883 __ delayed()->mfc1(dreg, F30);
2885 __ mov_d(F12, dval);
2886 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
2887 __ move(dreg, V0);
2888 __ bind(L);
2889 break;
2890 }
2891 case Bytecodes::_l2f: {
2892 FloatRegister ldf = dest->as_float_reg();
2893 if (src->is_double_cpu()) {
2894 #ifndef _LP64
2895 __ mtc1(src->as_register_lo(), ldf);
2896 __ mtc1(src->as_register_hi(), ldf + 1);
2897 __ cvt_s_l(ldf, ldf);
2898 #else
2899 __ dmtc1(src->as_register_lo(), ldf);
2900 __ cvt_s_l(ldf, ldf);
2901 #endif
2902 } else if (src->is_double_stack()) {
2903 Address src_addr=frame_map()->address_for_slot(value->double_stack_ix());
2904 #ifndef _LP64
2905 __ lw(AT, src_addr);
2906 __ mtc1(AT, ldf);
2907 __ lw(AT, src_addr.base(), src_addr.disp() + 4);
2908 __ mtc1(AT, ldf + 1);
2909 __ cvt_s_l(ldf, ldf);
2910 #else
2911 __ ld(AT, src_addr);
2912 __ dmtc1(AT, ldf);
2913 __ cvt_s_l(ldf, ldf);
2914 #endif
2915 } else {
2916 Unimplemented();
2917 }
2918 break;
2919 }
2920 case Bytecodes::_l2d: {
2921 FloatRegister ldd = dest->as_double_reg();
2922 if (src->is_double_cpu()) {
2923 #ifndef _LP64
2924 __ mtc1(src->as_register_lo(), ldd);
2925 __ mtc1(src->as_register_hi(), ldd + 1);
2926 __ cvt_d_l(ldd, ldd);
2927 #else
2928 __ dmtc1(src->as_register_lo(), ldd);
2929 __ cvt_d_l(ldd, ldd);
2930 #endif
2931 } else if (src->is_double_stack()) {
2932 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
2933 #ifndef _LP64
2934 __ lw(AT, src_addr);
2935 __ mtc1(AT, ldd);
2936 __ lw(AT, src_addr.base(), src_addr.disp() + 4);
2937 __ mtc1(AT, ldd + 1);
2938 __ cvt_d_l(ldd, ldd);
2939 #else
2940 __ ld(AT, src_addr);
2941 __ dmtc1(AT, ldd);
2942 __ cvt_d_l(ldd, ldd);
2943 #endif
2944 } else {
2945 Unimplemented();
2946 }
2947 break;
2948 }
2950 case Bytecodes::_f2l: {
2951 FloatRegister fval = src->as_float_reg();
2952 Register dlo = dest->as_register_lo();
2953 Register dhi = dest->as_register_hi();
2955 Label L;
2956 __ move(dhi, R0);
2957 __ c_un_s(fval, fval); //NaN?
2958 __ bc1t(L);
2959 __ delayed();
2960 __ move(dlo, R0);
2962 __ trunc_l_s(F30, fval);
2963 #ifdef _LP64
2964 __ cfc1(AT, 31);
2965 __ li(T9, 0x10000);
2966 __ andr(AT, AT, T9);
2967 __ beq(AT, R0, L);
2968 __ delayed()->dmfc1(dlo, F30);
2970 __ mov_s(F12, fval);
2971 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2972 __ move(dlo, V0);
2973 #else
2974 __ mfc1(dlo, F30);
2975 #endif
2976 NOT_LP64(__ mfc1(dhi, F31));
2977 __ bind(L);
2978 break;
2979 }
2980 case Bytecodes::_d2l: {
2981 FloatRegister dval = src->as_double_reg();
2982 Register dlo = dest->as_register_lo();
2983 Register dhi = dest->as_register_hi();
2985 Label L;
2986 __ move(dhi, R0);
2987 __ c_un_d(dval, dval); //NaN?
2988 __ bc1t(L);
2989 __ delayed();
2990 __ move(dlo, R0);
2992 __ trunc_l_d(F30, dval);
2993 #ifdef _LP64
2994 __ cfc1(AT, 31);
2995 __ li(T9, 0x10000);
2996 __ andr(AT, AT, T9);
2997 __ beq(AT, R0, L);
2998 __ delayed()->dmfc1(dlo, F30);
3000 __ mov_d(F12, dval);
3001 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
3002 __ move(dlo, V0);
3003 #else
3004 __ mfc1(dlo, F30);
3005 __ mfc1(dhi, F31);
3006 #endif
3007 __ bind(L);
3008 break;
3009 }
3011 default: ShouldNotReachHere();
3012 }
3013 }
// Emit code to allocate a fixed-size instance (LIR_OpAllocObj).
// If the klass may not be initialized yet, branch to the slow-path stub
// unless its init state is fully_initialized; then emit the inline
// allocation, with the stub entry as its slow path. Control rejoins at
// the stub's continuation label.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // The init-state load may fault on a null klass; record debug info here.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ lw(AT,Address(op->klass()->as_register(),
        InstanceKlass::init_state_offset()));
    // AT == 0 iff state == fully_initialized; otherwise take the slow path.
    __ addi(AT, AT, -InstanceKlass::fully_initialized);
    __ bne_far(AT, R0,*op->stub()->entry());
    __ delayed()->nop();
  }
  // Inline TLAB/eden allocation; falls into the stub entry on failure.
  __ allocate_object(
    op->obj()->as_register(),
    op->tmp1()->as_register(),
    op->tmp2()->as_register(),
    op->header_size(),
    op->object_size(),
    op->klass()->as_register(),
    *op->stub()->entry());

  __ bind(*op->stub()->continuation());
}
3036 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
3037 if (UseSlowPath ||
3038 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
3039 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
3040 __ b_far(*op->stub()->entry());
3041 __ delayed()->nop();
3042 } else {
3043 Register len = op->len()->as_register();
3044 Register tmp1 = op->tmp1()->as_register();
3045 Register tmp2 = op->tmp2()->as_register();
3046 Register tmp3 = op->tmp3()->as_register();
3047 __ allocate_array(op->obj()->as_register(),
3048 len,
3049 tmp1,
3050 tmp2,
3051 tmp3,
3052 arrayOopDesc::header_size(op->type()),
3053 array_element_size(op->type()),
3054 op->klass()->as_register(),
3055 *op->stub()->entry());
3056 }
3057 __ bind(*op->stub()->continuation());
3058 }
// Emit receiver-type profiling code against a ReceiverTypeData row table.
//
//   mdo         - register holding the MethodData oop
//   md / data   - compile-time views used to compute slot offsets
//   recv        - register holding the receiver's klass
//   update_done - label to branch to once a row has been updated
//
// First pass: if recv matches a recorded receiver row, bump that row's
// count and jump to update_done. Second pass: otherwise claim the first
// empty row for recv, seed its count, and jump to update_done. If every
// row is taken by other types, emitted code falls through with no update.
// AT is clobbered throughout.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ bne(AT, recv, next_test);
    __ delayed()->nop();
    // Matched: increment this row's hit counter.
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ ld_ptr(AT, data_addr);
    __ addi(AT, AT, DataLayout::counter_increment);
    __ st_ptr(AT, data_addr);
    __ b(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ ld_ptr(AT, recv_addr);
    // A zero receiver slot marks an unused row.
    __ bne(AT, R0, next_test);
    __ delayed()->nop();
    // Claim the row: record recv and start its counter at one increment.
    __ st_ptr(recv, recv_addr);
    __ move(AT, DataLayout::counter_increment);
    __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ b(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}
3094 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
3095 // we always need a stub for the failure case.
3096 CodeStub* stub = op->stub();
3097 Register obj = op->object()->as_register();
3098 Register k_RInfo = op->tmp1()->as_register();
3099 Register klass_RInfo = op->tmp2()->as_register();
3100 Register dst = op->result_opr()->as_register();
3101 ciKlass* k = op->klass();
3102 Register Rtmp1 = noreg;
3104 // check if it needs to be profiled
3105 ciMethodData* md = NULL;
3106 ciProfileData* data = NULL;
3108 if (op->should_profile()) {
3109 ciMethod* method = op->profiled_method();
3110 assert(method != NULL, "Should have method");
3111 int bci = op->profiled_bci();
3112 md = method->method_data_or_null();
3113 assert(md != NULL, "Sanity");
3114 data = md->bci_to_data(bci);
3115 assert(data != NULL, "need data for type check");
3116 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
3117 }
3118 Label profile_cast_success, profile_cast_failure;
3119 Label *success_target = op->should_profile() ? &profile_cast_success : success;
3120 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
3122 if (obj == k_RInfo) {
3123 k_RInfo = dst;
3124 } else if (obj == klass_RInfo) {
3125 klass_RInfo = dst;
3126 }
3127 if (k->is_loaded() && !UseCompressedClassPointers) {
3128 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
3129 } else {
3130 Rtmp1 = op->tmp3()->as_register();
3131 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
3132 }
3134 assert_different_registers(obj, k_RInfo, klass_RInfo);
3136 if (op->should_profile()) {
3137 Label not_null;
3138 __ bne(obj, R0, not_null);
3139 __ delayed()->nop();
3140 // Object is null; update MDO and exit
3141 Register mdo = klass_RInfo;
3142 __ mov_metadata(mdo, md->constant_encoding());
3143 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3144 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3145 __ lw(AT, data_addr);
3146 __ ori(AT, AT, header_bits);
3147 __ sw(AT,data_addr);
3148 __ b(*obj_is_null);
3149 __ delayed()->nop();
3150 __ bind(not_null);
3151 } else {
3152 __ beq(obj, R0, *obj_is_null);
3153 __ delayed()->nop();
3154 }
3156 if (!k->is_loaded()) {
3157 klass2reg_with_patching(k_RInfo, op->info_for_patch());
3158 } else {
3159 #ifdef _LP64
3160 __ mov_metadata(k_RInfo, k->constant_encoding());
3161 #endif // _LP64
3162 }
3163 __ verify_oop(obj);
3165 if (op->fast_check()) {
3166 // get object class
3167 // not a safepoint as obj null check happens earlier
3168 if (UseCompressedClassPointers) {
3169 __ load_klass(Rtmp1, obj);
3170 __ bne(k_RInfo, Rtmp1, *failure_target);
3171 __ delayed()->nop();
3172 } else {
3173 __ ld(AT, Address(obj, oopDesc::klass_offset_in_bytes()));
3174 __ bne(k_RInfo, AT, *failure_target);
3175 __ delayed()->nop();
3176 }
3177 // successful cast, fall through to profile or jump
3178 } else {
3179 // get object class
3180 // not a safepoint as obj null check happens earlier
3181 __ load_klass(klass_RInfo, obj);
3182 if (k->is_loaded()) {
3183 // See if we get an immediate positive hit
3184 __ ld(AT, Address(klass_RInfo, k->super_check_offset()));
3185 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
3186 __ bne(k_RInfo, AT, *failure_target);
3187 __ delayed()->nop();
3188 // successful cast, fall through to profile or jump
3189 } else {
3190 // See if we get an immediate positive hit
3191 __ beq(k_RInfo, AT, *success_target);
3192 __ delayed()->nop();
3193 // check for self
3194 __ beq(k_RInfo, klass_RInfo, *success_target);
3195 __ delayed()->nop();
3197 if (A0 != klass_RInfo) __ push(A0);
3198 if (A1 != k_RInfo) __ push(A1);
3199 if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
3200 if (A1 != k_RInfo) __ move(A1, k_RInfo);
3201 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3202 __ delayed()->nop();
3203 if (A1 != k_RInfo) __ pop(A1);
3204 if (A0 != klass_RInfo) __ pop(A0);
3205 // result is a boolean
3206 __ beq(V0, R0, *failure_target);
3207 __ delayed()->nop();
3208 // successful cast, fall through to profile or jump
3209 }
3210 } else {
3211 // perform the fast part of the checking logic
3212 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
3213 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
3214 if (A0 != klass_RInfo) __ push(A0);
3215 if (A1 != k_RInfo) __ push(A1);
3216 if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
3217 if (A1 != k_RInfo) __ move(A1, k_RInfo);
3218 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3219 __ delayed()->nop();
3220 if (A1 != k_RInfo) __ pop(A1);
3221 if (A0 != klass_RInfo) __ pop(A0);
3222 // result is a boolean
3223 __ beq(V0, R0, *failure_target);
3224 __ delayed()->nop();
3225 // successful cast, fall through to profile or jump
3226 }
3227 }
3228 if (op->should_profile()) {
3229 Register mdo = klass_RInfo, recv = k_RInfo;
3230 __ bind(profile_cast_success);
3231 __ mov_metadata(mdo, md->constant_encoding());
3232 __ load_klass(recv, obj);
3233 Label update_done;
3234 type_profile_helper(mdo, md, data, recv, success);
3235 __ b(*success);
3236 __ delayed()->nop();
3238 __ bind(profile_cast_failure);
3239 __ mov_metadata(mdo, md->constant_encoding());
3240 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3241 __ ld_ptr(AT, counter_addr);
3242 __ addi(AT, AT, -DataLayout::counter_increment);
3243 __ st_ptr(AT, counter_addr);
3245 __ b(*failure);
3246 __ delayed()->nop();
3247 }
3248 __ b(*success);
3249 __ delayed()->nop();
3250 }
// Emit the type-check portions of lir_store_check / lir_checkcast /
// lir_instanceof.  For store_check the full subtype check (fast path plus
// runtime slow path) is emitted inline here; checkcast/instanceof delegate
// to emit_typecheck_helper.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register tmp = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    //check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    // When profiling, success/failure first pass through the MDO-update code
    // below; otherwise they branch straight to 'done' / the deopt stub.
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if(op->should_profile()) {
      Label not_null;
      __ bne(value, R0, not_null);
      __ delayed()->nop();

      // Stored value is null: record the null_seen flag in the MDO header
      // and skip the type check entirely.
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ lw(AT, data_addr);
      __ ori(AT, AT, header_bits);
      __ sw(AT,data_addr);
      __ b(done);
      __ delayed()->nop();
      __ bind(not_null);
    } else {
      // Storing null never needs a type check.
      __ beq(value, R0, done);
      __ delayed()->nop();
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);
    // get instance klass (it's already uncompressed)
    __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, tmp, success_target, failure_target, NULL);
    // Slow path: call Runtime1::slow_subtype_check_id with (A0, A1) =
    // (klass_RInfo, k_RInfo), saving/restoring A0/A1 if they are live in
    // other roles.  Result (a boolean) comes back in V0.
    if (A0 != klass_RInfo) __ push(A0);
    if (A1 != k_RInfo) __ push(A1);
    if (A0 != klass_RInfo) __ move(A0, klass_RInfo);
    if (A1 != k_RInfo) __ move(A1, k_RInfo);
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    if (A1 != k_RInfo) __ pop(A1);
    if (A0 != klass_RInfo) __ pop(A0);
    // result is a boolean
    __ beq(V0, R0, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      // Success: record the receiver type in the MDO, then continue at 'done'.
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);
      __ delayed()->nop();

      // Failure: decrement the MDO counter back, then go to the deopt stub.
      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ ld_ptr(AT, counter_addr);
      __ addi(AT, AT, -DataLayout::counter_increment);
      __ st_ptr(AT, counter_addr);

      __ b(*stub->entry());
      __ delayed()->nop();
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ move(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    // Materialize the boolean result: 0 on failure, 1 on success.
    __ bind(failure);
    __ move(dst, R0);
    __ b(done);
    __ delayed()->nop();
    __ bind(success);
    __ addi(dst, R0, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
3370 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
3371 if (op->code() == lir_cas_long) {
3372 #ifdef _LP64
3373 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3374 Register newval = (op->new_value()->is_single_cpu() ? op->new_value()->as_register() : op->new_value()->as_register_lo());
3375 Register cmpval = (op->cmp_value()->is_single_cpu() ? op->cmp_value()->as_register() : op->cmp_value()->as_register_lo());
3376 assert(newval != NULL, "new val must be register");
3377 assert(cmpval != newval, "cmp and new values must be in different registers");
3378 assert(cmpval != addr, "cmp and addr must be in different registers");
3379 assert(newval != addr, "new value and addr must be in different registers");
3380 if (os::is_MP()) {}
3381 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3382 #else
3383 Register addr = op->addr()->as_register();
3384 if (os::is_MP()) {}
3385 __ cmpxchg8(op->new_value()->as_register_lo(),
3386 op->new_value()->as_register_hi(),
3387 addr,
3388 op->cmp_value()->as_register_lo(),
3389 op->cmp_value()->as_register_hi())
3390 #endif
3391 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
3392 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
3393 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3394 Register newval = op->new_value()->as_register();
3395 Register cmpval = op->cmp_value()->as_register();
3396 assert(newval != NULL, "new val must be register");
3397 assert(cmpval != newval, "cmp and new values must be in different registers");
3398 assert(cmpval != addr, "cmp and addr must be in different registers");
3399 assert(newval != addr, "new value and addr must be in different registers");
3400 if (op->code() == lir_cas_obj) {
3401 #ifdef _LP64
3402 if (UseCompressedOops) {
3403 Register tmp_reg = S7;
3404 __ push(cmpval);
3405 __ encode_heap_oop(cmpval);
3406 __ move(tmp_reg, newval);
3407 __ encode_heap_oop(tmp_reg);
3408 if (os::is_MP()) {}
3409 __ cmpxchg32(tmp_reg, addr, cmpval); // 32-bit test-and-set
3410 __ pop(cmpval);
3411 } else
3412 {
3413 if (os::is_MP()) {}
3414 __ cmpxchg(newval, addr, cmpval); // 64-bit test-and-set
3415 }
3416 } else
3417 #endif
3418 {
3419 __ cmpxchg32(newval, addr, cmpval); // 32-bit test-and-set
3420 }
3421 } else {
3422 Unimplemented();
3423 }
3424 }
#ifndef MIPS64
// Conditional move is not implemented for this (non-MIPS64) configuration.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  Unimplemented();
}
#endif
// Arithmetic ops (integer add/sub/mul, FP add/sub/mul/div) for all operand
// shapes: register-register, register-constant, long register pairs (32-bit)
// or single 64-bit registers (LP64), FP registers, and in-memory (stack slot
// or address) destinations.  'info' is only expected for ldiv/lrem-style
// ops, as the assert below states.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info,bool pop_fpu_stack) {
  assert(info == NULL || ((code == lir_rem || code == lir_div || code == lir_sub) && right->is_double_cpu()), "info is only for ldiv/lrem");
  if (left->is_single_cpu()) {
    // left may not be equal to dest on mips.
    //assert(left == dest, "left and dest must be equal")
    Register lreg = left->as_register();

    if (right->is_cpu_register()) {
      // cpu register - cpu register
      Register rreg, res;
      if (right->is_single_cpu()) {
        rreg = right->as_register();
#ifdef _LP64
        if(dest->is_double_cpu())
          res = dest->as_register_lo();
        else
#endif
          res = dest->as_register();
      } else if (right->is_double_cpu()) {
        assert(right->is_double_cpu(),"right must be long");
        rreg = right->as_register_lo();
        res = dest->as_register_lo();
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add:
#ifdef _LP64
          // addu32 keeps a 32-bit result properly sign-extended on LP64.
          if (dest->type() == T_INT)
            __ addu32(res, lreg, rreg);
          else
#endif
            __ addu(res, lreg, rreg);
          break;

        case lir_mul:
#ifndef _LP64
          //by aoqi
          __ mult(lreg, rreg);
#else
          __ dmult(lreg, rreg);
#endif
          // Two nops between mult and mflo — presumably a HI/LO access
          // hazard workaround for the target pipeline; kept as-is.
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           * Example: java.net.URLClassLoader::string2int()
           * a6: 0xcafebab
           * s0: 16
           *
           * 104 mul [a6|I] [s0|I] [t0|I]
           0x00000055655e3728: dmult a6, s0
           0x00000055655e372c: sll zero, zero, 0
           0x00000055655e3730: sll zero, zero, 0
           0x00000055655e3734: mflo t0 <-- error
           *
           * t0: 0xFFFFFFFFcafebab0 (Right)
           * t0: 0x00000000cafebab0 (Wrong)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);  // canonical sign-extension of a 32-bit value
#endif
          break;

        case lir_sub:
#ifdef _LP64
          if (dest->type() == T_INT)
            __ subu32(res, lreg, rreg);
          else
#endif
            __ subu(res, lreg, rreg);
          break;

        default:
          ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // cpu register - stack
      Unimplemented();
    } else if (right->is_constant()) {
      // cpu register - constant
      Register res;
      if (dest->is_double_cpu()) {
        res = dest->as_register_lo();
      } else {
        res = dest->as_register();
      }
      // Note: a long constant is truncated to jint here (T_INT path and the
      // as_jlong() path both end up in a 32-bit 'c').
      jint c;
      if (right->type() == T_INT) {
        c = right->as_constant_ptr()->as_jint();
      } else {
        c = right->as_constant_ptr()->as_jlong();
      }

      switch (code) {
        case lir_mul_strictfp:
        case lir_mul:
          __ move(AT, c);
#ifndef _LP64
          //by aoqi
          __ mult(lreg, AT);
#else
          __ dmult(lreg, AT);
#endif
          // HI/LO hazard nops, same as the register-register mul above.
          __ nop();
          __ nop();
          __ mflo(res);
#ifdef _LP64
          /* Jin: if res < 0, it must be sign-extended. Otherwise it will be a 64-bit positive number.
           *
           * Example: java.net.URLClassLoader::string2int()
           * a6: 0xcafebab
           * s0: 16
           *
           * 104 mul [a6|I] [s0|I] [t0|I]
           0x00000055655e3728: dmult a6, s0
           0x00000055655e372c: sll zero, zero, 0
           0x00000055655e3730: sll zero, zero, 0
           0x00000055655e3734: mflo t0 <-- error
           *
           * t0: 0xFFFFFFFFcafebab0 (Right)
           * t0: 0x00000000cafebab0 (Wrong)
           */
          if (dest->type() == T_INT)
            __ sll(res, res, 0);
#endif
          break;

        case lir_add:
          // Use the immediate form when the constant fits in simm16.
          if (Assembler::is_simm16(c)) {
            __ addiu(res, lreg, c);
          } else {
            __ move(AT, c);
            __ addu(res, lreg, AT);
          }
          break;

        case lir_sub:
          if (Assembler::is_simm16(-c)) {
            __ addi(res, lreg, -c);
          } else {
            __ move(AT, c);
            __ subu(res, lreg, AT);
          }
          break;

        default:
          ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // Long arithmetic: register pairs (lo/hi) on 32-bit, single registers
    // on LP64 (the hi halves are unused there).
    Register op1_lo = left->as_register_lo();
    Register op1_hi = left->as_register_hi();
    Register op2_lo;
    Register op2_hi;
    Register dst_lo;
    Register dst_hi;

    if(dest->is_single_cpu())
    {
      dst_lo = dest->as_register();
    }
    else
    {
#ifdef _LP64
      dst_lo = dest->as_register_lo();
#else
      dst_lo = dest->as_register_lo();
      dst_hi = dest->as_register_hi();
#endif
    }
    if (right->is_constant()) {
      // Materialize the constant into AT; hi half is R0.
      op2_lo = AT;
      op2_hi = R0;
#ifndef _LP64
      __ li(AT, right->as_constant_ptr()->as_jint());
#else
      __ li(AT, right->as_constant_ptr()->as_jlong_bits());
#endif
    } else if (right->is_double_cpu()) { // Double cpu
      assert(right->is_double_cpu(),"right must be long");
      assert(dest->is_double_cpu(), "dest must be long");
      op2_lo = right->as_register_lo();
      op2_hi = right->as_register_hi();
    } else {
#ifdef _LP64
      op2_lo = right->as_register();
#else
      ShouldNotReachHere();
#endif
    }

    NOT_LP64(assert_different_registers(op1_lo, op1_hi, op2_lo, op2_hi));
    // Jin: Why?
    // LP64_ONLY(assert_different_registers(op1_lo, op2_lo));

    switch (code) {
      case lir_add:
#ifndef _LP64
        //by aoqi
        // 64-bit add out of 32-bit halves: sltu computes the carry.
        __ addu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, dst_lo, op2_lo);
        __ addu(dst_hi, op1_hi, op2_hi);
        __ addu(dst_hi, dst_hi, AT);
#else
        __ addu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_sub:
#ifndef _LP64
        //by aoqi
        // 64-bit sub out of 32-bit halves: sltu computes the borrow.
        __ subu(dst_lo, op1_lo, op2_lo);
        __ sltu(AT, op1_lo, dst_lo);
        __ subu(dst_hi, op1_hi, op2_hi);
        __ subu(dst_hi, dst_hi, AT);
#else
        __ subu(dst_lo, op1_lo, op2_lo);
#endif
        break;

      case lir_mul:
        {
#ifndef _LP64
          //by aoqi
          // 64x64 multiply from 32-bit halves: cross products op2_lo*op1_hi
          // and op2_hi*op1_lo feed the high word; the 'quick' path skips
          // them when both hi halves are zero.
          Label zero, quick, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);  // fills the delay slot: executes either way
          //quick?
          __ orr(AT, op2_hi, op1_hi);
          __ beq(AT, R0, quick);
          __ delayed()->nop();
          __ multu(op2_lo, op1_hi);
          __ nop();
          __ nop();
          __ mflo(dst_hi);
          __ multu(op2_hi, op1_lo);
          __ nop();
          __ nop();
          __ mflo(AT);

          __ bind(quick);
          __ multu(op2_lo, op1_lo);
          __ addu(dst_hi, dst_hi, AT);
          __ nop();
          __ mflo(dst_lo);
          __ mfhi(AT);
          __ b(done);
          __ delayed()->addu(dst_hi, dst_hi, AT);

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#else
          Label zero, done;
          //zero?
          __ orr(AT, op2_lo, op1_lo);
          __ beq(AT, R0, zero);
          __ delayed();
          __ move(dst_hi, R0);  // fills the delay slot: executes either way

#ifdef ASSERT
          //op1_hi, op2_hi should be 0
          {
            Label L;
            __ beq(op1_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
          {
            Label L;
            __ beq(op2_hi, R0, L);
            __ delayed()->nop();
            __ stop("wrong register, lir_mul");
            __ bind(L);
          }
#endif

          __ multu(op2_lo, op1_lo);
          // HI/LO hazard nops before mflo; kept as-is.
          __ nop();
          __ nop();
          __ mflo(dst_lo);
          __ b(done);
          __ delayed()->nop();

          __ bind(zero);
          __ move(dst_lo, R0);
          __ bind(done);
#endif //_LP64
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(),"right must be float");
    assert(dest->is_single_fpu(), "dest must be float");

    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->as_float_reg();
    FloatRegister res = dest->as_float_reg();

    switch (code) {
      case lir_add:
        __ add_s(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_s(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        __ mul_s(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_s(res, lreg, rreg);
        break;
      default : ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    assert(right->is_double_fpu(),"right must be double");
    assert(dest->is_double_fpu(), "dest must be double");

    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->as_double_reg();
    FloatRegister res = dest->as_double_reg();

    switch (code) {
      case lir_add:
        __ add_d(res, lreg, rreg);
        break;
      case lir_sub:
        __ sub_d(res, lreg, rreg);
        break;
      case lir_mul:
      case lir_mul_strictfp:
        // i dont think we need special handling of this. FIXME
        // by yjl 9/13/2005
        __ mul_d(res, lreg, rreg);
        break;
      case lir_div:
      case lir_div_strictfp:
        __ div_d(res, lreg, rreg);
        break;
//    case lir_rem:
//      __ rem_d(res, lreg, rreg);
//      break;
      default : ShouldNotReachHere();
    }
  }
  else if (left->is_single_stack() || left->is_address()) {
    // In-memory destination: load, operate, store back through AT.
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ add(AT, AT, rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dadd(AT, AT, rreg);
          __ sd(AT, laddr);
#endif
          break;
        case lir_sub:
#ifndef _LP64
          //by aoqi
          __ lw(AT, laddr);
          __ sub(AT,AT,rreg);
          __ sw(AT, laddr);
#else
          __ ld(AT, laddr);
          __ dsub(AT,AT,rreg);
          __ sd(AT, laddr);
#endif
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
#ifndef _LP64
      jint c = right->as_constant_ptr()->as_jint();
#else
      jlong c = right->as_constant_ptr()->as_jlong_bits();
#endif
      switch (code) {
        case lir_add: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          // NOTE(review): addi takes a simm16 — presumably c always fits
          // here; the register path above guards with is_simm16.
          __ addi(AT, AT, c);
#else
          __ li(T8, c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        case lir_sub: {
          __ ld_ptr(AT, laddr);
#ifndef _LP64
          __ addi(AT, AT, -c);
#else
          __ li(T8, -c);
          __ add(AT, AT, T8);
#endif
          __ st_ptr(AT, laddr);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
// Unary math intrinsics on a double operand: abs and sqrt are emitted
// inline; log/log10 are unimplemented; sin/cos/tan dispatch through the
// trigfunc helper.  Non-double operands are unimplemented.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op *op) {
  //FIXME,lir_log, lir_log10,lir_abs,lir_sqrt,so many new lir instruction @jerome
  if (value->is_double_fpu()) {
    // assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : //__ flog() ; break;
      case lir_log10 : //__ flog10() ;
        Unimplemented();
        break;
      case lir_abs   : __ abs_d(dest->as_double_reg(), value->as_double_reg()) ; break;
      case lir_sqrt  : __ sqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
      case lir_sin   :
        // Should consider not saving ebx if not necessary
        __ trigfunc('s', 0);
        break;
      case lir_cos :
        // Should consider not saving ebx if not necessary
        // assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', 0);
        break;
      case lir_tan :
        // Should consider not saving ebx if not necessary
        __ trigfunc('t', 0);
        break;
      default      : ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}
//FIXME, if right is on the stack!
// Bitwise and/or/xor.  Handles 32-bit single registers (with constant,
// stack, or register right operands) and long operands (register pairs on
// 32-bit, single registers on LP64).
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register dstreg = dst->as_register();
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      __ move(AT, val);
      switch (code) {
        case lir_logic_and:
          __ andr (dstreg, reg, AT);
          break;
        case lir_logic_or:
          __ orr(dstreg, reg, AT);
          break;
        case lir_logic_xor:
          __ xorr(dstreg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      // NOTE(review): this branch writes the result into 'reg' (the left
      // operand) rather than 'dstreg' — presumably the allocator guarantees
      // left == dst when right is a stack slot; confirm, otherwise dst is
      // never written here.
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and:
          //FIXME. lw or ld_ptr?
          __ lw(AT, raddr);
          __ andr(reg, reg,AT);
          break;
        case lir_logic_or:
          __ lw(AT, raddr);
          __ orr(reg, reg, AT);
          break;
        case lir_logic_xor:
          __ lw(AT, raddr);
          __ xorr(reg, reg, AT);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andr (dstreg, reg, rright); break;
        case lir_logic_or : __ orr  (dstreg, reg, rright); break;
        case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    // Long operands.
    Register l_lo = left->as_register_lo();
    Register dst_lo = dst->as_register_lo();
#ifndef _LP64
    Register l_hi = left->as_register_hi();
    Register dst_hi = dst->as_register_hi();
#endif

    if (right->is_constant()) {
#ifndef _LP64
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();

      switch (code) {
        case lir_logic_and:
          __ move(AT, r_lo);
          __ andr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ andr(dst_hi, l_hi, AT);
          break;

        case lir_logic_or:
          __ move(AT, r_lo);
          __ orr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ orr(dst_hi, l_hi, AT);
          break;

        case lir_logic_xor:
          __ move(AT, r_lo);
          __ xorr(dst_lo, l_lo, AT);
          __ move(AT, r_hi);
          __ xorr(dst_hi, l_hi, AT);
          break;

        default: ShouldNotReachHere();
      }
#else
      __ li(AT, right->as_constant_ptr()->as_jlong());

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, AT);
          break;

        case lir_logic_or:
          __ orr(dst_lo, l_lo, AT);
          break;

        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, AT);
          break;

        default: ShouldNotReachHere();
      }
#endif
    } else {
      Register r_lo = right->as_register_lo();
      // NOTE(review): r_hi is read even under _LP64 where it is unused by
      // the NOT_LP64 lines below — confirm as_register_hi() is safe there.
      Register r_hi = right->as_register_hi();

      switch (code) {
        case lir_logic_and:
          __ andr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorr(dst_lo, l_lo, r_lo);
          NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }
  }
}
//done here. aoqi. 12-12 22:25
// we assume that eax and edx can be overwritten
// Integer divide / remainder (lir_idiv / lir_irem).  MIPS div does not trap
// on divide-by-zero, so a trap-if-equal (teq against R0) is emitted manually
// and its pc is registered via add_debug_info_for_div0.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    int divisor = right->as_constant_ptr()->as_jint();
    assert(divisor!=0, "must be nonzero");
#ifndef _LP64
    __ move(AT, divisor);
    __ div(lreg, AT);
#else
    __ li(AT, divisor);
    __ ddiv(lreg, AT);
#endif
    int idivl_offset = code_offset();

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq fires only when AT (the divisor) equals R0, i.e. is zero; with the
    // nonzero constant asserted above it never traps, but it still provides
    // the instruction at idivl_offset that the debug info points at.
    __ teq(R0, AT, 0x7);
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  } else {
    Register rreg = right->as_register();
#ifndef _LP64
    __ div(lreg, rreg);
#else
    __ ddiv(lreg, rreg);
#endif

    int idivl_offset = code_offset();
    __ teq(R0, rreg, 0x7);  // trap when the divisor is zero
    __ nop();
    __ nop();
    add_debug_info_for_div0(idivl_offset, info);
  }

  // get the result
  if (code == lir_irem) {
    __ mfhi(dreg);  // remainder comes back in HI
#ifdef _LP64
    if (result->type() == T_INT)
      __ sll(dreg, dreg, 0);  // sign-extend the 32-bit result on LP64
#endif
  } else if (code == lir_idiv) {
    __ mflo(dreg);  // quotient comes back in LO
  } else {
    ShouldNotReachHere();
  }
}
4094 void LIR_Assembler::arithmetic_frem(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
4095 if (left->is_single_fpu()) {
4096 assert(right->is_single_fpu(),"right must be float");
4097 assert(result->is_single_fpu(), "dest must be float");
4098 assert(temp->is_single_fpu(), "dest must be float");
4100 FloatRegister lreg = left->as_float_reg();
4101 FloatRegister rreg = right->as_float_reg();
4102 FloatRegister res = result->as_float_reg();
4103 FloatRegister tmp = temp->as_float_reg();
4105 switch (code) {
4106 case lir_frem:
4107 __ rem_s(res, lreg, rreg, tmp);
4108 break;
4109 default : ShouldNotReachHere();
4110 }
4111 } else if (left->is_double_fpu()) {
4112 assert(right->is_double_fpu(),"right must be double");
4113 assert(result->is_double_fpu(), "dest must be double");
4114 assert(temp->is_double_fpu(), "dest must be double");
4116 FloatRegister lreg = left->as_double_reg();
4117 FloatRegister rreg = right->as_double_reg();
4118 FloatRegister res = result->as_double_reg();
4119 FloatRegister tmp = temp->as_double_reg();
4121 switch (code) {
4122 case lir_frem:
4123 __ rem_d(res, lreg, rreg, tmp);
4124 break;
4125 default : ShouldNotReachHere();
4126 }
4127 }
4128 }
// Three-way compare producing -1/0/1 in dst: FP compares (lir_cmp_fd2i and
// lir_ucmp_fd2i, differing only in the ordered vs unordered less-than
// condition c_olt_* vs c_ult_*) and long compare (lir_cmp_l2i).
// Throughout, the move following `__ delayed()` fills the branch delay slot
// and therefore executes whether or not the branch is taken; a later move
// overwrites it on fall-through.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst,LIR_Op2 * op) {
  Register dstreg = dst->as_register();
  if (code == lir_cmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);      // delay slot: result 0 if equal
      // less?
      __ c_olt_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);      // delay slot: result -1 if less
      // great
      __ move(dstreg, 1);

      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_olt_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    }
  } else if (code == lir_ucmp_fd2i) {
    if (left->is_single_fpu()) {
      FloatRegister leftreg = left->as_float_reg();
      FloatRegister rightreg = right->as_float_reg();

      Label done;
      // equal?
      __ c_eq_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_ult_s(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    } else {
      assert(left->is_double_fpu(), "Must double");
      FloatRegister leftreg = left->as_double_reg();
      FloatRegister rightreg = right->as_double_reg();

      Label done;
      // equal?
      __ c_eq_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, R0);
      // less?
      __ c_ult_d(leftreg, rightreg);
      __ bc1t(done);
      __ delayed();
      __ move(dstreg, -1);
      // great
      __ move(dstreg, 1);

      __ bind(done);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    // d_lo/d_hi are declared but never used.
    // NOTE(review): the _hi accessors are read even under _LP64 where only
    // the low (full 64-bit) halves are compared — confirm as_register_hi()
    // is valid there.
    Register l_lo, l_hi, r_lo, r_hi, d_lo, d_hi;
    l_lo = left->as_register_lo();
    l_hi = left->as_register_hi();
    r_lo = right->as_register_lo();
    r_hi = right->as_register_hi();

    Label done;
#ifndef _LP64
    // Compare the high halves first (signed).
    // less?
    __ slt(AT, l_hi, r_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // great?
    __ slt(AT, r_hi, l_hi);
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
#endif

    // now compare low 32 bits
    // below?
#ifndef _LP64
    __ sltu(AT, l_lo, r_lo);   // low halves compare unsigned on 32-bit
#else
    __ slt(AT, l_lo, r_lo);    // single signed 64-bit compare on LP64
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, -1);
    // above?
#ifndef _LP64
    __ sltu(AT, r_lo, l_lo);
#else
    __ slt(AT, r_lo, l_lo);
#endif
    __ bne(AT, R0, done);
    __ delayed();
    __ move(dstreg, 1);
    // equal
    __ move(dstreg, R0);

    __ bind(done);
  }
}
4263 void LIR_Assembler::align_call(LIR_Code code) {
4264 if (os::is_MP()) {
4265 // make sure that the displacement word of the call ends up word aligned
4266 int offset = __ offset();
4267 switch (code) {
4268 case lir_static_call:
4269 case lir_optvirtual_call:
4270 case lir_dynamic_call:
4271 offset += NativeCall::displacement_offset;
4272 break;
4273 case lir_icvirtual_call:
4274 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
4275 break;
4276 case lir_virtual_call: // currently, sparc-specific for niagara
4277 default: ShouldNotReachHere();
4278 }
4279 while (offset % BytesPerWord != 0) {
4280 __ nop();
4281 offset += 4;
4282 }
4283 }
4284 }
// Emit a direct Java call.  align_call must have run first so the
// displacement word is word-aligned on MP systems (asserted below); the
// call's delay slot is filled with a nop and debug info is recorded at the
// return pc.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned");
  __ call(op->addr(), rtype);
  __ delayed()->nop();
  add_call_info(code_offset(), op->info());
}
// Emit an inline-cache dispatched Java call via MacroAssembler::ic_call and
// record debug info at the return address.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
}
/* Currently, vtable-dispatch is only enabled for sparc platforms */
// Not used on this port.
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}
// Emit the out-of-line stub for a static call site.  The stub holds a
// patchable Method* load into Rmethod followed by a patchable jump; both
// 48-bit constants are placeholders here (NULL and -1) and are filled in
// when the call target is bound (see set_to_interpreted).
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  Metadata *o = NULL;
  int index = __ oop_recorder()->allocate_metadata_index(o);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
  //see set_to_interpreted
  __ patchable_set48(Rmethod, (long)o);

  __ patchable_set48(AT, (long)-1);  // placeholder jump target, patched later
  __ jr(AT);
  __ delayed()->nop();
  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
// Emit an athrow: the exception oop must be in V0 and the throwing pc in V1
// (asserted below).  Materializes the current pc into the exceptionPC
// register, then calls the Runtime1 exception handler (the cheaper nofpu
// variant when the method has no FPU code).
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register()== V0, "must match");
  assert(exceptionPC->as_register()== V1, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  long pc_for_athrow = (long)__ pc();
  int pc_for_athrow_offset = __ offset();
  Register epc = exceptionPC->as_register();
  __ relocate(relocInfo::internal_pc_type);
  __ li48(epc, pc_for_athrow);  // load the throwing pc into epc
  add_call_info(pc_for_athrow_offset, info); // for exception handler
  __ verify_not_null_oop(V0);
  // search an exception handler (eax: exception oop, edx: throwing pc)
  if (compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
            relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
            relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
}
// Branch to the shared unwind handler; the exception oop must already be in
// FSR (asserted below).
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register()== FSR, "must match");
  __ b(_unwind_handler_entry);
  __ delayed()->nop();
}
// Shift by a run-time (register) amount.
// On MIPS the hardware already masks the shift amount for the 32-bit forms
// (low 5 bits) and 64-bit forms (low 6 bits), matching the Java spec for
// the LP64 path; the 32-bit path masks explicitly with andi.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // optimized version for linear scan:
  // * tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

#ifdef _LP64
  Register count_reg = count->as_register();
  Register value_reg;
  Register dest_reg;
  if (left->is_single_cpu()) {
    value_reg = left->as_register();
    dest_reg = dest->as_register();
  } else if (left->is_double_cpu()) {
    value_reg = left->as_register_lo();
    dest_reg = dest->as_register_lo();
  } else {
    ShouldNotReachHere();
  }
  assert_different_registers(count_reg, value_reg);
  switch (code) {
    case lir_shl:
      // 32-bit results use the word form (which sign-extends into the high
      // half per the MIPS64 convention); 64-bit results use the doubleword form.
      if (dest->type() == T_INT)
        __ sllv(dest_reg, value_reg, count_reg);
      else
        __ dsllv(dest_reg, value_reg, count_reg);
      break;
    case lir_shr: __ dsrav(dest_reg, value_reg, count_reg); break;
    case lir_ushr:
#if 1
      /*
       Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
       However, dsrl will shift in company with the highest 32 bits.
       Thus, if the source register contains a negative value,
       the result is incorrect.
       * DoubleCvt.java
       *
       * static int inp (int shift)
       * {
       *   return -1 >>> (32 - shift);
       * }
       *
       * 26 ushift_right [t0|I] [a4|I] [a6|I]
       * 0x00000055616d2a98: dsrl a6, t0, a4 <-- error
       */

      // java.math.MutableBigInteger::primitiveRightShift
      //
      // 108 ushift_right [a6|I] [a4|I] [a4|I]
      //  0x00000055646d2f70: dsll32 a4, a6, 0 \
      //  0x00000055646d2f74: dsrl32 a4, a4, 0 |- error!
      //  0x00000055646d2f78: dsrl a4, a4, a4  /
      if (left->type() == T_INT && dest->type() == T_INT) {
        __ dsll32(AT, value_reg, 0);        // Omit the high 32 bits
        __ dsrl32(AT, AT, 0);               // AT now holds the zero-extended 32-bit value
        __ dsrlv(dest_reg, AT, count_reg);  // Unsigned right shift
        break;
      }
#endif
      __ dsrlv(dest_reg, value_reg, count_reg); break;
    default: ShouldNotReachHere();
  }
#else
  if (left->is_single_cpu()) {
    Register value_reg = left->as_register();
    Register count_reg = count->as_register();
    Register dest_reg = dest->as_register();
    assert_different_registers(count_reg, value_reg);

    switch (code) {
      case lir_shl:  __ sllv(dest_reg, value_reg, count_reg); break;
      case lir_shr:  __ srav(dest_reg, value_reg, count_reg); break;
      case lir_ushr: __ srlv(dest_reg, value_reg, count_reg); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // 64-bit shift composed from two 32-bit halves, with three cases:
    // count == 0, count >= 32, and 0 < count < 32.
    // NOTE: a bare `__ delayed();` marks the NEXT emitted instruction as
    // the filler of the preceding branch's delay slot (it executes whether
    // or not the branch is taken) — see the check_delay fix this file
    // was reviewed for.
    Register creg = count->as_register();
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();

    __ andi(creg, creg, 0x3f);  // Java spec: long shifts use only the low 6 bits
    switch (code) {
      case lir_shl:
        {
          Label normal, done, notzero;

          //count=0
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dlo, lo);
          __ b(done);
          __ delayed();
          __ move(dhi, hi);           // executed in the delay slot

          //count>=32
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);  // delay slot: AT = count - 32
          __ sllv(dhi, lo, AT);
          __ b(done);
          __ delayed();
          __ move(dlo, R0);           // low word is zeroed for count >= 32

          //count<32
          __ bind(normal);
          __ sllv(dhi, hi, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ srlv(AT, lo, AT);        // bits shifted out of lo into hi
          __ orr(dhi, dhi, AT);
          __ sllv(dlo, lo, creg);
          __ bind(done);
        }
        break;
      case lir_shr:
        {
          Label normal, done, notzero;

          //count=0
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);           // executed in the delay slot

          //count>=32
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addiu(AT, creg, (-1) * BitsPerWord);  // delay slot: AT = count - 32
          __ srav(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ sra(dhi, hi, BitsPerWord - 1);  // replicate the sign bit

          //count<32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);        // bits shifted out of hi into lo
          __ orr(dlo, dlo, AT);
          __ srav(dhi, hi, creg);
          __ bind(done);
        }
        break;
      case lir_ushr:
        {
          Label normal, done, notzero;

          //count=zero
          __ bne(creg, R0, notzero);
          __ delayed()->nop();
          __ move(dhi, hi);
          __ b(done);
          __ delayed();
          __ move(dlo, lo);           // executed in the delay slot

          //count>=32
          __ bind(notzero);
          __ sltiu(AT, creg, BitsPerWord);
          __ bne(AT, R0, normal);
          __ delayed();
          __ addi(AT, creg, (-1) * BitsPerWord);   // delay slot: AT = count - 32
          __ srlv(dlo, hi, AT);
          __ b(done);
          __ delayed();
          __ move(dhi, R0);           // high word is zeroed for unsigned shift

          //count<32
          __ bind(normal);
          __ srlv(dlo, lo, creg);
          __ move(AT, BitsPerWord);
          __ sub(AT, AT, creg);
          __ sllv(AT, hi, AT);        // bits shifted out of hi into lo
          __ orr(dlo, dlo, AT);
          __ srlv(dhi, hi, creg);
          __ bind(done);
        }
        break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif

}
4561 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
4562 if (dest->is_single_cpu()) {
4563 /* In WebClient,
4564 * virtual jboolean java.util.concurrent.atomic.AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl.compareAndSet
4565 *
4566 * 130 ushift_right [a4a4|J] [int:9|I] [a4|L]
4567 */
4568 Register value_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4569 Register dest_reg = dest->as_register();
4570 count = count & 0x1F; // Java spec
4572 switch (code) {
4573 #ifdef _LP64
4574 case lir_shl:
4575 if (dest->type() == T_INT)
4576 __ sll(dest_reg, value_reg, count);
4577 else
4578 __ dsll(dest_reg, value_reg, count);
4579 break;
4580 case lir_shr: __ dsra(dest_reg, value_reg, count); break;
4581 case lir_ushr:
4582 #if 1
4583 if (left->type() == T_INT && dest->type() == T_INT) {
4584 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4585 However, dsrl will shift in company with the highest 32 bits.
4586 Thus, if the source register contains a negative value,
4587 the resulti is incorrect.
4589 Example: in java.util.HashMap.get()
4591 68 ushift_right [t0|I] [int:20|I] [a4|I]
4592 dsrl a4, t0, 20
4594 t0: 0xFFFFFFFF87654321 (64bits for 0x87654321)
4596 ushift_right t0, 16 -> a4
4598 a4: 00000000 00008765 (right)
4599 a4: FFFFFFFF FFFF8765 (wrong)
4600 */
4601 __ dsll32(dest_reg, value_reg, 0); // Omit the high 32 bits
4602 __ dsrl32(dest_reg, dest_reg, count); // Unsigned right shift
4603 break;
4604 }
4605 #endif
4607 __ dsrl(dest_reg, value_reg, count);
4608 break;
4609 #else
4610 case lir_shl: __ sll(dest_reg, value_reg, count); break;
4611 case lir_shr: __ sra(dest_reg, value_reg, count); break;
4612 case lir_ushr: __ srl(dest_reg, value_reg, count); break;
4613 #endif
4614 default: ShouldNotReachHere();
4615 }
4617 } else if (dest->is_double_cpu()) {
4618 Register valuelo = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
4619 Register destlo = dest->as_register_lo();
4620 count = count & 0x3f;
4621 #ifdef _LP64
4622 switch (code) {
4623 case lir_shl: __ dsll(destlo, valuelo, count); break;
4624 case lir_shr: __ dsra(destlo, valuelo, count); break;
4625 case lir_ushr: __ dsrl(destlo, valuelo, count); break;
4626 default: ShouldNotReachHere();
4627 }
4628 #else
4629 Register desthi = dest->as_register_hi();
4630 Register valuehi = left->as_register_hi();
4631 assert_different_registers(destlo, valuehi, desthi);
4632 switch (code) {
4633 case lir_shl:
4634 if (count==0) {
4635 __ move(destlo, valuelo);
4636 __ move(desthi, valuehi);
4637 } else if (count>=32) {
4638 __ sll(desthi, valuelo, count-32);
4639 __ move(destlo, R0);
4640 } else {
4641 __ srl(AT, valuelo, 32 - count);
4642 __ sll(destlo, valuelo, count);
4643 __ sll(desthi, valuehi, count);
4644 __ orr(desthi, desthi, AT);
4645 }
4646 break;
4648 case lir_shr:
4649 if (count==0) {
4650 __ move(destlo, valuelo);
4651 __ move(desthi, valuehi);
4652 } else if (count>=32) {
4653 __ sra(destlo, valuehi, count-32);
4654 __ sra(desthi, valuehi, 31);
4655 } else {
4656 __ sll(AT, valuehi, 32 - count);
4657 __ sra(desthi, valuehi, count);
4658 __ srl(destlo, valuelo, count);
4659 __ orr(destlo, destlo, AT);
4660 }
4661 break;
4663 case lir_ushr:
4664 if (count==0) {
4665 __ move(destlo, valuelo);
4666 __ move(desthi, valuehi);
4667 } else if (count>=32) {
4668 __ sra(destlo, valuehi, count-32);
4669 __ move(desthi, R0);
4670 } else {
4671 __ sll(AT, valuehi, 32 - count);
4672 __ srl(desthi, valuehi, count);
4673 __ srl(destlo, valuelo, count);
4674 __ orr(destlo, destlo, AT);
4675 }
4676 break;
4678 default: ShouldNotReachHere();
4679 }
4680 #endif
4681 } else {
4682 ShouldNotReachHere();
4683 }
4684 }
4686 void LIR_Assembler::store_parameter(Register r, int offset_from_esp_in_words) {
4687 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4688 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4689 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4690 __ st_ptr(r, SP, offset_from_sp_in_bytes);
4691 }
4694 void LIR_Assembler::store_parameter(jint c, int offset_from_esp_in_words) {
4695 assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
4696 int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
4697 assert(offset_from_esp_in_words < frame_map()->reserved_argument_area_size(), "invalid offset");
4698 __ move(AT, c);
4699 __ st_ptr(AT, SP, offset_from_sp_in_bytes);
4700 }
// Store an outgoing oop-constant argument into the reserved argument area.
// The oop is registered with the oop recorder and an oop relocation is
// emitted immediately before the instructions that embed its address, so
// the GC can find and update the embedded pointer.
void LIR_Assembler::store_parameter(jobject o, int offset_from_esp_in_words) {
  assert(offset_from_esp_in_words >= 0, "invalid offset from esp");
  int offset_from_sp_in_bytes = offset_from_esp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  int oop_index = __ oop_recorder()->find_index(o);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);  // must immediately precede the address-materializing code
#ifndef _LP64
  //by_css
  __ lui(AT, Assembler::split_high((int)o));
  __ addiu(AT, AT, Assembler::split_low((int)o));
#else
  __ li48(AT, (long)o);
#endif

  __ st_ptr(AT, SP, offset_from_sp_in_bytes);
}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = T8;
#ifndef OPT_THREAD
  Register java_thread = T8;
#else
  Register java_thread = TREG;
#endif
  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // this is saved in the caller's reserved argument area
    //FIXME, maybe It will change something in the stack;
    // These are proper for the calling convention
    //store_parameter(length, 2);
    //store_parameter(dst_pos, 1);
    //store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    //store_parameter(src_pos, 3);
    //store_parameter(src, 4);
    assert(src == T0 && src_pos == A0, "mismatch in calling convention");
    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint

    // Save all five arguments so they can be reloaded after the call.
    __ push(src);
    __ push(dst);
    __ push(src_pos);
    __ push(dst_pos);
    __ push(length);

    // save SP and align
    __ get_thread(java_thread);
    __ st_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
#ifndef _LP64
    __ addi(SP, SP, (-5) * wordSize);
    __ move(AT, -(StackAlignmentInBytes));
    __ andr(SP, SP, AT);
    // push argument
    __ sw(length, SP, 4 * wordSize);
#else
    __ move(A4, length);  // 5th argument goes in A4 on LP64
#endif
    // Marshal the remaining arguments into the C calling convention registers.
    __ move(A3, dst_pos);
    __ move(A2, dst);
    __ move(A1, src_pos);
    __ move(A0, src);
    // make call
    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    __ call(entry, relocInfo::runtime_call_type);
    __ delayed()->nop();  // fill the branch delay slot
    // restore SP
#ifndef OPT_THREAD
    __ get_thread(java_thread);
#endif
    __ ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
    // Reload the saved arguments (reverse order of the pushes above).
    __ super_pop(length);
    __ super_pop(dst_pos);
    __ super_pop(src_pos);
    __ super_pop(dst);
    __ super_pop(src);

    // Runtime1::arraycopy returns 0 on success; nonzero falls into the slow stub.
    __ beq_far(V0, R0, *stub->continuation());
    __ delayed()->nop();

    __ b_far(*stub->entry());
    __ delayed()->nop();
    __ bind(*stub->continuation());
    return;
  }
  assert(default_type != NULL
         && default_type->is_array_klass()
         && default_type->is_loaded(),
         "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  switch (elem_size) {
    case 1: shift_amount = 0; break;
    case 2: shift_amount = 1; break;
    case 4: shift_amount = 2; break;
    case 8: shift_amount = 3; break;
    default: ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ beq_far(src, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ beq_far(dst, R0, *stub->entry());
    __ delayed()->nop();
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ bltz(src_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ bltz(dst_pos, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ bltz(length, *stub->entry());
    __ delayed()->nop();
  }

  // Range checks: go to the slow stub if pos + length exceeds the array length.
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(AT, src_pos, length);
    __ lw(tmp, src_length_addr);
    __ sltu(AT, tmp, AT);             // AT = (src.length < src_pos + length)
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(AT, dst_pos, length);
    __ lw(tmp, dst_length_addr);
    __ sltu(AT, tmp, AT);             // AT = (dst.length < dst_pos + length)
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }

  // Fast-path type check: identical klasses only; anything else goes slow.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedClassPointers) {
      __ lwu(AT, src_klass_addr);
      __ lwu(tmp, dst_klass_addr);
    } else {
      __ ld(AT, src_klass_addr);
      __ ld(tmp, dst_klass_addr);
    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly. For the object array
    // case, if no type check is needed then the dst type must match the
    // expected type and the src type is so subtype which we can't check. If
    // a type check i needed then at this point the classes are known to be
    // the same but again which don't know which type so we can't check them.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif
    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) {
        __ lwu(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
      if (UseCompressedClassPointers) {
        __ lwu(AT, src_klass_addr);
      } else {
        __ ld(AT, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
      if (UseCompressedClassPointers) {
        __ lwu(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
      __ beq(src, dst, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // Save the raw arguments; they are restored after the leaf call below.
  __ push(src);
  __ push(dst);
  __ push(src_pos);
  __ push(dst_pos);
  __ push(length);

  // Compute the three leaf-call arguments:
  //   A0 = src element address, A1 = dst element address,
  //   A2 = length (in bytes for primitives, in elements for oops).
  assert(A0 != A1 &&
         A0 != length &&
         A1 != length, "register checks");
  __ move(AT, dst_pos);
  if (shift_amount > 0 && basic_type != T_OBJECT) {
#ifndef _LP64
    __ sll(A2, length, shift_amount);
#else
    __ dsll(A2, length, shift_amount);
#endif
  } else {
    if (length != A2)
      __ move(A2, length);
  }
  __ move(A3, src_pos);
  assert(A0 != dst_pos &&
         A0 != dst &&
         dst_pos != dst, "register checks");

  assert_different_registers(A0, dst_pos, dst);
#ifndef _LP64
  __ sll(AT, AT, shift_amount);
#else
  __ dsll(AT, AT, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A1, dst, AT);    // A1 = &dst[dst_pos]

#ifndef _LP64
  __ sll(AT, A3, shift_amount);
#else
  __ dsll(AT, A3, shift_amount);
#endif
  __ addi(AT, AT, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add(A0, src, AT);    // A0 = &src[src_pos]

  // Oop copies need the barrier-aware runtime routine.
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 3);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 3);
  }
  __ super_pop(length);
  __ super_pop(dst_pos);
  __ super_pop(src_pos);
  __ super_pop(dst);
  __ super_pop(src);

  __ bind(*stub->continuation());
}
// CRC32 intrinsic is not implemented on this port; C1 should not emit it here.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  tty->print_cr("LIR_Assembler::emit_updatecrc32 unimplemented yet !");
  Unimplemented();
}
// Emit monitorenter/monitorexit. With UseFastLocking, the fast path is
// inlined via lock_object/unlock_object and the slow path lives in the
// op's code stub; without it, control goes straight to the stub.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register() : op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    __ b_far(*op->stub()->entry());
    __ delayed()->nop();  // fill the branch delay slot
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      //add_debug_info_for_null_check_here(op->info());
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0,
           "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // Slow path (code stub) returns here.
  __ bind(*op->stub()->continuation());
}
// Update MethodData profiling counters for a call site. For virtual and
// interface calls (with C1ProfileVirtualCalls) also update the per-receiver
// type rows of the VirtualCallData cell, either statically (when the
// receiver class is known at compile time) or with emitted runtime checks.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();

  __ mov_metadata(mdo, md->constant_encoding());

  // Bump the generic invocation counter for this call site.
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ ld_ptr(AT, counter_addr);
  __ addi(AT, AT, DataLayout::counter_increment);
  __ st_ptr(AT, counter_addr);

  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // First pass: the known klass already has a row — bump its counter.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(AT, known_klass->constant_encoding());
          __ st_ptr(AT, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }
    } else {
      // Receiver class unknown at compile time: emit runtime code that
      // matches recv's klass against the recorded rows.
      //__ ld_ptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      __ load_klass(recv, recv);
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ bne(recv, AT, next_test);
        __ delayed()->nop();  // fill the branch delay slot
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ ld_ptr(AT, data_addr);
        __ addi(AT, AT, DataLayout::counter_increment);
        __ st_ptr(AT, data_addr);
        __ b(update_done);
        __ delayed()->nop();
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ ld_ptr(AT, recv_addr);
        __ bne(AT, R0, next_test);  // slot occupied -> try the next row
        __ delayed()->nop();
        __ st_ptr(recv, recv_addr);
        __ move(AT, DataLayout::counter_increment);
        __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
        // The last row falls through to update_done; no branch needed.
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ b(update_done);
          __ delayed()->nop();
        }
        __ bind(next_test);
      }
      __ bind(update_done);
    }
  }
}
// Type profiling (JDK 8 MethodData type entries) is not implemented on this port.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  // Newly added in OpenJDK 8
  Unimplemented();
}
// LIR_OpDelay is not used by this port; C1 should never emit it here.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
5155 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
5156 if (dst->is_single_cpu())
5157 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
5158 else if (dst->is_double_cpu())
5159 __ lea(dst->as_register_lo(), frame_map()->address_for_monitor_lock(monitor_no));
5160 }
// Align loop-back branch targets to a word boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
// Arithmetic negation (dest = -left) for int, long, float and double operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ subu(dest->as_register(), R0, left->as_register());
  } else if (left->is_double_cpu()) {
#ifndef _LP64
    // 64-bit negation from two 32-bit halves via two's complement:
    //   dlo = ~lo + 1; carry = (dlo == 0) when lo != 0 borrows nothing,
    //   so dhi = ~hi + (dlo < 1 ? 1 : 0), i.e. ~hi plus the carry out of dlo.
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    Register dlo = dest->as_register_lo();
    Register dhi = dest->as_register_hi();
    assert(dlo != hi, "register checks");
    __ nor(dlo, R0, lo);        // dlo = ~lo
    __ addiu(dlo, dlo, 1);      // dlo = ~lo + 1
    __ sltiu(AT, dlo, 1);       // AT = (dlo == 0), the carry into the high word
    __ nor(dhi, R0, hi);        // dhi = ~hi
    __ addu(dhi, dhi, AT);      // dhi = ~hi + carry
#else
    __ subu(dest->as_register_lo(), R0, left->as_register_lo());
#endif
  } else if (left->is_single_fpu()) {
    //for mips , does it required ?
    __ neg_s(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    //for mips , does it required ?
    __ neg_d(dest->as_double_reg(), left->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}
5196 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
5197 assert(addr->is_address() && dest->is_register(), "check");
5198 Register reg;
5199 reg = dest->as_pointer_register();
5200 __ lea(reg, as_Address(addr->as_address_ptr()));
5201 }
// Materialize an oop constant into `reg`. A non-NULL oop gets an oop
// relocation (emitted immediately before the address-loading instructions)
// so the GC can track and update the embedded pointer; NULL is loaded raw.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    // This seems wrong as we do not emit relocInfo
    // for classes that are not loaded yet, i.e., they will be
    // never GC'd
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ relocate(rspec);  // must immediately precede the load below
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  }
}
// Call a runtime routine; optionally record debug info for the call site.
// Arguments are assumed already marshalled by the caller (args list unused here).
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();  // fill the branch delay slot
  if (info != NULL) {
    // Records the pc after the delay slot as the call-return site.
    add_call_info_here(info);
  }
}
// Move a volatile long between a register and memory. On LP64 a single
// 64-bit load/store is naturally atomic; the 32-bit paths are unimplemented.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_register() && dest->is_address()) {
    // store: register -> memory
    if (src->is_double_cpu()) {
#ifdef _LP64
      __ sd(src->as_register_lo(), as_Address(dest->as_address_ptr()));
#else
      Unimplemented();
      //__ sw(src->as_register_lo(), as_Address(dest->as_address_ptr()));
      //__ sw(src->as_register_hi(), as_Address(dest->as_address_ptr()).base(),
      //      as_Address(dest->as_address_ptr()).disp() + 4);
#endif
    } else if (src->is_double_fpu()) {
#ifdef _LP64
      __ sdc1(src->as_double_reg(), as_Address(dest->as_address_ptr()));
#else
      Unimplemented();
      //__ swc1(src->as_fpu_lo(), as_Address(dest->as_address_ptr()));
      //__ swc1(src->as_fpu_hi(), as_Address(dest->as_address_ptr()).base(),
      //       as_Address(dest->as_address_ptr()).disp() + 4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_address() && dest->is_register()) {
    // load: memory -> register
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ ld(dest->as_register_lo(), as_Address(src->as_address_ptr()));
#else
      Unimplemented();
      // __ lw(dest->as_register_lo(), as_Address(src->as_address_ptr()));
      // __ lw(dest->as_register_hi(), as_Address(src->as_address_ptr()).base(),
      //       as_Address(src->as_address_ptr()).disp() + 4);
#endif
    } else if (dest->is_double_fpu()) {
#ifdef _LP64
      __ ldc1(dest->as_double_reg(), as_Address(src->as_address_ptr()));
#else
      Unimplemented();
      // __ lwc1(dest->as_fpu_lo(), as_Address(src->as_address_ptr()));
      // __ lwc1(dest->as_fpu_hi(), as_Address(src->as_address_ptr()).base(),
      //        as_Address(src->as_address_ptr()).disp() + 4);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
#ifdef ASSERT
// emit run-time assertion
// Not implemented on this port; debug builds should never reach here.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  tty->print_cr("LIR_Assembler::emit_assert unimplemented yet!");
  Unimplemented();
}
#endif
// Full memory barrier: MIPS sync orders all prior loads/stores before all
// subsequent ones.
void LIR_Assembler::membar() {
  __ sync();
}
// Acquire barrier — implemented conservatively as a full sync on this port.
void LIR_Assembler::membar_acquire() {
  __ sync();
}
// Release barrier — implemented conservatively as a full sync on this port.
void LIR_Assembler::membar_release() {
  __ sync();
}
// LoadLoad barrier is a no-op here (the x86-style membar is commented out).
// NOTE(review): whether a no-op is sufficient on this MIPS implementation
// depends on its memory model — confirm against the port's design notes.
void LIR_Assembler::membar_loadload() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// StoreStore barrier is a no-op here (the x86-style membar is commented out).
void LIR_Assembler::membar_storestore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// LoadStore barrier is a no-op here (the x86-style membar is commented out).
void LIR_Assembler::membar_loadstore() {
  // no-op
  // //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
// StoreLoad barrier is a no-op here (the x86-style membar is commented out).
void LIR_Assembler::membar_storeload() {
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
// Load the current JavaThread* into the result register. With OPT_THREAD
// the thread lives permanently in TREG; otherwise it is fetched via the
// slower get_thread helper.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifndef OPT_THREAD
  __ get_thread(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()));
#else
  __ move(NOT_LP64(result_reg->as_register()) LP64_ONLY(result_reg->as_register_lo()), TREG);
#endif
}
// Platform peephole optimization hook — intentionally empty on this port.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
// Atomic xchg/xadd (for Unsafe intrinsics) is NOT implemented on this port:
// the LIR generator must never emit lir_xchg/lir_xadd for it. The large
// commented block below is the x86 implementation, kept only as a reference
// for a future MIPS implementation (e.g. via ll/sc loops).
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  /* assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }*/
  ShouldNotReachHere();
}
5395 #undef __