Mon, 12 Mar 2012 15:28:07 -0700
7152957: VM crashes with assert(false) failed: bad AD file
Reviewed-by: kvn, never
Contributed-by: nils.eliasson@oracle.com
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->

//------------------------------------------------------------
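// simm13 refers to SPARC's 13-bit signed immediate field (-4096..4095);
// a constant in that range fits directly into a single arithmetic or
// load/store instruction, while anything larger needs a sethi/or pair.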
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}
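

// The answer below feeds the peephole pass: SPARC executes one instruction
// after every branch/call (the delay slot), and only LIR ops known to expand
// to exactly one machine instruction may be hoisted into that slot.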
bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;

    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();

    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }
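
      // a compressed-oop store/load expands to an encode/decode plus the
      // memory access, i.e. more than one instruction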
      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the osr buffer's local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
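    // e.g. (illustrative numbers): with max_locals == 2 and two locks on a
    // 64-bit VM (BytesPerWord == 8), monitor_offset == 2*8 + 2*8*1 == 32, so
    // lock 0 is read from buffer offsets 32/40 and lock 1 from 16/24.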
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
    // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
    // Also, get string0.count-string1.count in o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int  count_offset = java_lang_String:: count_offset_in_bytes();

    __ load_heap_oop(str0, value_offset, tmp0);
    __ ld(str0, offset_offset, tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(str0, count_offset, str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    __ load_heap_oop(str1, value_offset, tmp1);
    __ add(tmp0, tmp2, tmp0);

    __ ld(str1, offset_offset, tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(str1, count_offset, str1);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    __ subcc(str0, str1, O7);
    __ add(tmp1, tmp2, tmp1);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result); // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit); // limit = -min{string0.count, string1.count}
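    // with base0/base1 now pointing one past the last character, the loop
    // below addresses chars as (base + limit) while counting limit up from
    // -2*min toward zero, so it needs no separate index register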

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  }
  else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    jobject2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
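    // the shift alone would round toward -infinity, so a bias of (divisor - 1)
    // is first added to negative dividends; e.g. -7 / 4: bias = (-7 >> 31) & 3
    // = 3, (-7 + 3) >> 2 = -1, and -7 % 4: -7 - ((-7 + 3) & ~3) = -3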
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
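  // the Y register supplies the upper 32 bits of the 64-bit dividend
  // consumed by the sdivcc below, so it is loaded with the sign extension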
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:     acond = Assembler::f_notEqual; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                   ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads always sign extended but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if the contents of rsrc are not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
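      // the sll/sra pair sign-extends the low byte/short into a full int,
      // e.g. i2b with rval == 0x12345680: sll 24 -> 0x80000000, sra 24 -> -128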
      __ sll (rval, shift, rdst);
      __ sra (rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
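      // chars are unsigned, so sll/srl zero-extends the low 16 bits instead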
      __ sll (rval, shift, rdst);
      __ srl (rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
  __ relocate(rspec);
  __ call(op->addr(), relocInfo::none);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
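  // a T_LONG store may also touch offset + wordSize (the second word), so
  // that displacement must fit in simm13 as well for the short form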
  if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
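            // narrow oops: encode the oop into 32 bits in G3_scratch and
            // store it with stw; store_offset is re-read so that implicit
            // null-check info points at the instruction that can fault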
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(),  base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
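  // same reasoning as in store(): a T_LONG access may also touch
  // offset + wordSize, so include it in the simm13 range check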
  if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :  __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  :  __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT :  __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   :  __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load.  The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalReads);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store.  The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);          // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
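  // the load into G0 is the safepoint poll: when a safepoint is requested
  // the VM protects the polling page, so this read traps and the signal
  // handler brings the thread to a stop at this return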
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    __ relocate(relocInfo::poll_type);
  }

  int offset = __ offset();
  __ ld_ptr(tmp->as_register(), 0, G0);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_oop(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notequal comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address * addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
        else                    __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
      __ orcc(xhi, G0, G0);
#else
      __ orcc(xhi, xlo, G0);
#endif
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
#ifdef _LP64
      __ cmp(xlo, ylo);
#else
      __ subcc(xlo, ylo, xlo);
      __ subccc(xhi, yhi, xhi);
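      // 64-bit compare on a 32-bit VM: subtract the low words setting the
      // carry, then the high words with carry; for eq/ne the two halves are
      // OR-ed below so the Z flag reflects the whole 64-bit difference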
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orcc(xhi, xlo, G0);
      }
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
    else                    __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
#ifdef _LP64
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
    __ lcmp(left->as_register_hi(),  left->as_register_lo(),
            right->as_register_hi(), right->as_register_lo(),
            dst->as_register());
#endif
  } else {
    ShouldNotReachHere();
  }
}
1700 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1701 Assembler::Condition acond;
1702 switch (condition) {
1703 case lir_cond_equal: acond = Assembler::equal; break;
1704 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1705 case lir_cond_less: acond = Assembler::less; break;
1706 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1707 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1708 case lir_cond_greater: acond = Assembler::greater; break;
1709 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
1710 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
1711 default: ShouldNotReachHere();
1712 };
1714 if (opr1->is_constant() && opr1->type() == T_INT) {
1715 Register dest = result->as_register();
1716 // load up first part of constant before branch
1717 // and do the rest in the delay slot.
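// e.g. materializing 0x12345678 emits
//   sethi %hi(0x12345678), dest       ! upper 22 bits, before the branch
//   or    dest, %lo(0x12345678), dest ! low 10 bits, in the delay slot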
1718 if (!Assembler::is_simm13(opr1->as_jint())) {
1719 __ sethi(opr1->as_jint(), dest);
1720 }
1721 } else if (opr1->is_constant()) {
1722 const2reg(opr1, result, lir_patch_none, NULL);
1723 } else if (opr1->is_register()) {
1724 reg2reg(opr1, result);
1725 } else if (opr1->is_stack()) {
1726 stack2reg(opr1, result, result->type());
1727 } else {
1728 ShouldNotReachHere();
1729 }
1730 Label skip;
1731 #ifdef _LP64
1732 if (type == T_INT) {
1733 __ br(acond, false, Assembler::pt, skip);
1734 } else
1735 #endif
1736 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
1737 if (opr1->is_constant() && opr1->type() == T_INT) {
1738 Register dest = result->as_register();
1739 if (Assembler::is_simm13(opr1->as_jint())) {
1740 __ delayed()->or3(G0, opr1->as_jint(), dest);
1741 } else {
1742 // the sethi has been done above, so just put in the low 10 bits
1743 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1744 }
1745 } else {
1746 // can't do anything useful in the delay slot
1747 __ delayed()->nop();
1748 }
1749 if (opr2->is_constant()) {
1750 const2reg(opr2, result, lir_patch_none, NULL);
1751 } else if (opr2->is_register()) {
1752 reg2reg(opr2, result);
1753 } else if (opr2->is_stack()) {
1754 stack2reg(opr2, result, result->type());
1755 } else {
1756 ShouldNotReachHere();
1757 }
1758 __ bind(skip);
1759 }
1762 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1763 assert(info == NULL, "unused on this code path");
1764 assert(left->is_register(), "wrong items state");
1765 assert(dest->is_register(), "wrong items state");
1767 if (right->is_register()) {
1768 if (dest->is_float_kind()) {
1770 FloatRegister lreg, rreg, res;
1771 FloatRegisterImpl::Width w;
1772 if (right->is_single_fpu()) {
1773 w = FloatRegisterImpl::S;
1774 lreg = left->as_float_reg();
1775 rreg = right->as_float_reg();
1776 res = dest->as_float_reg();
1777 } else {
1778 w = FloatRegisterImpl::D;
1779 lreg = left->as_double_reg();
1780 rreg = right->as_double_reg();
1781 res = dest->as_double_reg();
1782 }
1784 switch (code) {
1785 case lir_add: __ fadd(w, lreg, rreg, res); break;
1786 case lir_sub: __ fsub(w, lreg, rreg, res); break;
1787 case lir_mul: // fall through
1788 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1789 case lir_div: // fall through
1790 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1791 default: ShouldNotReachHere();
1792 }
1794 } else if (dest->is_double_cpu()) {
1795 #ifdef _LP64
1796 Register dst_lo = dest->as_register_lo();
1797 Register op1_lo = left->as_pointer_register();
1798 Register op2_lo = right->as_pointer_register();
1800 switch (code) {
1801 case lir_add:
1802 __ add(op1_lo, op2_lo, dst_lo);
1803 break;
1805 case lir_sub:
1806 __ sub(op1_lo, op2_lo, dst_lo);
1807 break;
1809 default: ShouldNotReachHere();
1810 }
1811 #else
1812 Register op1_lo = left->as_register_lo();
1813 Register op1_hi = left->as_register_hi();
1814 Register op2_lo = right->as_register_lo();
1815 Register op2_hi = right->as_register_hi();
1816 Register dst_lo = dest->as_register_lo();
1817 Register dst_hi = dest->as_register_hi();
1819 switch (code) {
1820 case lir_add:
1821 __ addcc(op1_lo, op2_lo, dst_lo);
1822 __ addc (op1_hi, op2_hi, dst_hi);
1823 break;
1825 case lir_sub:
1826 __ subcc(op1_lo, op2_lo, dst_lo);
1827 __ subc (op1_hi, op2_hi, dst_hi);
1828 break;
1830 default: ShouldNotReachHere();
1831 }
1832 #endif
1833 } else {
1834 assert (right->is_single_cpu(), "Just Checking");
1836 Register lreg = left->as_register();
1837 Register res = dest->as_register();
1838 Register rreg = right->as_register();
1839 switch (code) {
1840 case lir_add: __ add (lreg, rreg, res); break;
1841 case lir_sub: __ sub (lreg, rreg, res); break;
1842 case lir_mul: __ mult (lreg, rreg, res); break;
1843 default: ShouldNotReachHere();
1844 }
1845 }
1846 } else {
1847 assert (right->is_constant(), "must be constant");
1849 if (dest->is_single_cpu()) {
1850 Register lreg = left->as_register();
1851 Register res = dest->as_register();
1852 int simm13 = right->as_constant_ptr()->as_jint();
1854 switch (code) {
1855 case lir_add: __ add (lreg, simm13, res); break;
1856 case lir_sub: __ sub (lreg, simm13, res); break;
1857 case lir_mul: __ mult (lreg, simm13, res); break;
1858 default: ShouldNotReachHere();
1859 }
1860 } else {
1861 Register lreg = left->as_pointer_register();
1862 Register res = dest->as_register_lo();
1863 long con = right->as_constant_ptr()->as_jlong();
1864 assert(Assembler::is_simm13(con), "must be simm13");
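// a simm13 value fits in 13 signed bits, so the (int) casts below are lossless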
1866 switch (code) {
1867 case lir_add: __ add (lreg, (int)con, res); break;
1868 case lir_sub: __ sub (lreg, (int)con, res); break;
1869 case lir_mul: __ mult (lreg, (int)con, res); break;
1870 default: ShouldNotReachHere();
1871 }
1872 }
1873 }
1874 }
1877 void LIR_Assembler::fpop() {
1878 // do nothing
1879 }
1882 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1883 switch (code) {
1884 case lir_sin:
1885 case lir_tan:
1886 case lir_cos: {
1887 assert(thread->is_valid(), "preserve the thread object for performance reasons");
1888 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1889 break;
1890 }
1891 case lir_sqrt: {
1892 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1893 FloatRegister src_reg = value->as_double_reg();
1894 FloatRegister dst_reg = dest->as_double_reg();
1895 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1896 break;
1897 }
1898 case lir_abs: {
1899 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1900 FloatRegister src_reg = value->as_double_reg();
1901 FloatRegister dst_reg = dest->as_double_reg();
1902 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1903 break;
1904 }
1905 default: {
1906 ShouldNotReachHere();
1907 break;
1908 }
1909 }
1910 }
1913 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1914 if (right->is_constant()) {
1915 if (dest->is_single_cpu()) {
1916 int simm13 = right->as_constant_ptr()->as_jint();
1917 switch (code) {
1918 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
1919 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
1920 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
1921 default: ShouldNotReachHere();
1922 }
1923 } else {
1924 long c = right->as_constant_ptr()->as_jlong();
1925 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
1926 int simm13 = (int)c;
1927 switch (code) {
1928 case lir_logic_and:
1929 #ifndef _LP64
1930 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
1931 #endif
1932 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
1933 break;
1935 case lir_logic_or:
1936 #ifndef _LP64
1937 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
1938 #endif
1939 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
1940 break;
1942 case lir_logic_xor:
1943 #ifndef _LP64
1944 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
1945 #endif
1946 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
1947 break;
1949 default: ShouldNotReachHere();
1950 }
1951 }
1952 } else {
1953 assert(right->is_register(), "right should be in register");
1955 if (dest->is_single_cpu()) {
1956 switch (code) {
1957 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
1958 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
1959 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
1960 default: ShouldNotReachHere();
1961 }
1962 } else {
1963 #ifdef _LP64
1964 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
1965 left->as_register_lo();
1966 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
1967 right->as_register_lo();
1969 switch (code) {
1970 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
1971 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
1972 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
1973 default: ShouldNotReachHere();
1974 }
1975 #else
1976 switch (code) {
1977 case lir_logic_and:
1978 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1979 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1980 break;
1982 case lir_logic_or:
1983 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1984 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1985 break;
1987 case lir_logic_xor:
1988 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1989 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1990 break;
1992 default: ShouldNotReachHere();
1993 }
1994 #endif
1995 }
1996 }
1997 }
2000 int LIR_Assembler::shift_amount(BasicType t) {
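// log2 of the element size, used to scale an element index into a byte offset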
2001 int elem_size = type2aelembytes(t);
2002 switch (elem_size) {
2003 case 1 : return 0;
2004 case 2 : return 1;
2005 case 4 : return 2;
2006 case 8 : return 3;
2007 }
2008 ShouldNotReachHere();
2009 return -1;
2010 }
2013 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2014 assert(exceptionOop->as_register() == Oexception, "should match");
2015 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
2017 info->add_register_oop(exceptionOop);
2019 // reuse the debug info from the safepoint poll for the throw op itself
2020 address pc_for_athrow = __ pc();
2021 int pc_for_athrow_offset = __ offset();
2022 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2023 __ set(pc_for_athrow, Oissuing_pc, rspec);
2024 add_call_info(pc_for_athrow_offset, info); // for exception handler
2026 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2027 __ delayed()->nop();
2028 }
2031 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2032 assert(exceptionOop->as_register() == Oexception, "should match");
2034 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
2035 __ delayed()->nop();
2036 }
2039 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2040 Register src = op->src()->as_register();
2041 Register dst = op->dst()->as_register();
2042 Register src_pos = op->src_pos()->as_register();
2043 Register dst_pos = op->dst_pos()->as_register();
2044 Register length = op->length()->as_register();
2045 Register tmp = op->tmp()->as_register();
2046 Register tmp2 = O7;
2048 int flags = op->flags();
2049 ciArrayKlass* default_type = op->expected_type();
2050 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2051 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2053 #ifdef _LP64
2054 // the upper 32 bits must be clear: sign-extend the 32-bit position and length values
2055 __ sra(dst_pos, 0, dst_pos);
2056 __ sra(src_pos, 0, src_pos);
2057 __ sra(length, 0, length);
2058 #endif
2060 // set up the arraycopy stub information
2061 ArrayCopyStub* stub = op->stub();
2063 // always call the stub if no type information is available; it's fine
2064 // if the known type isn't loaded, since the code sanity-checks it in
2065 // debug mode, and the type isn't required when we know the exact type;
2066 // the debug code also checks that the type is an array type.
2067 if (op->expected_type() == NULL) {
2068 __ mov(src, O0);
2069 __ mov(src_pos, O1);
2070 __ mov(dst, O2);
2071 __ mov(dst_pos, O3);
2072 __ mov(length, O4);
2073 address copyfunc_addr = StubRoutines::generic_arraycopy();
2075 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2076 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2077 } else {
2078 #ifndef PRODUCT
2079 if (PrintC1Statistics) {
2080 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
2081 __ inc_counter(counter, G1, G3);
2082 }
2083 #endif
2084 __ call_VM_leaf(tmp, copyfunc_addr);
2085 }
2087 if (copyfunc_addr != NULL) {
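// The generic stub returns 0 on success; on a partial copy O0 holds the
// bitwise complement of the number of elements copied, so recover that
// count, advance the positions, shrink the length and retry via the stub.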
2088 __ xor3(O0, -1, tmp);
2089 __ sub(length, tmp, length);
2090 __ add(src_pos, tmp, src_pos);
2091 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2092 __ delayed()->add(dst_pos, tmp, dst_pos);
2093 } else {
2094 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2095 __ delayed()->nop();
2096 }
2097 __ bind(*stub->continuation());
2098 return;
2099 }
2101 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2103 // make sure src and dst are non-null
2104 if (flags & LIR_OpArrayCopy::src_null_check) {
2105 __ tst(src);
2106 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2107 __ delayed()->nop();
2108 }
2110 if (flags & LIR_OpArrayCopy::dst_null_check) {
2111 __ tst(dst);
2112 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2113 __ delayed()->nop();
2114 }
2116 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2117 // test src_pos register
2118 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
2119 __ delayed()->nop();
2120 }
2122 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2123 // test dst_pos register
2124 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
2125 __ delayed()->nop();
2126 }
2128 if (flags & LIR_OpArrayCopy::length_positive_check) {
2129 // make sure length isn't negative
2130 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
2131 __ delayed()->nop();
2132 }
2134 if (flags & LIR_OpArrayCopy::src_range_check) {
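// unsigned compare: carry is set when the array length is less than
// src_pos + length; both operands are known non-negative, so the sum
// cannot wrap as a 32-bit unsigned value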
2135 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2136 __ add(length, src_pos, tmp);
2137 __ cmp(tmp2, tmp);
2138 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2139 __ delayed()->nop();
2140 }
2142 if (flags & LIR_OpArrayCopy::dst_range_check) {
2143 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2144 __ add(length, dst_pos, tmp);
2145 __ cmp(tmp2, tmp);
2146 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2147 __ delayed()->nop();
2148 }
2150 int shift = shift_amount(basic_type);
2152 if (flags & LIR_OpArrayCopy::type_check) {
2153 // We don't know the array types are compatible
2154 if (basic_type != T_OBJECT) {
2155 // Simple test for basic type arrays
2156 if (UseCompressedOops) {
2157 // We don't need to decode because we only need to compare
2158 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
2159 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2160 __ cmp(tmp, tmp2);
2161 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2162 } else {
2163 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2164 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2165 __ cmp(tmp, tmp2);
2166 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2167 }
2168 __ delayed()->nop();
2169 } else {
2170 // For object arrays, if src is a sub class of dst then we can
2171 // safely do the copy.
2172 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2174 Label cont, slow;
2175 assert_different_registers(tmp, tmp2, G3, G1);
2177 __ load_klass(src, G3);
2178 __ load_klass(dst, G1);
2180 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2182 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2183 __ delayed()->nop();
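// the slow subtype stub returns its result in G3: non-zero means the
// source klass is a subtype of the destination klass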
2185 __ cmp(G3, 0);
2186 if (copyfunc_addr != NULL) { // use stub if available
2187 // src is not a sub class of dst so we have to do a
2188 // per-element check.
2189 __ br(Assembler::notEqual, false, Assembler::pt, cont);
2190 __ delayed()->nop();
2192 __ bind(slow);
2194 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2195 if ((flags & mask) != mask) {
2196 // Check that the operand whose type is unknown is also an object array.
2197 assert(flags & mask, "one of the two should be known to be an object array");
2199 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2200 __ load_klass(src, tmp);
2201 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2202 __ load_klass(dst, tmp);
2203 }
2204 int lh_offset = in_bytes(Klass::layout_helper_offset());
2206 __ lduw(tmp, lh_offset, tmp2);
2208 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2209 __ set(objArray_lh, tmp);
2210 __ cmp(tmp, tmp2);
2211 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2212 __ delayed()->nop();
2213 }
2215 Register src_ptr = O0;
2216 Register dst_ptr = O1;
2217 Register len = O2;
2218 Register chk_off = O3;
2219 Register super_k = O4;
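// O0-O4 are the outgoing arguments of the checkcast stub: element-addressed
// src/dst pointers, the element count, the super check offset and the
// destination element klass used for the per-element type check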
2221 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2222 if (shift == 0) {
2223 __ add(src_ptr, src_pos, src_ptr);
2224 } else {
2225 __ sll(src_pos, shift, tmp);
2226 __ add(src_ptr, tmp, src_ptr);
2227 }
2229 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2230 if (shift == 0) {
2231 __ add(dst_ptr, dst_pos, dst_ptr);
2232 } else {
2233 __ sll(dst_pos, shift, tmp);
2234 __ add(dst_ptr, tmp, dst_ptr);
2235 }
2236 __ mov(length, len);
2237 __ load_klass(dst, tmp);
2239 int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
2240 __ ld_ptr(tmp, ek_offset, super_k);
2242 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2243 __ lduw(super_k, sco_offset, chk_off);
2245 __ call_VM_leaf(tmp, copyfunc_addr);
2247 #ifndef PRODUCT
2248 if (PrintC1Statistics) {
2249 Label failed;
2250 __ br_notnull_short(O0, Assembler::pn, failed);
2251 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
2252 __ bind(failed);
2253 }
2254 #endif
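// same convention as the generic stub: O0 == 0 means the whole copy
// succeeded, otherwise ~O0 elements were copied before an element failed
// the type check; adjust the arguments and finish via the slow stub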
2256 __ br_null(O0, false, Assembler::pt, *stub->continuation());
2257 __ delayed()->xor3(O0, -1, tmp);
2259 #ifndef PRODUCT
2260 if (PrintC1Statistics) {
2261 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
2262 }
2263 #endif
2265 __ sub(length, tmp, length);
2266 __ add(src_pos, tmp, src_pos);
2267 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
2268 __ delayed()->add(dst_pos, tmp, dst_pos);
2270 __ bind(cont);
2271 } else {
2272 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2273 __ delayed()->nop();
2274 __ bind(cont);
2275 }
2276 }
2277 }
2279 #ifdef ASSERT
2280 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2281 // Sanity check the known type with the incoming class. For the
2282 // primitive case the types must match exactly with src.klass and
2283 // dst.klass each exactly matching the default type. For the
2284 // object array case, if no type check is needed then either the
2285 // dst type is exactly the expected type and the src type is a
2286 // subtype which we can't check or src is the same array as dst
2287 // but not necessarily exactly of type default_type.
2288 Label known_ok, halt;
2289 jobject2reg(op->expected_type()->constant_encoding(), tmp);
2290 if (UseCompressedOops) {
2291 // tmp holds the default type. It currently comes uncompressed after the
2292 // load of a constant, so encode it.
2293 __ encode_heap_oop(tmp);
2294 // load the raw value of the dst klass, since we will be comparing
2295 // uncompressed values directly.
2296 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2297 if (basic_type != T_OBJECT) {
2298 __ cmp(tmp, tmp2);
2299 __ br(Assembler::notEqual, false, Assembler::pn, halt);
2300 // load the raw value of the src klass.
2301 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2302 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2303 } else {
2304 __ cmp(tmp, tmp2);
2305 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2306 __ delayed()->cmp(src, dst);
2307 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2308 __ delayed()->nop();
2309 }
2310 } else {
2311 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2312 if (basic_type != T_OBJECT) {
2313 __ cmp(tmp, tmp2);
2314 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2315 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2316 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2317 } else {
2318 __ cmp(tmp, tmp2);
2319 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2320 __ delayed()->cmp(src, dst);
2321 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2322 __ delayed()->nop();
2323 }
2324 }
2325 __ bind(halt);
2326 __ stop("incorrect type information in arraycopy");
2327 __ bind(known_ok);
2328 }
2329 #endif
2331 #ifndef PRODUCT
2332 if (PrintC1Statistics) {
2333 address counter = Runtime1::arraycopy_count_address(basic_type);
2334 __ inc_counter(counter, G1, G3);
2335 }
2336 #endif
2338 Register src_ptr = O0;
2339 Register dst_ptr = O1;
2340 Register len = O2;
2342 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2343 if (shift == 0) {
2344 __ add(src_ptr, src_pos, src_ptr);
2345 } else {
2346 __ sll(src_pos, shift, tmp);
2347 __ add(src_ptr, tmp, src_ptr);
2348 }
2350 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2351 if (shift == 0) {
2352 __ add(dst_ptr, dst_pos, dst_ptr);
2353 } else {
2354 __ sll(dst_pos, shift, tmp);
2355 __ add(dst_ptr, tmp, dst_ptr);
2356 }
2358 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2359 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2360 const char *name;
2361 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2363 // arraycopy stubs take a length in number of elements, so don't scale it.
2364 __ mov(length, len);
2365 __ call_VM_leaf(tmp, entry);
2367 __ bind(*stub->continuation());
2368 }
2371 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2372 if (dest->is_single_cpu()) {
2373 #ifdef _LP64
2374 if (left->type() == T_OBJECT) {
2375 switch (code) {
2376 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
2377 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
2378 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2379 default: ShouldNotReachHere();
2380 }
2381 } else
2382 #endif
2383 switch (code) {
2384 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
2385 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
2386 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2387 default: ShouldNotReachHere();
2388 }
2389 } else {
2390 #ifdef _LP64
2391 switch (code) {
2392 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2393 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2394 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2395 default: ShouldNotReachHere();
2396 }
2397 #else
2398 switch (code) {
2399 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2400 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2401 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2402 default: ShouldNotReachHere();
2403 }
2404 #endif
2405 }
2406 }
2409 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2410 #ifdef _LP64
2411 if (left->type() == T_OBJECT) {
2412 count = count & 63; // shouldn't shift by more than the bit width of intptr_t
2413 Register l = left->as_register();
2414 Register d = dest->as_register_lo();
2415 switch (code) {
2416 case lir_shl: __ sllx (l, count, d); break;
2417 case lir_shr: __ srax (l, count, d); break;
2418 case lir_ushr: __ srlx (l, count, d); break;
2419 default: ShouldNotReachHere();
2420 }
2421 return;
2422 }
2423 #endif
2425 if (dest->is_single_cpu()) {
2426 count = count & 0x1F; // Java spec
2427 switch (code) {
2428 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
2429 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
2430 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
2431 default: ShouldNotReachHere();
2432 }
2433 } else if (dest->is_double_cpu()) {
2434 count = count & 63; // Java spec
2435 switch (code) {
2436 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2437 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2438 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2439 default: ShouldNotReachHere();
2440 }
2441 } else {
2442 ShouldNotReachHere();
2443 }
2444 }
2447 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2448 assert(op->tmp1()->as_register() == G1 &&
2449 op->tmp2()->as_register() == G3 &&
2450 op->tmp3()->as_register() == G4 &&
2451 op->obj()->as_register() == O0 &&
2452 op->klass()->as_register() == G5, "must be");
2453 if (op->init_check()) {
2454 __ ldub(op->klass()->as_register(),
2455 in_bytes(instanceKlass::init_state_offset()),
2456 op->tmp1()->as_register());
2457 add_debug_info_for_null_check_here(op->stub()->info());
2458 __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
2459 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2460 __ delayed()->nop();
2461 }
2462 __ allocate_object(op->obj()->as_register(),
2463 op->tmp1()->as_register(),
2464 op->tmp2()->as_register(),
2465 op->tmp3()->as_register(),
2466 op->header_size(),
2467 op->object_size(),
2468 op->klass()->as_register(),
2469 *op->stub()->entry());
2470 __ bind(*op->stub()->continuation());
2471 __ verify_oop(op->obj()->as_register());
2472 }
2475 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2476 assert(op->tmp1()->as_register() == G1 &&
2477 op->tmp2()->as_register() == G3 &&
2478 op->tmp3()->as_register() == G4 &&
2479 op->tmp4()->as_register() == O1 &&
2480 op->klass()->as_register() == G5, "must be");
2482 LP64_ONLY( __ signx(op->len()->as_register()); )
2483 if (UseSlowPath ||
2484 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2485 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2486 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2487 __ delayed()->nop();
2488 } else {
2489 __ allocate_array(op->obj()->as_register(),
2490 op->len()->as_register(),
2491 op->tmp1()->as_register(),
2492 op->tmp2()->as_register(),
2493 op->tmp3()->as_register(),
2494 arrayOopDesc::header_size(op->type()),
2495 type2aelembytes(op->type()),
2496 op->klass()->as_register(),
2497 *op->stub()->entry());
2498 }
2499 __ bind(*op->stub()->continuation());
2500 }
2503 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2504 ciMethodData *md, ciProfileData *data,
2505 Register recv, Register tmp1, Label* update_done) {
2506 uint i;
2507 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2508 Label next_test;
2509 // See if the receiver is receiver[n].
2510 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2511 mdo_offset_bias);
2512 __ ld_ptr(receiver_addr, tmp1);
2513 __ verify_oop(tmp1);
2514 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
2515 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2516 mdo_offset_bias);
2517 __ ld_ptr(data_addr, tmp1);
2518 __ add(tmp1, DataLayout::counter_increment, tmp1);
2519 __ st_ptr(tmp1, data_addr);
2520 __ ba(*update_done);
2521 __ delayed()->nop();
2522 __ bind(next_test);
2523 }
2525 // Didn't find receiver; find next empty slot and fill it in
2526 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2527 Label next_test;
2528 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2529 mdo_offset_bias);
2530 __ ld_ptr(recv_addr, tmp1);
2531 __ br_notnull_short(tmp1, Assembler::pt, next_test);
2532 __ st_ptr(recv, recv_addr);
2533 __ set(DataLayout::counter_increment, tmp1);
2534 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2535 mdo_offset_bias);
2536 __ ba(*update_done);
2537 __ delayed()->nop();
2538 __ bind(next_test);
2539 }
2540 }
2543 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2544 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2545 md = method->method_data_or_null();
2546 assert(md != NULL, "Sanity");
2547 data = md->bci_to_data(bci);
2548 assert(data != NULL, "need data for checkcast");
2549 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2550 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2551 // The offset is large so bias the mdo by the base of the slot so
2552 // that the ld can use simm13s to reference the slots of the data
2553 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2554 }
2555 }
2557 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2558 // we always need a stub for the failure case.
2559 CodeStub* stub = op->stub();
2560 Register obj = op->object()->as_register();
2561 Register k_RInfo = op->tmp1()->as_register();
2562 Register klass_RInfo = op->tmp2()->as_register();
2563 Register dst = op->result_opr()->as_register();
2564 Register Rtmp1 = op->tmp3()->as_register();
2565 ciKlass* k = op->klass();
2568 if (obj == k_RInfo) {
2569 k_RInfo = klass_RInfo;
2570 klass_RInfo = obj;
2571 }
2573 ciMethodData* md;
2574 ciProfileData* data;
2575 int mdo_offset_bias = 0;
2576 if (op->should_profile()) {
2577 ciMethod* method = op->profiled_method();
2578 assert(method != NULL, "Should have method");
2579 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2581 Label not_null;
2582 __ br_notnull_short(obj, Assembler::pn, not_null);
2583 Register mdo = k_RInfo;
2584 Register data_val = Rtmp1;
2585 jobject2reg(md->constant_encoding(), mdo);
2586 if (mdo_offset_bias > 0) {
2587 __ set(mdo_offset_bias, data_val);
2588 __ add(mdo, data_val, mdo);
2589 }
2590 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2591 __ ldub(flags_addr, data_val);
2592 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2593 __ stb(data_val, flags_addr);
2594 __ ba(*obj_is_null);
2595 __ delayed()->nop();
2596 __ bind(not_null);
2597 } else {
2598 __ br_null(obj, false, Assembler::pn, *obj_is_null);
2599 __ delayed()->nop();
2600 }
2602 Label profile_cast_failure, profile_cast_success;
2603 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
2604 Label *success_target = op->should_profile() ? &profile_cast_success : success;
2606 // patching may screw with our temporaries on sparc,
2607 // so let's do it before loading the class
2608 if (k->is_loaded()) {
2609 jobject2reg(k->constant_encoding(), k_RInfo);
2610 } else {
2611 jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2612 }
2613 assert(obj != k_RInfo, "must be different");
2615 // get object class
2616 // not a safepoint as obj null check happens earlier
2617 __ load_klass(obj, klass_RInfo);
2618 if (op->fast_check()) {
2619 assert_different_registers(klass_RInfo, k_RInfo);
2620 __ cmp(k_RInfo, klass_RInfo);
2621 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
2622 __ delayed()->nop();
2623 } else {
2624 bool need_slow_path = true;
2625 if (k->is_loaded()) {
2626 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
2627 need_slow_path = false;
2628 // perform the fast part of the checking logic
2629 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2630 (need_slow_path ? success_target : NULL),
2631 failure_target, NULL,
2632 RegisterOrConstant(k->super_check_offset()));
2633 } else {
2634 // perform the fast part of the checking logic
2635 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
2636 failure_target, NULL);
2637 }
2638 if (need_slow_path) {
2639 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2640 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2641 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2642 __ delayed()->nop();
2643 __ cmp(G3, 0);
2644 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2645 __ delayed()->nop();
2646 // Fall through to success case
2647 }
2648 }
2650 if (op->should_profile()) {
2651 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2652 assert_different_registers(obj, mdo, recv, tmp1);
2653 __ bind(profile_cast_success);
2654 jobject2reg(md->constant_encoding(), mdo);
2655 if (mdo_offset_bias > 0) {
2656 __ set(mdo_offset_bias, tmp1);
2657 __ add(mdo, tmp1, mdo);
2658 }
2659 __ load_klass(obj, recv);
2660 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
2661 // Jump over the failure case
2662 __ ba(*success);
2663 __ delayed()->nop();
2664 // Cast failure case
2665 __ bind(profile_cast_failure);
2666 jobject2reg(md->constant_encoding(), mdo);
2667 if (mdo_offset_bias > 0) {
2668 __ set(mdo_offset_bias, tmp1);
2669 __ add(mdo, tmp1, mdo);
2670 }
2671 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2672 __ ld_ptr(data_addr, tmp1);
2673 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2674 __ st_ptr(tmp1, data_addr);
2675 __ ba(*failure);
2676 __ delayed()->nop();
2677 }
2678 __ ba(*success);
2679 __ delayed()->nop();
2680 }
2682 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2683 LIR_Code code = op->code();
2684 if (code == lir_store_check) {
2685 Register value = op->object()->as_register();
2686 Register array = op->array()->as_register();
2687 Register k_RInfo = op->tmp1()->as_register();
2688 Register klass_RInfo = op->tmp2()->as_register();
2689 Register Rtmp1 = op->tmp3()->as_register();
2691 __ verify_oop(value);
2692 CodeStub* stub = op->stub();
2693 // check if it needs to be profiled
2694 ciMethodData* md;
2695 ciProfileData* data;
2696 int mdo_offset_bias = 0;
2697 if (op->should_profile()) {
2698 ciMethod* method = op->profiled_method();
2699 assert(method != NULL, "Should have method");
2700 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2701 }
2702 Label profile_cast_success, profile_cast_failure, done;
2703 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
2704 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
2706 if (op->should_profile()) {
2707 Label not_null;
2708 __ br_notnull_short(value, Assembler::pn, not_null);
2709 Register mdo = k_RInfo;
2710 Register data_val = Rtmp1;
2711 jobject2reg(md->constant_encoding(), mdo);
2712 if (mdo_offset_bias > 0) {
2713 __ set(mdo_offset_bias, data_val);
2714 __ add(mdo, data_val, mdo);
2715 }
2716 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2717 __ ldub(flags_addr, data_val);
2718 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2719 __ stb(data_val, flags_addr);
2720 __ ba_short(done);
2721 __ bind(not_null);
2722 } else {
2723 __ br_null_short(value, Assembler::pn, done);
2724 }
2725 add_debug_info_for_null_check_here(op->info_for_exception());
2726 __ load_klass(array, k_RInfo);
2727 __ load_klass(value, klass_RInfo);
2729 // get element klass (an instance klass, since this is an object array)
2730 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
2731 // perform the fast part of the checking logic
2732 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
2734 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2735 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2736 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2737 __ delayed()->nop();
2738 __ cmp(G3, 0);
2739 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2740 __ delayed()->nop();
2741 // fall through to the success case
2743 if (op->should_profile()) {
2744 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2745 assert_different_registers(value, mdo, recv, tmp1);
2746 __ bind(profile_cast_success);
2747 jobject2reg(md->constant_encoding(), mdo);
2748 if (mdo_offset_bias > 0) {
2749 __ set(mdo_offset_bias, tmp1);
2750 __ add(mdo, tmp1, mdo);
2751 }
2752 __ load_klass(value, recv);
2753 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2754 __ ba_short(done);
2755 // Cast failure case
2756 __ bind(profile_cast_failure);
2757 jobject2reg(md->constant_encoding(), mdo);
2758 if (mdo_offset_bias > 0) {
2759 __ set(mdo_offset_bias, tmp1);
2760 __ add(mdo, tmp1, mdo);
2761 }
2762 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2763 __ ld_ptr(data_addr, tmp1);
2764 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2765 __ st_ptr(tmp1, data_addr);
2766 __ ba(*stub->entry());
2767 __ delayed()->nop();
2768 }
2769 __ bind(done);
2770 } else if (code == lir_checkcast) {
2771 Register obj = op->object()->as_register();
2772 Register dst = op->result_opr()->as_register();
2773 Label success;
2774 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
2775 __ bind(success);
2776 __ mov(obj, dst);
2777 } else if (code == lir_instanceof) {
2778 Register obj = op->object()->as_register();
2779 Register dst = op->result_opr()->as_register();
2780 Label success, failure, done;
2781 emit_typecheck_helper(op, &success, &failure, &failure);
2782 __ bind(failure);
2783 __ set(0, dst);
2784 __ ba_short(done);
2785 __ bind(success);
2786 __ set(1, dst);
2787 __ bind(done);
2788 } else {
2789 ShouldNotReachHere();
2790 }
2792 }
2795 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2796 if (op->code() == lir_cas_long) {
2797 assert(VM_Version::supports_cx8(), "wrong machine");
2798 Register addr = op->addr()->as_pointer_register();
2799 Register cmp_value_lo = op->cmp_value()->as_register_lo();
2800 Register cmp_value_hi = op->cmp_value()->as_register_hi();
2801 Register new_value_lo = op->new_value()->as_register_lo();
2802 Register new_value_hi = op->new_value()->as_register_hi();
2803 Register t1 = op->tmp1()->as_register();
2804 Register t2 = op->tmp2()->as_register();
2805 #ifdef _LP64
2806 __ mov(cmp_value_lo, t1);
2807 __ mov(new_value_lo, t2);
2808 // perform the compare and swap operation
2809 __ casx(addr, t1, t2);
2810 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2811 // overwritten with the original value in "addr" and will be equal to t1.
2812 __ cmp(t1, t2);
2813 #else
2814 // move high and low halves of long values into single registers
2815 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
2816 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2817 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
2818 __ sllx(new_value_hi, 32, t2);
2819 __ srl(new_value_lo, 0, new_value_lo);
2820 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
2821 // perform the compare and swap operation
2822 __ casx(addr, t1, t2);
2823 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2824 // overwritten with the original value in "addr" and will be equal to t1.
2825 // Produce icc flag for 32bit.
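// subtract and fold the two 32-bit halves so the zero condition in icc
// is set only when all 64 bits of t1 and t2 matched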
2826 __ sub(t1, t2, t2);
2827 __ srlx(t2, 32, t1);
2828 __ orcc(t2, t1, G0);
2829 #endif
2830 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2831 Register addr = op->addr()->as_pointer_register();
2832 Register cmp_value = op->cmp_value()->as_register();
2833 Register new_value = op->new_value()->as_register();
2834 Register t1 = op->tmp1()->as_register();
2835 Register t2 = op->tmp2()->as_register();
2836 __ mov(cmp_value, t1);
2837 __ mov(new_value, t2);
2838 if (op->code() == lir_cas_obj) {
2839 if (UseCompressedOops) {
2840 __ encode_heap_oop(t1);
2841 __ encode_heap_oop(t2);
2842 __ cas(addr, t1, t2);
2843 } else {
2844 __ cas_ptr(addr, t1, t2);
2845 }
2846 } else {
2847 __ cas(addr, t1, t2);
2848 }
2849 __ cmp(t1, t2);
2850 } else {
2851 Unimplemented();
2852 }
2853 }
2855 void LIR_Assembler::set_24bit_FPU() {
2856 Unimplemented();
2857 }
2860 void LIR_Assembler::reset_FPU() {
2861 Unimplemented();
2862 }
2865 void LIR_Assembler::breakpoint() {
2866 __ breakpoint_trap();
2867 }
2870 void LIR_Assembler::push(LIR_Opr opr) {
2871 Unimplemented();
2872 }
2875 void LIR_Assembler::pop(LIR_Opr opr) {
2876 Unimplemented();
2877 }
2880 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2881 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2882 Register dst = dst_opr->as_register();
2883 Register reg = mon_addr.base();
2884 int offset = mon_addr.disp();
2885 // compute pointer to BasicLock
2886 if (mon_addr.is_simm13()) {
2887 __ add(reg, offset, dst);
2888 } else {
2889 __ set(offset, dst);
2890 __ add(dst, reg, dst);
2891 }
2892 }
2895 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2896 Register obj = op->obj_opr()->as_register();
2897 Register hdr = op->hdr_opr()->as_register();
2898 Register lock = op->lock_opr()->as_register();
2900 // obj may not be an oop
2901 if (op->code() == lir_lock) {
2902 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2903 if (UseFastLocking) {
2904 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2905 // add debug info for NullPointerException only if one is possible
2906 if (op->info() != NULL) {
2907 add_debug_info_for_null_check_here(op->info());
2908 }
2909 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2910 } else {
2911 // always do slow locking
2912 // note: the slow locking code could be inlined here, however if we use
2913 // slow locking, speed doesn't matter anyway and this solution is
2914 // simpler and requires less duplicated code - additionally, the
2915 // slow locking code is the same in either case which simplifies
2916 // debugging
2917 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2918 __ delayed()->nop();
2919 }
2920 } else {
2921 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2922 if (UseFastLocking) {
2923 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2924 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2925 } else {
2926 // always do slow unlocking
2927 // note: the slow unlocking code could be inlined here, however if we use
2928 // slow unlocking, speed doesn't matter anyway and this solution is
2929 // simpler and requires less duplicated code - additionally, the
2930 // slow unlocking code is the same in either case which simplifies
2931 // debugging
2932 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2933 __ delayed()->nop();
2934 }
2935 }
2936 __ bind(*op->stub()->continuation());
2937 }
2940 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2941 ciMethod* method = op->profiled_method();
2942 int bci = op->profiled_bci();
2944 // Update counter for all call types
2945 ciMethodData* md = method->method_data_or_null();
2946 assert(md != NULL, "Sanity");
2947 ciProfileData* data = md->bci_to_data(bci);
2948 assert(data->is_CounterData(), "need CounterData for calls");
2949 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2950 Register mdo = op->mdo()->as_register();
2951 #ifdef _LP64
2952 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2953 Register tmp1 = op->tmp1()->as_register_lo();
2954 #else
2955 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2956 Register tmp1 = op->tmp1()->as_register();
2957 #endif
2958 jobject2reg(md->constant_encoding(), mdo);
2959 int mdo_offset_bias = 0;
2960 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2961 data->size_in_bytes())) {
2962 // The offset is large so bias the mdo by the base of the slot so
2963 // that the ld can use simm13s to reference the slots of the data
2964 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2965 __ set(mdo_offset_bias, O7);
2966 __ add(mdo, O7, mdo);
2967 }
2969 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2970 Bytecodes::Code bc = method->java_code_at_bci(bci);
2971 // Perform additional virtual call profiling for invokevirtual and
2972 // invokeinterface bytecodes
2973 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2974 C1ProfileVirtualCalls) {
2975 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2976 Register recv = op->recv()->as_register();
2977 assert_different_registers(mdo, tmp1, recv);
2978 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2979 ciKlass* known_klass = op->known_holder();
2980 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2981 // We know the type that will be seen at this call site; we can
2982 // statically update the methodDataOop rather than needing to do
2983 // dynamic tests on the receiver type
2985 // NOTE: we should probably put a lock around this search to
2986 // avoid collisions by concurrent compilations
2987 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2988 uint i;
2989 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2990 ciKlass* receiver = vc_data->receiver(i);
2991 if (known_klass->equals(receiver)) {
2992 Address data_addr(mdo, md->byte_offset_of_slot(data,
2993 VirtualCallData::receiver_count_offset(i)) -
2994 mdo_offset_bias);
2995 __ ld_ptr(data_addr, tmp1);
2996 __ add(tmp1, DataLayout::counter_increment, tmp1);
2997 __ st_ptr(tmp1, data_addr);
2998 return;
2999 }
3000 }
3002 // Receiver type not found in profile data; select an empty slot
3004 // Note that this is less efficient than it should be because it
3005 // always does a write to the receiver part of the
3006 // VirtualCallData rather than just the first time
3007 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3008 ciKlass* receiver = vc_data->receiver(i);
3009 if (receiver == NULL) {
3010 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
3011 mdo_offset_bias);
3012 jobject2reg(known_klass->constant_encoding(), tmp1);
3013 __ st_ptr(tmp1, recv_addr);
3014 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
3015 mdo_offset_bias);
3016 __ ld_ptr(data_addr, tmp1);
3017 __ add(tmp1, DataLayout::counter_increment, tmp1);
3018 __ st_ptr(tmp1, data_addr);
3019 return;
3020 }
3021 }
3022 } else {
3023 __ load_klass(recv, recv);
3024 Label update_done;
3025 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
3026 // Receiver did not match any saved receiver and there is no empty row for it.
3027 // Increment total counter to indicate polymorphic case.
3028 __ ld_ptr(counter_addr, tmp1);
3029 __ add(tmp1, DataLayout::counter_increment, tmp1);
3030 __ st_ptr(tmp1, counter_addr);
3032 __ bind(update_done);
3033 }
3034 } else {
3035 // Static call
3036 __ ld_ptr(counter_addr, tmp1);
3037 __ add(tmp1, DataLayout::counter_increment, tmp1);
3038 __ st_ptr(tmp1, counter_addr);
3039 }
3040 }
3042 void LIR_Assembler::align_backward_branch_target() {
3043 __ align(OptoLoopAlignment);
3044 }
3047 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
3048 // make sure we are expecting a delay
3049 // this has the side effect of clearing the delay state
3050 // so we can use _masm instead of _masm->delayed() to do the
3051 // code generation.
3052 __ delayed();
3054 // make sure we only emit one instruction
3055 int offset = code_offset();
3056 op->delay_op()->emit_code(this);
3057 #ifdef ASSERT
3058 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
3059 op->delay_op()->print();
3060 }
3061 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
3062 "only one instruction can go in a delay slot");
3063 #endif
3065 // we may also be emitting the call info for the instruction
3066 // which we are the delay slot of.
3067 CodeEmitInfo* call_info = op->call_info();
3068 if (call_info) {
3069 add_call_info(code_offset(), call_info);
3070 }
3072 if (VerifyStackAtCalls) {
3073 _masm->sub(FP, SP, O7);
3074 _masm->cmp(O7, initial_frame_size_in_bytes());
3075 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
3076 }
3077 }
3080 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3081 assert(left->is_register(), "can only handle registers");
3083 if (left->is_single_cpu()) {
3084 __ neg(left->as_register(), dest->as_register());
3085 } else if (left->is_single_fpu()) {
3086 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
3087 } else if (left->is_double_fpu()) {
3088 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
3089 } else {
3090 assert (left->is_double_cpu(), "Must be a long");
3091 Register Rlow = left->as_register_lo();
3092 Register Rhi = left->as_register_hi();
3093 #ifdef _LP64
3094 __ sub(G0, Rlow, dest->as_register_lo());
3095 #else
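// two-word negate: subtract from zero, carrying the borrow into the high word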
3096 __ subcc(G0, Rlow, dest->as_register_lo());
3097 __ subc (G0, Rhi, dest->as_register_hi());
3098 #endif
3099 }
3100 }
3103 void LIR_Assembler::fxch(int i) {
3104 Unimplemented();
3105 }
3107 void LIR_Assembler::fld(int i) {
3108 Unimplemented();
3109 }
3111 void LIR_Assembler::ffree(int i) {
3112 Unimplemented();
3113 }
3115 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
3116 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3118 // if tmp is invalid, then the function being called doesn't destroy the thread
3119 if (tmp->is_valid()) {
3120 __ save_thread(tmp->as_register());
3121 }
3122 __ call(dest, relocInfo::runtime_call_type);
3123 __ delayed()->nop();
3124 if (info != NULL) {
3125 add_call_info_here(info);
3126 }
3127 if (tmp->is_valid()) {
3128 __ restore_thread(tmp->as_register());
3129 }
3131 #ifdef ASSERT
3132 __ verify_thread();
3133 #endif // ASSERT
3134 }
3137 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3138 #ifdef _LP64
3139 ShouldNotReachHere();
3140 #endif
3142 NEEDS_CLEANUP;
3143 if (type == T_LONG) {
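// a volatile long access must be a single 64-bit memory operation to stay
// atomic: stx/ldx where V9 instructions work, std/ldd otherwise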
3144 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
3146 // (extended to allow indexed as well as constant displaced for JSR-166)
3147 Register idx = noreg; // contains either constant offset or index
3149 int disp = mem_addr->disp();
3150 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3151 if (!Assembler::is_simm13(disp)) {
3152 idx = O7;
3153 __ set(disp, idx);
3154 }
3155 } else {
3156 assert(disp == 0, "not both indexed and disp");
3157 idx = mem_addr->index()->as_register();
3158 }
3160 int null_check_offset = -1;
3162 Register base = mem_addr->base()->as_register();
3163 if (src->is_register() && dest->is_address()) {
3164 // G4 is high half, G5 is low half
3165 if (VM_Version::v9_instructions_work()) {
3166 // clear the top bits of G5, and scale up G4
3167 __ srl (src->as_register_lo(), 0, G5);
3168 __ sllx(src->as_register_hi(), 32, G4);
3169 // combine the two halves into the 64 bits of G4
3170 __ or3(G4, G5, G4);
3171 null_check_offset = __ offset();
3172 if (idx == noreg) {
3173 __ stx(G4, base, disp);
3174 } else {
3175 __ stx(G4, base, idx);
3176 }
3177 } else {
3178 __ mov (src->as_register_hi(), G4);
3179 __ mov (src->as_register_lo(), G5);
3180 null_check_offset = __ offset();
3181 if (idx == noreg) {
3182 __ std(G4, base, disp);
3183 } else {
3184 __ std(G4, base, idx);
3185 }
3186 }
3187 } else if (src->is_address() && dest->is_register()) {
3188 null_check_offset = __ offset();
3189 if (VM_Version::v9_instructions_work()) {
3190 if (idx == noreg) {
3191 __ ldx(base, disp, G5);
3192 } else {
3193 __ ldx(base, idx, G5);
3194 }
3195 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3196 __ mov (G5, dest->as_register_lo()); // copy low half into lo
3197 } else {
3198 if (idx == noreg) {
3199 __ ldd(base, disp, G4);
3200 } else {
3201 __ ldd(base, idx, G4);
3202 }
3203 // G4 is high half, G5 is low half
3204 __ mov (G4, dest->as_register_hi());
3205 __ mov (G5, dest->as_register_lo());
3206 }
3207 } else {
3208 Unimplemented();
3209 }
3210 if (info != NULL) {
3211 add_debug_info_for_null_check(null_check_offset, info);
3212 }
3214 } else {
3215 // use normal move for all other volatiles since they don't need
3216 // special handling to remain atomic.
3217 move_op(src, dest, type, lir_patch_none, info, false, false, false);
3218 }
3219 }
3221 void LIR_Assembler::membar() {
3222 // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
3223 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3224 }
3226 void LIR_Assembler::membar_acquire() {
3227 // no-op on TSO
3228 }
3230 void LIR_Assembler::membar_release() {
3231 // no-op on TSO
3232 }
3234 void LIR_Assembler::membar_loadload() {
3235 // no-op
3236 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3237 }
3239 void LIR_Assembler::membar_storestore() {
3240 // no-op
3241 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3242 }
3244 void LIR_Assembler::membar_loadstore() {
3245 // no-op
3246 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3247 }
3249 void LIR_Assembler::membar_storeload() {
3250 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3251 }
3254 // Pack two sequential registers containing 32 bit values
3255 // into a single 64 bit register.
3256 // src and src->successor() are packed into dst
3257 // src and dst may be the same register.
3258 // Note: src is destroyed
3259 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
3260 Register rs = src->as_register();
3261 Register rd = dst->as_register_lo();
3262 __ sllx(rs, 32, rs);
3263 __ srl(rs->successor(), 0, rs->successor());
3264 __ or3(rs, rs->successor(), rd);
3265 }
3267 // Unpack a 64 bit value in a register into
3268 // two sequential registers.
3269 // src is unpacked into dst and dst->successor()
3270 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
3271 Register rs = src->as_register_lo();
3272 Register rd = dst->as_register_hi();
3273 assert_different_registers(rs, rd, rd->successor());
3274 __ srlx(rs, 32, rd);
3275 __ srl (rs, 0, rd->successor());
3276 }
3279 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
3280 LIR_Address* addr = addr_opr->as_address_ptr();
3281 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
3283 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
3284 }
3287 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3288 assert(result_reg->is_register(), "check");
3289 __ mov(G2_thread, result_reg->as_register());
3290 }
3293 void LIR_Assembler::peephole(LIR_List* lir) {
3294 LIR_OpList* inst = lir->instructions_list();
3295 for (int i = 0; i < inst->length(); i++) {
3296 LIR_Op* op = inst->at(i);
3297 switch (op->code()) {
3298 case lir_cond_float_branch:
3299 case lir_branch: {
3300 LIR_OpBranch* branch = op->as_OpBranch();
3301 assert(branch->info() == NULL, "there shouldn't be state on branches anymore");
3302 LIR_Op* delay_op = NULL;
3303 // We'd like to be able to pull following instructions into
3304 // this slot, but we don't know enough to do it safely yet, so
3305 // we only optimize block-to-block control flow.
3306 if (LIRFillDelaySlots && branch->block()) {
3307 LIR_Op* prev = inst->at(i - 1);
3308 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
3309 // swap previous instruction into delay slot
3310 inst->at_put(i - 1, op);
3311 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3312 #ifndef PRODUCT
3313 if (LIRTracePeephole) {
3314 tty->print_cr("delayed");
3315 inst->at(i - 1)->print();
3316 inst->at(i)->print();
3317 tty->cr();
3318 }
3319 #endif
3320 continue;
3321 }
3322 }
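// Schematic effect of the swap above (hypothetical operands):
//   add t1, 1, t1 ; branch [B7]   ==>   branch [B7] ; delay { add t1, 1, t1 }
// When no safe single-instruction predecessor exists, control falls
// through and an explicit nop delay slot is emitted below.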
3324 if (!delay_op) {
3325 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
3326 }
3327 inst->insert_before(i + 1, delay_op);
3328 break;
3329 }
3330 case lir_static_call:
3331 case lir_virtual_call:
3332 case lir_icvirtual_call:
3333 case lir_optvirtual_call:
3334 case lir_dynamic_call: {
3335 LIR_Op* prev = inst->at(i - 1);
3336 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
3337 (op->code() != lir_virtual_call ||
3338 !prev->result_opr()->is_single_cpu() ||
3339 prev->result_opr()->as_register() != O0) &&
3340 LIR_Assembler::is_single_instruction(prev)) {
3341 // Only moves without info can be put into the delay slot.
3342 // Also don't allow the setup of the receiver in the delay
3343 // slot for vtable calls.
3344 inst->at_put(i - 1, op);
3345 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3346 #ifndef PRODUCT
3347 if (LIRTracePeephole) {
3348 tty->print_cr("delayed");
3349 inst->at(i - 1)->print();
3350 inst->at(i)->print();
3351 tty->cr();
3352 }
3353 #endif
3354 } else {
3355 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3356 inst->insert_before(i + 1, delay_op);
3357 i++;
3358 }
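// Schematic effect when the swap succeeds (hypothetical operands):
//   mov x, O1 ; call foo   ==>   call foo ; delay { mov x, O1 }
// Otherwise a nop fills the call's delay slot. For virtual calls the
// receiver move into O0 must stay ahead of the dispatch, so such moves
// are excluded by the condition above.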
3360 #if defined(TIERED) && !defined(_LP64)
3361 // Fix up the return value from G1 to O0/O1 for long returns.
3362 // It's done here instead of in LIRGenerator because there's
3363 // such a mismatch between the single-reg and double-reg
3364 // calling conventions.
3365 LIR_OpJavaCall* callop = op->as_OpJavaCall();
3366 if (callop->result_opr() == FrameMap::out_long_opr) {
3367 LIR_OpJavaCall* call;
3368 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
3369 for (int a = 0; a < callop->arguments()->length(); a++) {
3370 arguments->append(callop->arguments()->at(a));
3371 }
3372 if (op->code() == lir_virtual_call) {
3373 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3374 callop->vtable_offset(), arguments, callop->info());
3375 } else {
3376 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3377 callop->addr(), arguments, callop->info());
3378 }
3379 inst->at_put(i - 1, call);
3380 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
3381 T_LONG, lir_patch_none, NULL));
3382 }
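// Net effect (schematic): the call is retargeted to return the long as a
// single 64-bit value in G1, and the lir_unpack64 appended above splits it
// into the O0/O1 pair that the 32-bit calling convention expects.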
3383 #endif
3384 break;
3385 }
3386 }
3387 }
3388 }
3393 #undef __