Mon, 12 Aug 2013 17:37:02 +0200
8015107: NPG: Use consistent naming for metaspace concepts
Reviewed-by: coleenp, mgerdin, hseigel
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "c1/c1_ValueStack.hpp"
31 #include "ci/ciArrayKlass.hpp"
32 #include "ci/ciInstance.hpp"
33 #include "gc_interface/collectedHeap.hpp"
34 #include "memory/barrierSet.hpp"
35 #include "memory/cardTableModRefBS.hpp"
36 #include "nativeInst_sparc.hpp"
37 #include "oops/objArrayKlass.hpp"
38 #include "runtime/sharedRuntime.hpp"
40 #define __ _masm->
43 //------------------------------------------------------------
46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
47 if (opr->is_constant()) {
48 LIR_Const* constant = opr->as_constant_ptr();
49 switch (constant->type()) {
50 case T_INT: {
51 jint value = constant->as_jint();
52 return Assembler::is_simm13(value);
53 }
55 default:
56 return false;
57 }
58 }
59 return false;
60 }
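// Editor's note: simm13 is SPARC's 13-bit signed immediate field, so the
// check above accepts constants in [-4096, 4095]. A minimal sketch of the
// test, assuming two's-complement jint:
//   bool is_simm13(jint v) { return v >= -4096 && v <= 4095; }
// e.g. 4095 fits in a single instruction; 4096 needs a sethi/or pair.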
63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
64 switch (op->code()) {
65 case lir_null_check:
66 return true;
69 case lir_add:
70 case lir_ushr:
71 case lir_shr:
72 case lir_shl:
73 // integer shifts and adds are always one instruction
74 return op->result_opr()->is_single_cpu();
77 case lir_move: {
78 LIR_Op1* op1 = op->as_Op1();
79 LIR_Opr src = op1->in_opr();
80 LIR_Opr dst = op1->result_opr();
82 if (src == dst) {
83 NEEDS_CLEANUP;
84 // This works around a problem where a move with the same src and dst
85 // ends up in the delay slot: the assembler swallows the mov since it
86 // has no effect, and then complains because the delay slot is empty.
87 // Returning false stops the optimizer from putting this move in
88 // the delay slot.
89 return false;
90 }
92 // don't put moves involving oops into the delay slot since the VerifyOops code
93 // will make it much larger than a single instruction.
94 if (VerifyOops) {
95 return false;
96 }
98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
100 return false;
101 }
103 if (UseCompressedOops) {
104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
106 }
108 if (UseCompressedClassPointers) {
109 if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
110 src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
111 }
113 if (dst->is_register()) {
114 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
115 return !PatchALot;
116 } else if (src->is_single_stack()) {
117 return true;
118 }
119 }
121 if (src->is_register()) {
122 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
123 return !PatchALot;
124 } else if (dst->is_single_stack()) {
125 return true;
126 }
127 }
129 if (dst->is_register() &&
130 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
131 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
132 return true;
133 }
135 return false;
136 }
138 default:
139 return false;
140 }
141 ShouldNotReachHere();
142 }
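// Editor's note: is_single_instruction() appears to feed the peephole pass
// that fills branch delay slots (see "The peephole pass fills the delay
// slot" below); only ops guaranteed to emit exactly one instruction are
// safe to hoist into a delay slot, so every doubtful case returns false.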
145 LIR_Opr LIR_Assembler::receiverOpr() {
146 return FrameMap::O0_oop_opr;
147 }
150 LIR_Opr LIR_Assembler::osrBufferPointer() {
151 return FrameMap::I0_opr;
152 }
155 int LIR_Assembler::initial_frame_size_in_bytes() {
156 return in_bytes(frame_map()->framesize_in_bytes());
157 }
160 // inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
161 // we fetch the class of the receiver (O0) and compare it with the cached class.
162 // If they do not match we jump to the slow case.
163 int LIR_Assembler::check_icache() {
164 int offset = __ offset();
165 __ inline_cache_check(O0, G5_inline_cache_reg);
166 return offset;
167 }
170 void LIR_Assembler::osr_entry() {
171 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
172 //
173 // 1. Create a new compiled activation.
174 // 2. Initialize local variables in the compiled activation. The expression stack must be empty
175 // at the osr_bci; it is not initialized.
176 // 3. Jump to the continuation address in compiled code to resume execution.
178 // OSR entry point
179 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
180 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
181 ValueStack* entry_state = osr_entry->end()->state();
182 int number_of_locks = entry_state->locks_size();
184 // Create a frame for the compiled activation.
185 __ build_frame(initial_frame_size_in_bytes());
187 // OSR buffer is
188 //
189 // locals[nlocals-1..0]
190 // monitors[number_of_locks-1..0]
191 //
192 // locals is a direct copy of the interpreter frame, so the first slot
193 // in the osr buffer's local array is the last local from the interpreter
194 // and the last slot is local[0] (the receiver) from the interpreter
195 //
196 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
197 // from the interpreter frame, and the nth lock slot in the osr buffer is the
198 // 0th lock in the interpreter frame (the method lock if a sync method)
200 // Initialize monitors in the compiled activation.
201 // I0: pointer to osr buffer
202 //
203 // All other registers are dead at this point and the locals will be
204 // copied into place by code emitted in the IR.
206 Register OSR_buf = osrBufferPointer()->as_register();
207 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
208 int monitor_offset = BytesPerWord * method()->max_locals() +
209 (2 * BytesPerWord) * (number_of_locks - 1);
210 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
211 // the OSR buffer using 2 word entries: first the lock and then
212 // the oop.
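// Editor's note: a worked example of the layout above, assuming a 64-bit
// VM (BytesPerWord == 8), max_locals() == 3 and number_of_locks == 2:
//   monitor_offset = 8*3 + 16*(2-1) = 40
//   slot_offset(i=0) = 40 - 0  = 40  (lock at 40, oop at 48)
//   slot_offset(i=1) = 40 - 16 = 24  (lock at 24, oop at 32)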
213 for (int i = 0; i < number_of_locks; i++) {
214 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
215 #ifdef ASSERT
216 // verify the interpreter's monitor has a non-null object
217 {
218 Label L;
219 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
220 __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
221 __ stop("locked object is NULL");
222 __ bind(L);
223 }
224 #endif // ASSERT
225 // Copy the lock field into the compiled activation.
226 __ ld_ptr(OSR_buf, slot_offset + 0, O7);
227 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
228 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
229 __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
230 }
231 }
232 }
235 // Optimized Library calls
236 // This is the fast version of java.lang.String.compare; it has no
237 // OSR entry, and therefore we generate a slow version for OSRs
238 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
239 Register str0 = left->as_register();
240 Register str1 = right->as_register();
242 Label Ldone;
244 Register result = dst->as_register();
245 {
246 // Get a pointer to the first character of string0 in tmp0
247 // and get string0.length() in str0
248 // Get a pointer to the first character of string1 in tmp1
249 // and get string1.length() in str1
250 // Also, get string0.length()-string1.length() in
251 // o7 and get the condition code set
252 // Note: some instructions have been hoisted for better instruction scheduling
254 Register tmp0 = L0;
255 Register tmp1 = L1;
256 Register tmp2 = L2;
258 int value_offset = java_lang_String::value_offset_in_bytes(); // char array
259 if (java_lang_String::has_offset_field()) {
260 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
261 int count_offset = java_lang_String::count_offset_in_bytes();
262 __ load_heap_oop(str0, value_offset, tmp0);
263 __ ld(str0, offset_offset, tmp2);
264 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
265 __ ld(str0, count_offset, str0);
266 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
267 } else {
268 __ load_heap_oop(str0, value_offset, tmp1);
269 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
270 __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
271 }
273 // str1 may be null
274 add_debug_info_for_null_check_here(info);
276 if (java_lang_String::has_offset_field()) {
277 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
278 int count_offset = java_lang_String::count_offset_in_bytes();
279 __ load_heap_oop(str1, value_offset, tmp1);
280 __ add(tmp0, tmp2, tmp0);
282 __ ld(str1, offset_offset, tmp2);
283 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
284 __ ld(str1, count_offset, str1);
285 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
286 __ add(tmp1, tmp2, tmp1);
287 } else {
288 __ load_heap_oop(str1, value_offset, tmp2);
289 __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
290 __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
291 }
292 __ subcc(str0, str1, O7);
293 }
295 {
296 // Compute the minimum of the string lengths, scale it and store it in limit
297 Register count0 = I0;
298 Register count1 = I1;
299 Register limit = L3;
301 Label Lskip;
302 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
303 __ br(Assembler::greater, true, Assembler::pt, Lskip);
304 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
305 __ bind(Lskip);
307 // If either string is empty (or both of them) the result is the difference in lengths
308 __ cmp(limit, 0);
309 __ br(Assembler::equal, true, Assembler::pn, Ldone);
310 __ delayed()->mov(O7, result); // result is difference in lengths
311 }
313 {
314 // Neither string is empty
315 Label Lloop;
317 Register base0 = L0;
318 Register base1 = L1;
319 Register chr0 = I0;
320 Register chr1 = I1;
321 Register limit = L3;
323 // Shift base0 and base1 to the end of the arrays, negate limit
324 __ add(base0, limit, base0);
325 __ add(base1, limit, base1);
326 __ neg(limit); // limit = -min{string0.length(), string1.length()}
328 __ lduh(base0, limit, chr0);
329 __ bind(Lloop);
330 __ lduh(base1, limit, chr1);
331 __ subcc(chr0, chr1, chr0);
332 __ br(Assembler::notZero, false, Assembler::pn, Ldone);
333 assert(chr0 == result, "result must be pre-placed");
334 __ delayed()->inccc(limit, sizeof(jchar));
335 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
336 __ delayed()->lduh(base0, limit, chr0);
337 }
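// Editor's note: the loop above walks both arrays from the end using a
// negative offset. For example, with a minimum length of 2 chars,
// limit == -4 after negation (jchar is 2 bytes): the loads use offsets
// -4 then -2, inccc(limit, 2) sets the condition codes, and the branch
// falls through exactly when limit reaches 0.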
339 // If strings are equal up to min length, return the length difference.
340 __ mov(O7, result);
342 // Otherwise, return the difference between the first mismatched chars.
343 __ bind(Ldone);
344 }
347 // --------------------------------------------------------------------------------------------
349 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
350 if (!GenerateSynchronizationCode) return;
352 Register obj_reg = obj_opr->as_register();
353 Register lock_reg = lock_opr->as_register();
355 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
356 Register reg = mon_addr.base();
357 int offset = mon_addr.disp();
358 // compute pointer to BasicLock
359 if (mon_addr.is_simm13()) {
360 __ add(reg, offset, lock_reg);
361 } else {
363 __ set(offset, lock_reg);
364 __ add(reg, lock_reg, lock_reg);
365 }
366 // unlock object
367 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
368 // _slow_case_stubs->append(slow_case);
369 // temporary fix: must be created after the exception handler, therefore as a call stub
370 _slow_case_stubs->append(slow_case);
371 if (UseFastLocking) {
372 // try inlined fast unlocking first, revert to slow locking if it fails
373 // note: lock_reg points to the displaced header since the displaced header offset is 0!
374 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
375 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
376 } else {
377 // always do slow unlocking
378 // note: the slow unlocking code could be inlined here, but if we use
379 // slow unlocking, speed doesn't matter anyway and this solution is
380 // simpler and requires less duplicated code - additionally, the
381 // slow unlocking code is the same in either case, which simplifies
382 // debugging
383 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
384 __ delayed()->nop();
385 }
386 // done
387 __ bind(*slow_case->continuation());
388 }
391 int LIR_Assembler::emit_exception_handler() {
392 // if the last instruction is a call (typically to do a throw which
393 // is coming at the end after block reordering) the return address
394 // must still point into the code area in order to avoid assertion
395 // failures when searching for the corresponding bci => add a nop
396 // (was bug 5/14/1999 - gri)
397 __ nop();
399 // generate code for exception handler
400 ciMethod* method = compilation()->method();
402 address handler_base = __ start_a_stub(exception_handler_size);
404 if (handler_base == NULL) {
405 // not enough space left for the handler
406 bailout("exception handler overflow");
407 return -1;
408 }
410 int offset = code_offset();
412 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
413 __ delayed()->nop();
414 __ should_not_reach_here();
415 guarantee(code_offset() - offset <= exception_handler_size, "overflow");
416 __ end_a_stub();
418 return offset;
419 }
422 // Emit the code to remove the frame from the stack in the exception
423 // unwind path.
424 int LIR_Assembler::emit_unwind_handler() {
425 #ifndef PRODUCT
426 if (CommentedAssembly) {
427 _masm->block_comment("Unwind handler");
428 }
429 #endif
431 int offset = code_offset();
433 // Fetch the exception from TLS and clear out exception related thread state
434 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
435 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
436 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
438 __ bind(_unwind_handler_entry);
439 __ verify_not_null_oop(O0);
440 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
441 __ mov(O0, I0); // Preserve the exception
442 }
444 // Perform needed unlocking
445 MonitorExitStub* stub = NULL;
446 if (method()->is_synchronized()) {
447 monitor_address(0, FrameMap::I1_opr);
448 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
449 __ unlock_object(I3, I2, I1, *stub->entry());
450 __ bind(*stub->continuation());
451 }
453 if (compilation()->env()->dtrace_method_probes()) {
454 __ mov(G2_thread, O0);
455 __ save_thread(I1); // need to preserve thread in G2 across
456 // runtime call
457 metadata2reg(method()->constant_encoding(), O1);
458 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
459 __ delayed()->nop();
460 __ restore_thread(I1);
461 }
463 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
464 __ mov(I0, O0); // Restore the exception
465 }
467 // dispatch to the unwind logic
468 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
469 __ delayed()->nop();
471 // Emit the slow path assembly
472 if (stub != NULL) {
473 stub->emit_code(this);
474 }
476 return offset;
477 }
480 int LIR_Assembler::emit_deopt_handler() {
481 // if the last instruction is a call (typically to do a throw which
482 // is coming at the end after block reordering) the return address
483 // must still point into the code area in order to avoid assertion
484 // failures when searching for the corresponding bci => add a nop
485 // (was bug 5/14/1999 - gri)
486 __ nop();
488 // generate code for deopt handler
489 ciMethod* method = compilation()->method();
490 address handler_base = __ start_a_stub(deopt_handler_size);
491 if (handler_base == NULL) {
492 // not enough space left for the handler
493 bailout("deopt handler overflow");
494 return -1;
495 }
497 int offset = code_offset();
498 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
499 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
500 __ delayed()->nop();
501 guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
502 __ end_a_stub();
504 return offset;
505 }
508 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
509 if (o == NULL) {
510 __ set(NULL_WORD, reg);
511 } else {
512 int oop_index = __ oop_recorder()->find_index(o);
513 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
514 RelocationHolder rspec = oop_Relocation::spec(oop_index);
515 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
516 }
517 }
520 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
521 // Allocate a new index in table to hold the object once it's been patched
522 int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
523 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
525 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
526 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
527 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
528 // NULL will be dynamically patched later and the patched value may be large. We must
529 // therefore generate the sethi/add as placeholders.
530 __ patchable_set(addrlit, reg);
532 patching_epilog(patch, lir_patch_normal, reg, info);
533 }
536 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
537 __ set_metadata_constant(o, reg);
538 }
540 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
541 // Allocate a new index in table to hold the klass once it's been patched
542 int index = __ oop_recorder()->allocate_metadata_index(NULL);
543 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
544 AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
545 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
546 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
547 // NULL will be dynamically patched later and the patched value may be large. We must
548 // therefore generate the sethi/add as placeholders.
549 __ patchable_set(addrlit, reg);
551 patching_epilog(patch, lir_patch_normal, reg, info);
552 }
554 void LIR_Assembler::emit_op3(LIR_Op3* op) {
555 Register Rdividend = op->in_opr1()->as_register();
556 Register Rdivisor = noreg;
557 Register Rscratch = op->in_opr3()->as_register();
558 Register Rresult = op->result_opr()->as_register();
559 int divisor = -1;
561 if (op->in_opr2()->is_register()) {
562 Rdivisor = op->in_opr2()->as_register();
563 } else {
564 divisor = op->in_opr2()->as_constant_ptr()->as_jint();
565 assert(Assembler::is_simm13(divisor), "can only handle simm13");
566 }
568 assert(Rdividend != Rscratch, "");
569 assert(Rdivisor != Rscratch, "");
570 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
572 if (Rdivisor == noreg && is_power_of_2(divisor)) {
573 // convert division by a power of two into some shifts and logical operations
574 if (op->code() == lir_idiv) {
575 if (divisor == 2) {
576 __ srl(Rdividend, 31, Rscratch);
577 } else {
578 __ sra(Rdividend, 31, Rscratch);
579 __ and3(Rscratch, divisor - 1, Rscratch);
580 }
581 __ add(Rdividend, Rscratch, Rscratch);
582 __ sra(Rscratch, log2_intptr(divisor), Rresult);
583 return;
584 } else {
585 if (divisor == 2) {
586 __ srl(Rdividend, 31, Rscratch);
587 } else {
588 __ sra(Rdividend, 31, Rscratch);
589 __ and3(Rscratch, divisor - 1, Rscratch);
590 }
591 __ add(Rdividend, Rscratch, Rscratch);
592 __ andn(Rscratch, divisor - 1, Rscratch);
593 __ sub(Rdividend, Rscratch, Rresult);
594 return;
595 }
596 }
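// Editor's note: a worked example of the rounding fixup above, for
// lir_idiv with divisor == 4 and Rdividend == -7:
//   sra  -7, 31      -> Rscratch = -1 (all ones)
//   and3 -1, 3       -> Rscratch = 3
//   add  -7 + 3      -> Rscratch = -4
//   sra  -4, 2       -> Rresult  = -1  (truncates toward zero, as Java requires)
// A bare sra would give -7 >> 2 == -2, which rounds the wrong way.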
598 __ sra(Rdividend, 31, Rscratch);
599 __ wry(Rscratch);
601 add_debug_info_for_div0_here(op->info());
603 if (Rdivisor != noreg) {
604 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
605 } else {
606 assert(Assembler::is_simm13(divisor), "can only handle simm13");
607 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
608 }
610 Label skip;
611 __ br(Assembler::overflowSet, true, Assembler::pn, skip);
612 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
613 __ bind(skip);
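// Editor's note: the fixup above handles the one case where sdivcc
// overflows, min_jint / -1. The branch is annulled, so the delay-slot
// sethi(0x80000000, ...) executes only when the overflow flag is set,
// forcing the Java-mandated result of min_jint.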
615 if (op->code() == lir_irem) {
616 if (Rdivisor != noreg) {
617 __ smul(Rscratch, Rdivisor, Rscratch);
618 } else {
619 __ smul(Rscratch, divisor, Rscratch);
620 }
621 __ sub(Rdividend, Rscratch, Rresult);
622 }
623 }
626 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
627 #ifdef ASSERT
628 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
629 if (op->block() != NULL) _branch_target_blocks.append(op->block());
630 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
631 #endif
632 assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
634 if (op->cond() == lir_cond_always) {
635 __ br(Assembler::always, false, Assembler::pt, *(op->label()));
636 } else if (op->code() == lir_cond_float_branch) {
637 assert(op->ublock() != NULL, "must have unordered successor");
638 bool is_unordered = (op->ublock() == op->block());
639 Assembler::Condition acond;
640 switch (op->cond()) {
641 case lir_cond_equal: acond = Assembler::f_equal; break;
642 case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
643 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
644 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
645 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
646 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
647 default : ShouldNotReachHere();
648 }
649 __ fb( acond, false, Assembler::pn, *(op->label()));
650 } else {
651 assert (op->code() == lir_branch, "just checking");
653 Assembler::Condition acond;
654 switch (op->cond()) {
655 case lir_cond_equal: acond = Assembler::equal; break;
656 case lir_cond_notEqual: acond = Assembler::notEqual; break;
657 case lir_cond_less: acond = Assembler::less; break;
658 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
659 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
660 case lir_cond_greater: acond = Assembler::greater; break;
661 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
662 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
663 default: ShouldNotReachHere();
664 };
666 // sparc has different condition codes for testing 32-bit
667 // vs. 64-bit values. We could always test xcc if we could
668 // guarantee that 32-bit loads are always sign extended, but that
669 // isn't true, and since sign extension isn't free, it would impose
670 // a slight cost.
671 #ifdef _LP64
672 if (op->type() == T_INT) {
673 __ br(acond, false, Assembler::pn, *(op->label()));
674 } else
675 #endif
676 __ brx(acond, false, Assembler::pn, *(op->label()));
677 }
678 // The peephole pass fills the delay slot
679 }
682 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
683 Bytecodes::Code code = op->bytecode();
684 LIR_Opr dst = op->result_opr();
686 switch(code) {
687 case Bytecodes::_i2l: {
688 Register rlo = dst->as_register_lo();
689 Register rhi = dst->as_register_hi();
690 Register rval = op->in_opr()->as_register();
691 #ifdef _LP64
692 __ sra(rval, 0, rlo);
693 #else
694 __ mov(rval, rlo);
695 __ sra(rval, BitsPerInt-1, rhi);
696 #endif
697 break;
698 }
699 case Bytecodes::_i2d:
700 case Bytecodes::_i2f: {
701 bool is_double = (code == Bytecodes::_i2d);
702 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
703 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
704 FloatRegister rsrc = op->in_opr()->as_float_reg();
705 if (rsrc != rdst) {
706 __ fmov(FloatRegisterImpl::S, rsrc, rdst);
707 }
708 __ fitof(w, rdst, rdst);
709 break;
710 }
711 case Bytecodes::_f2i:{
712 FloatRegister rsrc = op->in_opr()->as_float_reg();
713 Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
714 Label L;
715 // result must be 0 if value is NaN; test by comparing value to itself
716 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
717 __ fb(Assembler::f_unordered, true, Assembler::pn, L);
718 __ delayed()->st(G0, addr); // annulled if contents of rsrc are not NaN
719 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
720 // move integer result from float register to int register
721 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
722 __ bind (L);
723 break;
724 }
725 case Bytecodes::_l2i: {
726 Register rlo = op->in_opr()->as_register_lo();
727 Register rhi = op->in_opr()->as_register_hi();
728 Register rdst = dst->as_register();
729 #ifdef _LP64
730 __ sra(rlo, 0, rdst);
731 #else
732 __ mov(rlo, rdst);
733 #endif
734 break;
735 }
736 case Bytecodes::_d2f:
737 case Bytecodes::_f2d: {
738 bool is_double = (code == Bytecodes::_f2d);
739 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
740 LIR_Opr val = op->in_opr();
741 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
742 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
743 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
744 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
745 __ ftof(vw, dw, rval, rdst);
746 break;
747 }
748 case Bytecodes::_i2s:
749 case Bytecodes::_i2b: {
750 Register rval = op->in_opr()->as_register();
751 Register rdst = dst->as_register();
752 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
753 __ sll (rval, shift, rdst);
754 __ sra (rdst, shift, rdst);
755 break;
756 }
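// Editor's note: the sll/sra pair above sign-extends by shifting the value
// to the top of the register and back. For _i2b (shift == 24), assuming
// 32-bit arithmetic: 0x000001ff -> sll 24 -> 0xff000000 -> sra 24 ->
// 0xffffffff, i.e. (byte)0x1ff == -1, matching the JLS narrowing rules.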
757 case Bytecodes::_i2c: {
758 Register rval = op->in_opr()->as_register();
759 Register rdst = dst->as_register();
760 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
761 __ sll (rval, shift, rdst);
762 __ srl (rdst, shift, rdst);
763 break;
764 }
766 default: ShouldNotReachHere();
767 }
768 }
771 void LIR_Assembler::align_call(LIR_Code) {
772 // do nothing since all instructions are word aligned on sparc
773 }
776 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
777 __ call(op->addr(), rtype);
778 // The peephole pass fills the delay slot, add_call_info is done in
779 // LIR_Assembler::emit_delay.
780 }
783 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
784 __ ic_call(op->addr(), false);
785 // The peephole pass fills the delay slot, add_call_info is done in
786 // LIR_Assembler::emit_delay.
787 }
790 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
791 add_debug_info_for_null_check_here(op->info());
792 __ load_klass(O0, G3_scratch);
793 if (Assembler::is_simm13(op->vtable_offset())) {
794 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
795 } else {
796 // This will generate 2 instructions
797 __ set(op->vtable_offset(), G5_method);
798 // the ld_ptr below completes the sequence: set_hi, set, ld_ptr
799 __ ld_ptr(G3_scratch, G5_method, G5_method);
800 }
801 __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
802 __ callr(G3_scratch, G0);
803 // the peephole pass fills the delay slot
804 }
806 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
807 int store_offset;
808 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
809 assert(!unaligned, "can't handle this");
810 // for offsets larger than a simm13 we set up the offset in O7
811 __ set(offset, O7);
812 store_offset = store(from_reg, base, O7, type, wide);
813 } else {
814 if (type == T_ARRAY || type == T_OBJECT) {
815 __ verify_oop(from_reg->as_register());
816 }
817 store_offset = code_offset();
818 switch (type) {
819 case T_BOOLEAN: // fall through
820 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
821 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
822 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
823 case T_INT : __ stw(from_reg->as_register(), base, offset); break;
824 case T_LONG :
825 #ifdef _LP64
826 if (unaligned || PatchALot) {
827 __ srax(from_reg->as_register_lo(), 32, O7);
828 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
829 __ stw(O7, base, offset + hi_word_offset_in_bytes);
830 } else {
831 __ stx(from_reg->as_register_lo(), base, offset);
832 }
833 #else
834 assert(Assembler::is_simm13(offset + 4), "must be");
835 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
836 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
837 #endif
838 break;
839 case T_ADDRESS:
840 case T_METADATA:
841 __ st_ptr(from_reg->as_register(), base, offset);
842 break;
843 case T_ARRAY : // fall through
844 case T_OBJECT:
845 {
846 if (UseCompressedOops && !wide) {
847 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
848 store_offset = code_offset();
849 __ stw(G3_scratch, base, offset);
850 } else {
851 __ st_ptr(from_reg->as_register(), base, offset);
852 }
853 break;
854 }
856 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
857 case T_DOUBLE:
858 {
859 FloatRegister reg = from_reg->as_double_reg();
860 // split unaligned stores
861 if (unaligned || PatchALot) {
862 assert(Assembler::is_simm13(offset + 4), "must be");
863 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
864 __ stf(FloatRegisterImpl::S, reg, base, offset);
865 } else {
866 __ stf(FloatRegisterImpl::D, reg, base, offset);
867 }
868 break;
869 }
870 default : ShouldNotReachHere();
871 }
872 }
873 return store_offset;
874 }
877 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
878 if (type == T_ARRAY || type == T_OBJECT) {
879 __ verify_oop(from_reg->as_register());
880 }
881 int store_offset = code_offset();
882 switch (type) {
883 case T_BOOLEAN: // fall through
884 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
885 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
886 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
887 case T_INT : __ stw(from_reg->as_register(), base, disp); break;
888 case T_LONG :
889 #ifdef _LP64
890 __ stx(from_reg->as_register_lo(), base, disp);
891 #else
892 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
893 __ std(from_reg->as_register_hi(), base, disp);
894 #endif
895 break;
896 case T_ADDRESS:
897 __ st_ptr(from_reg->as_register(), base, disp);
898 break;
899 case T_ARRAY : // fall through
900 case T_OBJECT:
901 {
902 if (UseCompressedOops && !wide) {
903 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
904 store_offset = code_offset();
905 __ stw(G3_scratch, base, disp);
906 } else {
907 __ st_ptr(from_reg->as_register(), base, disp);
908 }
909 break;
910 }
911 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
912 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
913 default : ShouldNotReachHere();
914 }
915 return store_offset;
916 }
919 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
920 int load_offset;
921 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
922 assert(base != O7, "destroying register");
923 assert(!unaligned, "can't handle this");
924 // for offsets larger than a simm13 we set up the offset in O7
925 __ set(offset, O7);
926 load_offset = load(base, O7, to_reg, type, wide);
927 } else {
928 load_offset = code_offset();
929 switch(type) {
930 case T_BOOLEAN: // fall through
931 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
932 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break;
933 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
934 case T_INT : __ ld(base, offset, to_reg->as_register()); break;
935 case T_LONG :
936 if (!unaligned) {
937 #ifdef _LP64
938 __ ldx(base, offset, to_reg->as_register_lo());
939 #else
940 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
941 "must be sequential");
942 __ ldd(base, offset, to_reg->as_register_hi());
943 #endif
944 } else {
945 #ifdef _LP64
946 assert(base != to_reg->as_register_lo(), "can't handle this");
947 assert(O7 != to_reg->as_register_lo(), "can't handle this");
948 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
949 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
950 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
951 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
952 #else
953 if (base == to_reg->as_register_lo()) {
954 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
955 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
956 } else {
957 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
958 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
959 }
960 #endif
961 }
962 break;
963 case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
964 case T_ADDRESS:
965 #ifdef _LP64
966 if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
967 __ lduw(base, offset, to_reg->as_register());
968 __ decode_klass_not_null(to_reg->as_register());
969 } else
970 #endif
971 {
972 __ ld_ptr(base, offset, to_reg->as_register());
973 }
974 break;
975 case T_ARRAY : // fall through
976 case T_OBJECT:
977 {
978 if (UseCompressedOops && !wide) {
979 __ lduw(base, offset, to_reg->as_register());
980 __ decode_heap_oop(to_reg->as_register());
981 } else {
982 __ ld_ptr(base, offset, to_reg->as_register());
983 }
984 break;
985 }
986 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
987 case T_DOUBLE:
988 {
989 FloatRegister reg = to_reg->as_double_reg();
990 // split unaligned loads
991 if (unaligned || PatchALot) {
992 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
993 __ ldf(FloatRegisterImpl::S, base, offset, reg);
994 } else {
995 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
996 }
997 break;
998 }
999 default : ShouldNotReachHere();
1000 }
1001 if (type == T_ARRAY || type == T_OBJECT) {
1002 __ verify_oop(to_reg->as_register());
1003 }
1004 }
1005 return load_offset;
1006 }
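// Editor's note: with UseCompressedOops, the T_OBJECT/T_ARRAY cases above
// load a 32-bit narrow oop (lduw) and decode it to a full address
// (decode_heap_oop is, roughly, heap base + (narrow << alignment shift));
// the wide flag forces the uncompressed ld_ptr path instead.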
1009 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
1010 int load_offset = code_offset();
1011 switch(type) {
1012 case T_BOOLEAN: // fall through
1013 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
1014 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
1015 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
1016 case T_INT : __ ld(base, disp, to_reg->as_register()); break;
1017 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
1018 case T_ARRAY : // fall through
1019 case T_OBJECT:
1020 {
1021 if (UseCompressedOops && !wide) {
1022 __ lduw(base, disp, to_reg->as_register());
1023 __ decode_heap_oop(to_reg->as_register());
1024 } else {
1025 __ ld_ptr(base, disp, to_reg->as_register());
1026 }
1027 break;
1028 }
1029 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
1030 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
1031 case T_LONG :
1032 #ifdef _LP64
1033 __ ldx(base, disp, to_reg->as_register_lo());
1034 #else
1035 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
1036 "must be sequential");
1037 __ ldd(base, disp, to_reg->as_register_hi());
1038 #endif
1039 break;
1040 default : ShouldNotReachHere();
1041 }
1042 if (type == T_ARRAY || type == T_OBJECT) {
1043 __ verify_oop(to_reg->as_register());
1044 }
1045 return load_offset;
1046 }
1048 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
1049 LIR_Const* c = src->as_constant_ptr();
1050 switch (c->type()) {
1051 case T_INT:
1052 case T_FLOAT: {
1053 Register src_reg = O7;
1054 int value = c->as_jint_bits();
1055 if (value == 0) {
1056 src_reg = G0;
1057 } else {
1058 __ set(value, O7);
1059 }
1060 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1061 __ stw(src_reg, addr.base(), addr.disp());
1062 break;
1063 }
1064 case T_ADDRESS: {
1065 Register src_reg = O7;
1066 int value = c->as_jint_bits();
1067 if (value == 0) {
1068 src_reg = G0;
1069 } else {
1070 __ set(value, O7);
1071 }
1072 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1073 __ st_ptr(src_reg, addr.base(), addr.disp());
1074 break;
1075 }
1076 case T_OBJECT: {
1077 Register src_reg = O7;
1078 jobject2reg(c->as_jobject(), src_reg);
1079 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1080 __ st_ptr(src_reg, addr.base(), addr.disp());
1081 break;
1082 }
1083 case T_LONG:
1084 case T_DOUBLE: {
1085 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
1087 Register tmp = O7;
1088 int value_lo = c->as_jint_lo_bits();
1089 if (value_lo == 0) {
1090 tmp = G0;
1091 } else {
1092 __ set(value_lo, O7);
1093 }
1094 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
1095 int value_hi = c->as_jint_hi_bits();
1096 if (value_hi == 0) {
1097 tmp = G0;
1098 } else {
1099 __ set(value_hi, O7);
1100 }
1101 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
1102 break;
1103 }
1104 default:
1105 Unimplemented();
1106 }
1107 }
1110 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
1111 LIR_Const* c = src->as_constant_ptr();
1112 LIR_Address* addr = dest->as_address_ptr();
1113 Register base = addr->base()->as_pointer_register();
1114 int offset = -1;
1116 switch (c->type()) {
1117 case T_INT:
1118 case T_FLOAT:
1119 case T_ADDRESS: {
1120 LIR_Opr tmp = FrameMap::O7_opr;
1121 int value = c->as_jint_bits();
1122 if (value == 0) {
1123 tmp = FrameMap::G0_opr;
1124 } else if (Assembler::is_simm13(value)) {
1125 __ set(value, O7);
1126 }
1127 if (addr->index()->is_valid()) {
1128 assert(addr->disp() == 0, "must be zero");
1129 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1130 } else {
1131 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1132 offset = store(tmp, base, addr->disp(), type, wide, false);
1133 }
1134 break;
1135 }
1136 case T_LONG:
1137 case T_DOUBLE: {
1138 assert(!addr->index()->is_valid(), "can't handle reg reg address here");
1139 assert(Assembler::is_simm13(addr->disp()) &&
1140 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
1142 LIR_Opr tmp = FrameMap::O7_opr;
1143 int value_lo = c->as_jint_lo_bits();
1144 if (value_lo == 0) {
1145 tmp = FrameMap::G0_opr;
1146 } else {
1147 __ set(value_lo, O7);
1148 }
1149 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
1150 int value_hi = c->as_jint_hi_bits();
1151 if (value_hi == 0) {
1152 tmp = FrameMap::G0_opr;
1153 } else {
1154 __ set(value_hi, O7);
1155 }
1156 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
1157 break;
1158 }
1159 case T_OBJECT: {
1160 jobject obj = c->as_jobject();
1161 LIR_Opr tmp;
1162 if (obj == NULL) {
1163 tmp = FrameMap::G0_opr;
1164 } else {
1165 tmp = FrameMap::O7_opr;
1166 jobject2reg(c->as_jobject(), O7);
1167 }
1168 // handle either reg+reg or reg+disp address
1169 if (addr->index()->is_valid()) {
1170 assert(addr->disp() == 0, "must be zero");
1171 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1172 } else {
1173 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1174 offset = store(tmp, base, addr->disp(), type, wide, false);
1175 }
1177 break;
1178 }
1179 default:
1180 Unimplemented();
1181 }
1182 if (info != NULL) {
1183 assert(offset != -1, "offset should've been set");
1184 add_debug_info_for_null_check(offset, info);
1185 }
1186 }
1189 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
1190 LIR_Const* c = src->as_constant_ptr();
1191 LIR_Opr to_reg = dest;
1193 switch (c->type()) {
1194 case T_INT:
1195 case T_ADDRESS:
1196 {
1197 jint con = c->as_jint();
1198 if (to_reg->is_single_cpu()) {
1199 assert(patch_code == lir_patch_none, "no patching handled here");
1200 __ set(con, to_reg->as_register());
1201 } else {
1202 ShouldNotReachHere();
1203 assert(to_reg->is_single_fpu(), "wrong register kind");
1205 __ set(con, O7);
1206 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
1207 __ st(O7, temp_slot);
1208 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
1209 }
1210 }
1211 break;
1213 case T_LONG:
1214 {
1215 jlong con = c->as_jlong();
1217 if (to_reg->is_double_cpu()) {
1218 #ifdef _LP64
1219 __ set(con, to_reg->as_register_lo());
1220 #else
1221 __ set(low(con), to_reg->as_register_lo());
1222 __ set(high(con), to_reg->as_register_hi());
1223 #endif
1224 #ifdef _LP64
1225 } else if (to_reg->is_single_cpu()) {
1226 __ set(con, to_reg->as_register());
1227 #endif
1228 } else {
1229 ShouldNotReachHere();
1230 assert(to_reg->is_double_fpu(), "wrong register kind");
1231 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
1232 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
1233 __ set(low(con), O7);
1234 __ st(O7, temp_slot_lo);
1235 __ set(high(con), O7);
1236 __ st(O7, temp_slot_hi);
1237 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
1238 }
1239 }
1240 break;
1242 case T_OBJECT:
1243 {
1244 if (patch_code == lir_patch_none) {
1245 jobject2reg(c->as_jobject(), to_reg->as_register());
1246 } else {
1247 jobject2reg_with_patching(to_reg->as_register(), info);
1248 }
1249 }
1250 break;
1252 case T_METADATA:
1253 {
1254 if (patch_code == lir_patch_none) {
1255 metadata2reg(c->as_metadata(), to_reg->as_register());
1256 } else {
1257 klass2reg_with_patching(to_reg->as_register(), info);
1258 }
1259 }
1260 break;
1262 case T_FLOAT:
1263 {
1264 address const_addr = __ float_constant(c->as_jfloat());
1265 if (const_addr == NULL) {
1266 bailout("const section overflow");
1267 break;
1268 }
1269 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1270 AddressLiteral const_addrlit(const_addr, rspec);
1271 if (to_reg->is_single_fpu()) {
1272 __ patchable_sethi(const_addrlit, O7);
1273 __ relocate(rspec);
1274 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
1276 } else {
1277 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1279 __ set(const_addrlit, O7);
1280 __ ld(O7, 0, to_reg->as_register());
1281 }
1282 }
1283 break;
1285 case T_DOUBLE:
1286 {
1287 address const_addr = __ double_constant(c->as_jdouble());
1288 if (const_addr == NULL) {
1289 bailout("const section overflow");
1290 break;
1291 }
1292 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1294 if (to_reg->is_double_fpu()) {
1295 AddressLiteral const_addrlit(const_addr, rspec);
1296 __ patchable_sethi(const_addrlit, O7);
1297 __ relocate(rspec);
1298 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
1299 } else {
1300 assert(to_reg->is_double_cpu(), "Must be a long register.");
1301 #ifdef _LP64
1302 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
1303 #else
1304 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
1305 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
1306 #endif
1307 }
1309 }
1310 break;
1312 default:
1313 ShouldNotReachHere();
1314 }
1315 }
1317 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1318 Register reg = addr->base()->as_register();
1319 LIR_Opr index = addr->index();
1320 if (index->is_illegal()) {
1321 return Address(reg, addr->disp());
1322 } else {
1323 assert (addr->disp() == 0, "unsupported address mode");
1324 return Address(reg, index->as_pointer_register());
1325 }
1326 }
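// Editor's note: SPARC memory operands are either register + simm13
// displacement or register + register, which is why as_Address() above
// accepts a displacement only when the index is illegal, and a register
// index only when disp() == 0.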
1329 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1330 switch (type) {
1331 case T_INT:
1332 case T_FLOAT: {
1333 Register tmp = O7;
1334 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1335 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1336 __ lduw(from.base(), from.disp(), tmp);
1337 __ stw(tmp, to.base(), to.disp());
1338 break;
1339 }
1340 case T_OBJECT: {
1341 Register tmp = O7;
1342 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1343 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1344 __ ld_ptr(from.base(), from.disp(), tmp);
1345 __ st_ptr(tmp, to.base(), to.disp());
1346 break;
1347 }
1348 case T_LONG:
1349 case T_DOUBLE: {
1350 Register tmp = O7;
1351 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1352 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
1353 __ lduw(from.base(), from.disp(), tmp);
1354 __ stw(tmp, to.base(), to.disp());
1355 __ lduw(from.base(), from.disp() + 4, tmp);
1356 __ stw(tmp, to.base(), to.disp() + 4);
1357 break;
1358 }
1360 default:
1361 ShouldNotReachHere();
1362 }
1363 }
1366 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1367 Address base = as_Address(addr);
1368 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
1369 }
1372 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1373 Address base = as_Address(addr);
1374 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
1375 }
1378 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1379 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
1381 assert(type != T_METADATA, "load of metadata ptr not supported");
1382 LIR_Address* addr = src_opr->as_address_ptr();
1383 LIR_Opr to_reg = dest;
1385 Register src = addr->base()->as_pointer_register();
1386 Register disp_reg = noreg;
1387 int disp_value = addr->disp();
1388 bool needs_patching = (patch_code != lir_patch_none);
1390 if (addr->base()->type() == T_OBJECT) {
1391 __ verify_oop(src);
1392 }
1394 PatchingStub* patch = NULL;
1395 if (needs_patching) {
1396 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1397 assert(!to_reg->is_double_cpu() ||
1398 patch_code == lir_patch_none ||
1399 patch_code == lir_patch_normal, "patching doesn't match register");
1400 }
1402 if (addr->index()->is_illegal()) {
1403 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1404 if (needs_patching) {
1405 __ patchable_set(0, O7);
1406 } else {
1407 __ set(disp_value, O7);
1408 }
1409 disp_reg = O7;
1410 }
1411 } else if (unaligned || PatchALot) {
1412 __ add(src, addr->index()->as_register(), O7);
1413 src = O7;
1414 } else {
1415 disp_reg = addr->index()->as_pointer_register();
1416 assert(disp_value == 0, "can't handle 3 operand addresses");
1417 }
1419 // remember the offset of the load. The patching_epilog must be done
1420 // before the call to add_debug_info, otherwise the PcDescs don't get
1421 // entered in increasing order.
1422 int offset = code_offset();
1424 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1425 if (disp_reg == noreg) {
1426 offset = load(src, disp_value, to_reg, type, wide, unaligned);
1427 } else {
1428 assert(!unaligned, "can't handle this");
1429 offset = load(src, disp_reg, to_reg, type, wide);
1430 }
1432 if (patch != NULL) {
1433 patching_epilog(patch, patch_code, src, info);
1434 }
1435 if (info != NULL) add_debug_info_for_null_check(offset, info);
1436 }
1439 void LIR_Assembler::prefetchr(LIR_Opr src) {
1440 LIR_Address* addr = src->as_address_ptr();
1441 Address from_addr = as_Address(addr);
1443 if (VM_Version::has_v9()) {
1444 __ prefetch(from_addr, Assembler::severalReads);
1445 }
1446 }
1449 void LIR_Assembler::prefetchw(LIR_Opr src) {
1450 LIR_Address* addr = src->as_address_ptr();
1451 Address from_addr = as_Address(addr);
1453 if (VM_Version::has_v9()) {
1454 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
1455 }
1456 }
1459 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1460 Address addr;
1461 if (src->is_single_word()) {
1462 addr = frame_map()->address_for_slot(src->single_stack_ix());
1463 } else if (src->is_double_word()) {
1464 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1465 }
1467 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1468 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
1469 }
1472 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1473 Address addr;
1474 if (dest->is_single_word()) {
1475 addr = frame_map()->address_for_slot(dest->single_stack_ix());
1476 } else if (dest->is_double_word()) {
1477 addr = frame_map()->address_for_slot(dest->double_stack_ix());
1478 }
1479 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1480 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
1481 }
1484 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1485 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1486 if (from_reg->is_double_fpu()) {
1487 // double to double moves
1488 assert(to_reg->is_double_fpu(), "should match");
1489 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1490 } else {
1491 // float to float moves
1492 assert(to_reg->is_single_fpu(), "should match");
1493 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1494 }
1495 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1496 if (from_reg->is_double_cpu()) {
1497 #ifdef _LP64
1498 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1499 #else
1500 assert(to_reg->is_double_cpu() &&
1501 from_reg->as_register_hi() != to_reg->as_register_lo() &&
1502 from_reg->as_register_lo() != to_reg->as_register_hi(),
1503 "should both be long and not overlap");
1504 // long to long moves
1505 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
1506 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
1507 #endif
1508 #ifdef _LP64
1509 } else if (to_reg->is_double_cpu()) {
1510 // int to int moves
1511 __ mov(from_reg->as_register(), to_reg->as_register_lo());
1512 #endif
1513 } else {
1514 // int to int moves
1515 __ mov(from_reg->as_register(), to_reg->as_register());
1516 }
1517 } else {
1518 ShouldNotReachHere();
1519 }
1520 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1521 __ verify_oop(to_reg->as_register());
1522 }
1523 }
1526 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1527 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1528 bool wide, bool unaligned) {
1529 assert(type != T_METADATA, "store of metadata ptr not supported");
1530 LIR_Address* addr = dest->as_address_ptr();
1532 Register src = addr->base()->as_pointer_register();
1533 Register disp_reg = noreg;
1534 int disp_value = addr->disp();
1535 bool needs_patching = (patch_code != lir_patch_none);
1537 if (addr->base()->is_oop_register()) {
1538 __ verify_oop(src);
1539 }
1541 PatchingStub* patch = NULL;
1542 if (needs_patching) {
1543 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1544 assert(!from_reg->is_double_cpu() ||
1545 patch_code == lir_patch_none ||
1546 patch_code == lir_patch_normal, "patching doesn't match register");
1547 }
1549 if (addr->index()->is_illegal()) {
1550 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1551 if (needs_patching) {
1552 __ patchable_set(0, O7);
1553 } else {
1554 __ set(disp_value, O7);
1555 }
1556 disp_reg = O7;
1557 }
1558 } else if (unaligned || PatchALot) {
1559 __ add(src, addr->index()->as_register(), O7);
1560 src = O7;
1561 } else {
1562 disp_reg = addr->index()->as_pointer_register();
1563 assert(disp_value == 0, "can't handle 3 operand addresses");
1564 }
1566 // remember the offset of the store. The patching_epilog must be done
1567 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1568 // entered in increasing order.
1569 int offset;
1571 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1572 if (disp_reg == noreg) {
1573 offset = store(from_reg, src, disp_value, type, wide, unaligned);
1574 } else {
1575 assert(!unaligned, "can't handle this");
1576 offset = store(from_reg, src, disp_reg, type, wide);
1577 }
1579 if (patch != NULL) {
1580 patching_epilog(patch, patch_code, src, info);
1581 }
1583 if (info != NULL) add_debug_info_for_null_check(offset, info);
1584 }
1587 void LIR_Assembler::return_op(LIR_Opr result) {
1588 // the poll may need a register so just pick one that isn't the return register
1589 #if defined(TIERED) && !defined(_LP64)
1590 if (result->type_field() == LIR_OprDesc::long_type) {
1591 // Must move the result to G1
1592 // Must leave proper result in O0,O1 and G1 (TIERED only)
1593 __ sllx(I0, 32, G1); // Shift bits into high G1
1594 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
1595 __ or3 (I1, G1, G1); // OR 64 bits into G1
1596 #ifdef ASSERT
1597 // mangle it so any problems will show up
1598 __ set(0xdeadbeef, I0);
1599 __ set(0xdeadbeef, I1);
1600 #endif
1601 }
1602 #endif // TIERED
1603 __ set((intptr_t)os::get_polling_page(), L0);
1604 __ relocate(relocInfo::poll_return_type);
1605 __ ld_ptr(L0, 0, G0);
1606 __ ret();
1607 __ delayed()->restore();
1608 }
1611 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1612 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
1613 if (info != NULL) {
1614 add_debug_info_for_branch(info);
1615 } else {
1616 __ relocate(relocInfo::poll_type);
1617 }
1619 int offset = __ offset();
1620 __ ld_ptr(tmp->as_register(), 0, G0);
1622 return offset;
1623 }
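// Editor's note: the poll above loads from the polling page into G0 (a
// hardwired-zero register, so the value is discarded). To request a
// safepoint the VM protects the page; the load then faults, and the
// signal handler maps the faulting pc back to this recorded offset.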
1626 void LIR_Assembler::emit_static_call_stub() {
1627 address call_pc = __ pc();
1628 address stub = __ start_a_stub(call_stub_size);
1629 if (stub == NULL) {
1630 bailout("static call stub overflow");
1631 return;
1632 }
1634 int start = __ offset();
1635 __ relocate(static_stub_Relocation::spec(call_pc));
1637 __ set_metadata(NULL, G5);
1638 // must be set to -1 at code generation time
1639 AddressLiteral addrlit(-1);
1640 __ jump_to(addrlit, G3);
1641 __ delayed()->nop();
1643 assert(__ offset() - start <= call_stub_size, "stub too big");
1644 __ end_a_stub();
1645 }
1648 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1649 if (opr1->is_single_fpu()) {
1650 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
1651 } else if (opr1->is_double_fpu()) {
1652 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
1653 } else if (opr1->is_single_cpu()) {
1654 if (opr2->is_constant()) {
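// SPARC compare immediates are simm13; wider constants are first
// materialized into the O7 scratch register.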
1655 switch (opr2->as_constant_ptr()->type()) {
1656 case T_INT:
1657 { jint con = opr2->as_constant_ptr()->as_jint();
1658 if (Assembler::is_simm13(con)) {
1659 __ cmp(opr1->as_register(), con);
1660 } else {
1661 __ set(con, O7);
1662 __ cmp(opr1->as_register(), O7);
1663 }
1664 }
1665 break;
1667 case T_OBJECT:
1668 // there are only equal/notequal comparisons on objects
1669 { jobject con = opr2->as_constant_ptr()->as_jobject();
1670 if (con == NULL) {
1671 __ cmp(opr1->as_register(), 0);
1672 } else {
1673 jobject2reg(con, O7);
1674 __ cmp(opr1->as_register(), O7);
1675 }
1676 }
1677 break;
1679 default:
1680 ShouldNotReachHere();
1681 break;
1682 }
1683 } else {
1684 if (opr2->is_address()) {
1685 LIR_Address * addr = opr2->as_address_ptr();
1686 BasicType type = addr->type();
1687 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1688 else __ ld(as_Address(addr), O7);
1689 __ cmp(opr1->as_register(), O7);
1690 } else {
1691 __ cmp(opr1->as_register(), opr2->as_register());
1692 }
1693 }
1694 } else if (opr1->is_double_cpu()) {
1695 Register xlo = opr1->as_register_lo();
1696 Register xhi = opr1->as_register_hi();
1697 if (opr2->is_constant() && opr2->as_jlong() == 0) {
1698 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
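// Comparing a long against zero: or the value (or its two halves on
// 32-bit) into G0 so only the condition codes are produced.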
1699 #ifdef _LP64
1700 __ orcc(xhi, G0, G0);
1701 #else
1702 __ orcc(xhi, xlo, G0);
1703 #endif
1704 } else if (opr2->is_register()) {
1705 Register ylo = opr2->as_register_lo();
1706 Register yhi = opr2->as_register_hi();
1707 #ifdef _LP64
1708 __ cmp(xlo, ylo);
1709 #else
1710 __ subcc(xlo, ylo, xlo);
1711 __ subccc(xhi, yhi, xhi);
1712 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1713 __ orcc(xhi, xlo, G0);
1714 }
1715 #endif
1716 } else {
1717 ShouldNotReachHere();
1718 }
1719 } else if (opr1->is_address()) {
1720 LIR_Address * addr = opr1->as_address_ptr();
1721 BasicType type = addr->type();
1722 assert (opr2->is_constant(), "Checking");
1723 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1724 else __ ld(as_Address(addr), O7);
1725 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
1726 } else {
1727 ShouldNotReachHere();
1728 }
1729 }
1732 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1733 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1734 bool is_unordered_less = (code == lir_ucmp_fd2i);
1735 if (left->is_single_fpu()) {
1736 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1737 } else if (left->is_double_fpu()) {
1738 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1739 } else {
1740 ShouldNotReachHere();
1741 }
1742 } else if (code == lir_cmp_l2i) {
1743 #ifdef _LP64
1744 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
1745 #else
1746 __ lcmp(left->as_register_hi(), left->as_register_lo(),
1747 right->as_register_hi(), right->as_register_lo(),
1748 dst->as_register());
1749 #endif
1750 } else {
1751 ShouldNotReachHere();
1752 }
1753 }
1756 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1757 Assembler::Condition acond;
1758 switch (condition) {
1759 case lir_cond_equal: acond = Assembler::equal; break;
1760 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1761 case lir_cond_less: acond = Assembler::less; break;
1762 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1763 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1764 case lir_cond_greater: acond = Assembler::greater; break;
1765 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
1766 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
1767 default: ShouldNotReachHere();
1768 };
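// The move of opr1 into the result is split around the branch so the delay
// slot isn't wasted: for a large int constant the sethi is emitted before
// the branch and the or3 with the low 10 bits rides in the delay slot.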
1770 if (opr1->is_constant() && opr1->type() == T_INT) {
1771 Register dest = result->as_register();
1772 // load up first part of constant before branch
1773 // and do the rest in the delay slot.
1774 if (!Assembler::is_simm13(opr1->as_jint())) {
1775 __ sethi(opr1->as_jint(), dest);
1776 }
1777 } else if (opr1->is_constant()) {
1778 const2reg(opr1, result, lir_patch_none, NULL);
1779 } else if (opr1->is_register()) {
1780 reg2reg(opr1, result);
1781 } else if (opr1->is_stack()) {
1782 stack2reg(opr1, result, result->type());
1783 } else {
1784 ShouldNotReachHere();
1785 }
1786 Label skip;
1787 #ifdef _LP64
1788 if (type == T_INT) {
1789 __ br(acond, false, Assembler::pt, skip);
1790 } else
1791 #endif
1792 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
1793 if (opr1->is_constant() && opr1->type() == T_INT) {
1794 Register dest = result->as_register();
1795 if (Assembler::is_simm13(opr1->as_jint())) {
1796 __ delayed()->or3(G0, opr1->as_jint(), dest);
1797 } else {
1798 // the sethi has been done above, so just put in the low 10 bits
1799 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1800 }
1801 } else {
1802 // can't do anything useful in the delay slot
1803 __ delayed()->nop();
1804 }
1805 if (opr2->is_constant()) {
1806 const2reg(opr2, result, lir_patch_none, NULL);
1807 } else if (opr2->is_register()) {
1808 reg2reg(opr2, result);
1809 } else if (opr2->is_stack()) {
1810 stack2reg(opr2, result, result->type());
1811 } else {
1812 ShouldNotReachHere();
1813 }
1814 __ bind(skip);
1815 }
1818 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1819 assert(info == NULL, "unused on this code path");
1820 assert(left->is_register(), "wrong items state");
1821 assert(dest->is_register(), "wrong items state");
1823 if (right->is_register()) {
1824 if (dest->is_float_kind()) {
1826 FloatRegister lreg, rreg, res;
1827 FloatRegisterImpl::Width w;
1828 if (right->is_single_fpu()) {
1829 w = FloatRegisterImpl::S;
1830 lreg = left->as_float_reg();
1831 rreg = right->as_float_reg();
1832 res = dest->as_float_reg();
1833 } else {
1834 w = FloatRegisterImpl::D;
1835 lreg = left->as_double_reg();
1836 rreg = right->as_double_reg();
1837 res = dest->as_double_reg();
1838 }
1840 switch (code) {
1841 case lir_add: __ fadd(w, lreg, rreg, res); break;
1842 case lir_sub: __ fsub(w, lreg, rreg, res); break;
1843 case lir_mul: // fall through
1844 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1845 case lir_div: // fall through
1846 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1847 default: ShouldNotReachHere();
1848 }
1850 } else if (dest->is_double_cpu()) {
1851 #ifdef _LP64
1852 Register dst_lo = dest->as_register_lo();
1853 Register op1_lo = left->as_pointer_register();
1854 Register op2_lo = right->as_pointer_register();
1856 switch (code) {
1857 case lir_add:
1858 __ add(op1_lo, op2_lo, dst_lo);
1859 break;
1861 case lir_sub:
1862 __ sub(op1_lo, op2_lo, dst_lo);
1863 break;
1865 default: ShouldNotReachHere();
1866 }
1867 #else
1868 Register op1_lo = left->as_register_lo();
1869 Register op1_hi = left->as_register_hi();
1870 Register op2_lo = right->as_register_lo();
1871 Register op2_hi = right->as_register_hi();
1872 Register dst_lo = dest->as_register_lo();
1873 Register dst_hi = dest->as_register_hi();
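// 32-bit long arithmetic is an add/sub-with-carry pair: the 'cc' form sets
// the carry from the low halves and addc/subc folds it into the high halves.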
1875 switch (code) {
1876 case lir_add:
1877 __ addcc(op1_lo, op2_lo, dst_lo);
1878 __ addc (op1_hi, op2_hi, dst_hi);
1879 break;
1881 case lir_sub:
1882 __ subcc(op1_lo, op2_lo, dst_lo);
1883 __ subc (op1_hi, op2_hi, dst_hi);
1884 break;
1886 default: ShouldNotReachHere();
1887 }
1888 #endif
1889 } else {
1890 assert (right->is_single_cpu(), "Just Checking");
1892 Register lreg = left->as_register();
1893 Register res = dest->as_register();
1894 Register rreg = right->as_register();
1895 switch (code) {
1896 case lir_add: __ add (lreg, rreg, res); break;
1897 case lir_sub: __ sub (lreg, rreg, res); break;
1898 case lir_mul: __ mulx (lreg, rreg, res); break;
1899 default: ShouldNotReachHere();
1900 }
1901 }
1902 } else {
1903 assert (right->is_constant(), "must be constant");
1905 if (dest->is_single_cpu()) {
1906 Register lreg = left->as_register();
1907 Register res = dest->as_register();
1908 int simm13 = right->as_constant_ptr()->as_jint();
1910 switch (code) {
1911 case lir_add: __ add (lreg, simm13, res); break;
1912 case lir_sub: __ sub (lreg, simm13, res); break;
1913 case lir_mul: __ mulx (lreg, simm13, res); break;
1914 default: ShouldNotReachHere();
1915 }
1916 } else {
1917 Register lreg = left->as_pointer_register();
1918 Register res = dest->as_register_lo();
1919 long con = right->as_constant_ptr()->as_jlong();
1920 assert(Assembler::is_simm13(con), "must be simm13");
1922 switch (code) {
1923 case lir_add: __ add (lreg, (int)con, res); break;
1924 case lir_sub: __ sub (lreg, (int)con, res); break;
1925 case lir_mul: __ mulx (lreg, (int)con, res); break;
1926 default: ShouldNotReachHere();
1927 }
1928 }
1929 }
1930 }
1933 void LIR_Assembler::fpop() {
1934 // do nothing
1935 }
1938 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1939 switch (code) {
1940 case lir_sin:
1941 case lir_tan:
1942 case lir_cos: {
1943 assert(thread->is_valid(), "preserve the thread object for performance reasons");
1944 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1945 break;
1946 }
1947 case lir_sqrt: {
1948 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1949 FloatRegister src_reg = value->as_double_reg();
1950 FloatRegister dst_reg = dest->as_double_reg();
1951 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1952 break;
1953 }
1954 case lir_abs: {
1955 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1956 FloatRegister src_reg = value->as_double_reg();
1957 FloatRegister dst_reg = dest->as_double_reg();
1958 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1959 break;
1960 }
1961 default: {
1962 ShouldNotReachHere();
1963 break;
1964 }
1965 }
1966 }
1969 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1970 if (right->is_constant()) {
1971 if (dest->is_single_cpu()) {
1972 int simm13 = right->as_constant_ptr()->as_jint();
1973 switch (code) {
1974 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
1975 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
1976 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
1977 default: ShouldNotReachHere();
1978 }
1979 } else {
1980 long c = right->as_constant_ptr()->as_jlong();
1981 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
1982 int simm13 = (int)c;
1983 switch (code) {
1984 case lir_logic_and:
1985 #ifndef _LP64
1986 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
1987 #endif
1988 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
1989 break;
1991 case lir_logic_or:
1992 #ifndef _LP64
1993 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
1994 #endif
1995 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
1996 break;
1998 case lir_logic_xor:
1999 #ifndef _LP64
2000 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
2001 #endif
2002 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
2003 break;
2005 default: ShouldNotReachHere();
2006 }
2007 }
2008 } else {
2009 assert(right->is_register(), "right should be in register");
2011 if (dest->is_single_cpu()) {
2012 switch (code) {
2013 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
2014 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
2015 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
2016 default: ShouldNotReachHere();
2017 }
2018 } else {
2019 #ifdef _LP64
2020 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
2021 left->as_register_lo();
2022 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
2023 right->as_register_lo();
2025 switch (code) {
2026 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
2027 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
2028 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
2029 default: ShouldNotReachHere();
2030 }
2031 #else
2032 switch (code) {
2033 case lir_logic_and:
2034 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2035 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2036 break;
2038 case lir_logic_or:
2039 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2040 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2041 break;
2043 case lir_logic_xor:
2044 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2045 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2046 break;
2048 default: ShouldNotReachHere();
2049 }
2050 #endif
2051 }
2052 }
2053 }
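// Returns log2 of the element size so an array index can be scaled to a
// byte offset with a single shift.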
2056 int LIR_Assembler::shift_amount(BasicType t) {
2057 int elem_size = type2aelembytes(t);
2058 switch (elem_size) {
2059 case 1 : return 0;
2060 case 2 : return 1;
2061 case 4 : return 2;
2062 case 8 : return 3;
2063 }
2064 ShouldNotReachHere();
2065 return -1;
2066 }
2069 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2070 assert(exceptionOop->as_register() == Oexception, "should match");
2071 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
2073 info->add_register_oop(exceptionOop);
2075 // reuse the debug info from the safepoint poll for the throw op itself
2076 address pc_for_athrow = __ pc();
2077 int pc_for_athrow_offset = __ offset();
2078 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2079 __ set(pc_for_athrow, Oissuing_pc, rspec);
2080 add_call_info(pc_for_athrow_offset, info); // for exception handler
2082 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2083 __ delayed()->nop();
2084 }
2087 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2088 assert(exceptionOop->as_register() == Oexception, "should match");
2090 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
2091 __ delayed()->nop();
2092 }
2094 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2095 Register src = op->src()->as_register();
2096 Register dst = op->dst()->as_register();
2097 Register src_pos = op->src_pos()->as_register();
2098 Register dst_pos = op->dst_pos()->as_register();
2099 Register length = op->length()->as_register();
2100 Register tmp = op->tmp()->as_register();
2101 Register tmp2 = O7;
2103 int flags = op->flags();
2104 ciArrayKlass* default_type = op->expected_type();
2105 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2106 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2108 #ifdef _LP64
2109 // the upper 32 bits must be clear; sign-extend the 32-bit values
2110 __ sra(dst_pos, 0, dst_pos);
2111 __ sra(src_pos, 0, src_pos);
2112 __ sra(length, 0, length);
2113 #endif
2115 // set up the arraycopy stub information
2116 ArrayCopyStub* stub = op->stub();
2118 // always use the stub if no type information is available; it's OK if
2119 // the known type isn't loaded, since the code sanity-checks it in debug
2120 // mode, and the type isn't required when we know the exact type.
2121 // Also check that the type is an array type.
2122 if (op->expected_type() == NULL) {
2123 __ mov(src, O0);
2124 __ mov(src_pos, O1);
2125 __ mov(dst, O2);
2126 __ mov(dst_pos, O3);
2127 __ mov(length, O4);
2128 address copyfunc_addr = StubRoutines::generic_arraycopy();
2130 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2131 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2132 } else {
2133 #ifndef PRODUCT
2134 if (PrintC1Statistics) {
2135 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
2136 __ inc_counter(counter, G1, G3);
2137 }
2138 #endif
2139 __ call_VM_leaf(tmp, copyfunc_addr);
2140 }
2142 if (copyfunc_addr != NULL) {
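// The generic stub returns 0 on success or ~K on a partial copy, where K
// is the number of elements already copied; xor with -1 recovers K so the
// positions and length can be advanced before falling into the slow stub.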
2143 __ xor3(O0, -1, tmp);
2144 __ sub(length, tmp, length);
2145 __ add(src_pos, tmp, src_pos);
2146 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2147 __ delayed()->add(dst_pos, tmp, dst_pos);
2148 } else {
2149 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2150 __ delayed()->nop();
2151 }
2152 __ bind(*stub->continuation());
2153 return;
2154 }
2156 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2158 // make sure src and dst are non-null; the range checks below load the array lengths
2159 if (flags & LIR_OpArrayCopy::src_null_check) {
2160 __ tst(src);
2161 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2162 __ delayed()->nop();
2163 }
2165 if (flags & LIR_OpArrayCopy::dst_null_check) {
2166 __ tst(dst);
2167 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2168 __ delayed()->nop();
2169 }
2171 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2172 // test src_pos register
2173 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
2174 __ delayed()->nop();
2175 }
2177 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2178 // test dst_pos register
2179 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
2180 __ delayed()->nop();
2181 }
2183 if (flags & LIR_OpArrayCopy::length_positive_check) {
2184 // make sure length isn't negative
2185 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
2186 __ delayed()->nop();
2187 }
2189 if (flags & LIR_OpArrayCopy::src_range_check) {
2190 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2191 __ add(length, src_pos, tmp);
2192 __ cmp(tmp2, tmp);
2193 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2194 __ delayed()->nop();
2195 }
2197 if (flags & LIR_OpArrayCopy::dst_range_check) {
2198 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2199 __ add(length, dst_pos, tmp);
2200 __ cmp(tmp2, tmp);
2201 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2202 __ delayed()->nop();
2203 }
2205 int shift = shift_amount(basic_type);
2207 if (flags & LIR_OpArrayCopy::type_check) {
2208 // We don't know whether the array types are compatible
2209 if (basic_type != T_OBJECT) {
2210 // Simple test for basic type arrays
2211 if (UseCompressedClassPointers) {
2212 // No need to decode; comparing the compressed klass words suffices
2213 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
2214 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2215 __ cmp(tmp, tmp2);
2216 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2217 } else {
2218 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2219 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2220 __ cmp(tmp, tmp2);
2221 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2222 }
2223 __ delayed()->nop();
2224 } else {
2225 // For object arrays, if src is a sub class of dst then we can
2226 // safely do the copy.
2227 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2229 Label cont, slow;
2230 assert_different_registers(tmp, tmp2, G3, G1);
2232 __ load_klass(src, G3);
2233 __ load_klass(dst, G1);
2235 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2237 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2238 __ delayed()->nop();
2240 __ cmp(G3, 0);
2241 if (copyfunc_addr != NULL) { // use stub if available
2242 // src is not a sub class of dst so we have to do a
2243 // per-element check.
2244 __ br(Assembler::notEqual, false, Assembler::pt, cont);
2245 __ delayed()->nop();
2247 __ bind(slow);
2249 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2250 if ((flags & mask) != mask) {
2251 // Check that the one not statically known to be an object array actually is one.
2252 assert(flags & mask, "one of the two should be known to be an object array");
2254 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2255 __ load_klass(src, tmp);
2256 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2257 __ load_klass(dst, tmp);
2258 }
2259 int lh_offset = in_bytes(Klass::layout_helper_offset());
2261 __ lduw(tmp, lh_offset, tmp2);
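// The layout helper encodes array-ness and element type in a single word,
// so one compare against the T_OBJECT array pattern classifies the klass.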
2263 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2264 __ set(objArray_lh, tmp);
2265 __ cmp(tmp, tmp2);
2266 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2267 __ delayed()->nop();
2268 }
2270 Register src_ptr = O0;
2271 Register dst_ptr = O1;
2272 Register len = O2;
2273 Register chk_off = O3;
2274 Register super_k = O4;
2276 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2277 if (shift == 0) {
2278 __ add(src_ptr, src_pos, src_ptr);
2279 } else {
2280 __ sll(src_pos, shift, tmp);
2281 __ add(src_ptr, tmp, src_ptr);
2282 }
2284 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2285 if (shift == 0) {
2286 __ add(dst_ptr, dst_pos, dst_ptr);
2287 } else {
2288 __ sll(dst_pos, shift, tmp);
2289 __ add(dst_ptr, tmp, dst_ptr);
2290 }
2291 __ mov(length, len);
2292 __ load_klass(dst, tmp);
2294 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2295 __ ld_ptr(tmp, ek_offset, super_k);
2297 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2298 __ lduw(super_k, sco_offset, chk_off);
2300 __ call_VM_leaf(tmp, copyfunc_addr);
2302 #ifndef PRODUCT
2303 if (PrintC1Statistics) {
2304 Label failed;
2305 __ br_notnull_short(O0, Assembler::pn, failed);
2306 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
2307 __ bind(failed);
2308 }
2309 #endif
2311 __ br_null(O0, false, Assembler::pt, *stub->continuation());
2312 __ delayed()->xor3(O0, -1, tmp);
2314 #ifndef PRODUCT
2315 if (PrintC1Statistics) {
2316 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
2317 }
2318 #endif
2320 __ sub(length, tmp, length);
2321 __ add(src_pos, tmp, src_pos);
2322 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
2323 __ delayed()->add(dst_pos, tmp, dst_pos);
2325 __ bind(cont);
2326 } else {
2327 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2328 __ delayed()->nop();
2329 __ bind(cont);
2330 }
2331 }
2332 }
2334 #ifdef ASSERT
2335 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2336 // Sanity check the known type with the incoming class. For the
2337 // primitive case the types must match exactly with src.klass and
2338 // dst.klass each exactly matching the default type. For the
2339 // object array case, if no type check is needed then either the
2340 // dst type is exactly the expected type and the src type is a
2341 // subtype which we can't check or src is the same array as dst
2342 // but not necessarily exactly of type default_type.
2343 Label known_ok, halt;
2344 metadata2reg(op->expected_type()->constant_encoding(), tmp);
2345 if (UseCompressedClassPointers) {
2346 // tmp holds the default type. It currently comes uncompressed after the
2347 // load of a constant, so encode it.
2348 __ encode_klass_not_null(tmp);
2349 // load the raw value of the dst klass, since we will be comparing
2350 // uncompressed values directly.
2351 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2352 if (basic_type != T_OBJECT) {
2353 __ cmp(tmp, tmp2);
2354 __ br(Assembler::notEqual, false, Assembler::pn, halt);
2355 // load the raw value of the src klass.
2356 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2357 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2358 } else {
2359 __ cmp(tmp, tmp2);
2360 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2361 __ delayed()->cmp(src, dst);
2362 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2363 __ delayed()->nop();
2364 }
2365 } else {
2366 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2367 if (basic_type != T_OBJECT) {
2368 __ cmp(tmp, tmp2);
2369 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2370 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2371 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2372 } else {
2373 __ cmp(tmp, tmp2);
2374 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2375 __ delayed()->cmp(src, dst);
2376 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2377 __ delayed()->nop();
2378 }
2379 }
2380 __ bind(halt);
2381 __ stop("incorrect type information in arraycopy");
2382 __ bind(known_ok);
2383 }
2384 #endif
2386 #ifndef PRODUCT
2387 if (PrintC1Statistics) {
2388 address counter = Runtime1::arraycopy_count_address(basic_type);
2389 __ inc_counter(counter, G1, G3);
2390 }
2391 #endif
2393 Register src_ptr = O0;
2394 Register dst_ptr = O1;
2395 Register len = O2;
2397 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2398 if (shift == 0) {
2399 __ add(src_ptr, src_pos, src_ptr);
2400 } else {
2401 __ sll(src_pos, shift, tmp);
2402 __ add(src_ptr, tmp, src_ptr);
2403 }
2405 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2406 if (shift == 0) {
2407 __ add(dst_ptr, dst_pos, dst_ptr);
2408 } else {
2409 __ sll(dst_pos, shift, tmp);
2410 __ add(dst_ptr, tmp, dst_ptr);
2411 }
2413 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2414 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2415 const char *name;
2416 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2418 // arraycopy stubs take a length in number of elements, so don't scale it.
2419 __ mov(length, len);
2420 __ call_VM_leaf(tmp, entry);
2422 __ bind(*stub->continuation());
2423 }
2426 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2427 if (dest->is_single_cpu()) {
2428 #ifdef _LP64
2429 if (left->type() == T_OBJECT) {
2430 switch (code) {
2431 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
2432 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
2433 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2434 default: ShouldNotReachHere();
2435 }
2436 } else
2437 #endif
2438 switch (code) {
2439 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
2440 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
2441 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2442 default: ShouldNotReachHere();
2443 }
2444 } else {
2445 #ifdef _LP64
2446 switch (code) {
2447 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2448 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2449 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2450 default: ShouldNotReachHere();
2451 }
2452 #else
2453 switch (code) {
2454 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2455 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2456 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2457 default: ShouldNotReachHere();
2458 }
2459 #endif
2460 }
2461 }
2464 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2465 #ifdef _LP64
2466 if (left->type() == T_OBJECT) {
2467 count = count & 63; // shouldn't shift by more than the number of bits in an intptr_t
2468 Register l = left->as_register();
2469 Register d = dest->as_register_lo();
2470 switch (code) {
2471 case lir_shl: __ sllx (l, count, d); break;
2472 case lir_shr: __ srax (l, count, d); break;
2473 case lir_ushr: __ srlx (l, count, d); break;
2474 default: ShouldNotReachHere();
2475 }
2476 return;
2477 }
2478 #endif
2480 if (dest->is_single_cpu()) {
2481 count = count & 0x1F; // Java spec
2482 switch (code) {
2483 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
2484 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
2485 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
2486 default: ShouldNotReachHere();
2487 }
2488 } else if (dest->is_double_cpu()) {
2489 count = count & 63; // Java spec
2490 switch (code) {
2491 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2492 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2493 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2494 default: ShouldNotReachHere();
2495 }
2496 } else {
2497 ShouldNotReachHere();
2498 }
2499 }
2502 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2503 assert(op->tmp1()->as_register() == G1 &&
2504 op->tmp2()->as_register() == G3 &&
2505 op->tmp3()->as_register() == G4 &&
2506 op->obj()->as_register() == O0 &&
2507 op->klass()->as_register() == G5, "must be");
2508 if (op->init_check()) {
2509 __ ldub(op->klass()->as_register(),
2510 in_bytes(InstanceKlass::init_state_offset()),
2511 op->tmp1()->as_register());
2512 add_debug_info_for_null_check_here(op->stub()->info());
2513 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2514 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2515 __ delayed()->nop();
2516 }
2517 __ allocate_object(op->obj()->as_register(),
2518 op->tmp1()->as_register(),
2519 op->tmp2()->as_register(),
2520 op->tmp3()->as_register(),
2521 op->header_size(),
2522 op->object_size(),
2523 op->klass()->as_register(),
2524 *op->stub()->entry());
2525 __ bind(*op->stub()->continuation());
2526 __ verify_oop(op->obj()->as_register());
2527 }
2530 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2531 assert(op->tmp1()->as_register() == G1 &&
2532 op->tmp2()->as_register() == G3 &&
2533 op->tmp3()->as_register() == G4 &&
2534 op->tmp4()->as_register() == O1 &&
2535 op->klass()->as_register() == G5, "must be");
2537 LP64_ONLY( __ signx(op->len()->as_register()); )
2538 if (UseSlowPath ||
2539 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2540 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2541 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2542 __ delayed()->nop();
2543 } else {
2544 __ allocate_array(op->obj()->as_register(),
2545 op->len()->as_register(),
2546 op->tmp1()->as_register(),
2547 op->tmp2()->as_register(),
2548 op->tmp3()->as_register(),
2549 arrayOopDesc::header_size(op->type()),
2550 type2aelembytes(op->type()),
2551 op->klass()->as_register(),
2552 *op->stub()->entry());
2553 }
2554 __ bind(*op->stub()->continuation());
2555 }
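// type_profile_helper walks the ReceiverTypeData rows twice: first to bump
// the counter of a row already holding this receiver, then to claim the
// first empty row for a receiver seen for the first time.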
2558 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2559 ciMethodData *md, ciProfileData *data,
2560 Register recv, Register tmp1, Label* update_done) {
2561 uint i;
2562 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2563 Label next_test;
2564 // See if the receiver is receiver[n].
2565 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2566 mdo_offset_bias);
2567 __ ld_ptr(receiver_addr, tmp1);
2568 __ verify_oop(tmp1);
2569 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
2570 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2571 mdo_offset_bias);
2572 __ ld_ptr(data_addr, tmp1);
2573 __ add(tmp1, DataLayout::counter_increment, tmp1);
2574 __ st_ptr(tmp1, data_addr);
2575 __ ba(*update_done);
2576 __ delayed()->nop();
2577 __ bind(next_test);
2578 }
2580 // Didn't find receiver; find next empty slot and fill it in
2581 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2582 Label next_test;
2583 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2584 mdo_offset_bias);
2585 __ ld_ptr(recv_addr, tmp1);
2586 __ br_notnull_short(tmp1, Assembler::pt, next_test);
2587 __ st_ptr(recv, recv_addr);
2588 __ set(DataLayout::counter_increment, tmp1);
2589 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2590 mdo_offset_bias);
2591 __ ba(*update_done);
2592 __ delayed()->nop();
2593 __ bind(next_test);
2594 }
2595 }
2598 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2599 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2600 md = method->method_data_or_null();
2601 assert(md != NULL, "Sanity");
2602 data = md->bci_to_data(bci);
2603 assert(data != NULL, "need data for checkcast");
2604 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2605 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2606 // The offset is large so bias the mdo by the base of the slot so
2607 // that the ld can use simm13s to reference the slots of the data
2608 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2609 }
2610 }
2612 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2613 // we always need a stub for the failure case.
2614 CodeStub* stub = op->stub();
2615 Register obj = op->object()->as_register();
2616 Register k_RInfo = op->tmp1()->as_register();
2617 Register klass_RInfo = op->tmp2()->as_register();
2618 Register dst = op->result_opr()->as_register();
2619 Register Rtmp1 = op->tmp3()->as_register();
2620 ciKlass* k = op->klass();
2623 if (obj == k_RInfo) {
2624 k_RInfo = klass_RInfo;
2625 klass_RInfo = obj;
2626 }
2628 ciMethodData* md;
2629 ciProfileData* data;
2630 int mdo_offset_bias = 0;
2631 if (op->should_profile()) {
2632 ciMethod* method = op->profiled_method();
2633 assert(method != NULL, "Should have method");
2634 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2636 Label not_null;
2637 __ br_notnull_short(obj, Assembler::pn, not_null);
2638 Register mdo = k_RInfo;
2639 Register data_val = Rtmp1;
2640 metadata2reg(md->constant_encoding(), mdo);
2641 if (mdo_offset_bias > 0) {
2642 __ set(mdo_offset_bias, data_val);
2643 __ add(mdo, data_val, mdo);
2644 }
2645 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2646 __ ldub(flags_addr, data_val);
2647 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2648 __ stb(data_val, flags_addr);
2649 __ ba(*obj_is_null);
2650 __ delayed()->nop();
2651 __ bind(not_null);
2652 } else {
2653 __ br_null(obj, false, Assembler::pn, *obj_is_null);
2654 __ delayed()->nop();
2655 }
2657 Label profile_cast_failure, profile_cast_success;
2658 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
2659 Label *success_target = op->should_profile() ? &profile_cast_success : success;
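// When profiling, successes and failures are routed through the profile
// labels first so the MDO counters are updated before the final branch.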
2661 // patching may screw with our temporaries on sparc,
2662 // so let's do it before loading the class
2663 if (k->is_loaded()) {
2664 metadata2reg(k->constant_encoding(), k_RInfo);
2665 } else {
2666 klass2reg_with_patching(k_RInfo, op->info_for_patch());
2667 }
2668 assert(obj != k_RInfo, "must be different");
2670 // get object class
2671 // not a safepoint as obj null check happens earlier
2672 __ load_klass(obj, klass_RInfo);
2673 if (op->fast_check()) {
2674 assert_different_registers(klass_RInfo, k_RInfo);
2675 __ cmp(k_RInfo, klass_RInfo);
2676 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
2677 __ delayed()->nop();
2678 } else {
2679 bool need_slow_path = true;
2680 if (k->is_loaded()) {
2681 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
2682 need_slow_path = false;
2683 // perform the fast part of the checking logic
2684 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2685 (need_slow_path ? success_target : NULL),
2686 failure_target, NULL,
2687 RegisterOrConstant(k->super_check_offset()));
2688 } else {
2689 // perform the fast part of the checking logic
2690 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
2691 failure_target, NULL);
2692 }
2693 if (need_slow_path) {
2694 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2695 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2696 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2697 __ delayed()->nop();
2698 __ cmp(G3, 0);
2699 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2700 __ delayed()->nop();
2701 // Fall through to success case
2702 }
2703 }
2705 if (op->should_profile()) {
2706 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2707 assert_different_registers(obj, mdo, recv, tmp1);
2708 __ bind(profile_cast_success);
2709 metadata2reg(md->constant_encoding(), mdo);
2710 if (mdo_offset_bias > 0) {
2711 __ set(mdo_offset_bias, tmp1);
2712 __ add(mdo, tmp1, mdo);
2713 }
2714 __ load_klass(obj, recv);
2715 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
2716 // Jump over the failure case
2717 __ ba(*success);
2718 __ delayed()->nop();
2719 // Cast failure case
2720 __ bind(profile_cast_failure);
2721 metadata2reg(md->constant_encoding(), mdo);
2722 if (mdo_offset_bias > 0) {
2723 __ set(mdo_offset_bias, tmp1);
2724 __ add(mdo, tmp1, mdo);
2725 }
2726 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2727 __ ld_ptr(data_addr, tmp1);
2728 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2729 __ st_ptr(tmp1, data_addr);
2730 __ ba(*failure);
2731 __ delayed()->nop();
2732 }
2733 __ ba(*success);
2734 __ delayed()->nop();
2735 }
2737 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2738 LIR_Code code = op->code();
2739 if (code == lir_store_check) {
2740 Register value = op->object()->as_register();
2741 Register array = op->array()->as_register();
2742 Register k_RInfo = op->tmp1()->as_register();
2743 Register klass_RInfo = op->tmp2()->as_register();
2744 Register Rtmp1 = op->tmp3()->as_register();
2746 __ verify_oop(value);
2747 CodeStub* stub = op->stub();
2748 // check if it needs to be profiled
2749 ciMethodData* md;
2750 ciProfileData* data;
2751 int mdo_offset_bias = 0;
2752 if (op->should_profile()) {
2753 ciMethod* method = op->profiled_method();
2754 assert(method != NULL, "Should have method");
2755 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2756 }
2757 Label profile_cast_success, profile_cast_failure, done;
2758 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
2759 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
2761 if (op->should_profile()) {
2762 Label not_null;
2763 __ br_notnull_short(value, Assembler::pn, not_null);
2764 Register mdo = k_RInfo;
2765 Register data_val = Rtmp1;
2766 metadata2reg(md->constant_encoding(), mdo);
2767 if (mdo_offset_bias > 0) {
2768 __ set(mdo_offset_bias, data_val);
2769 __ add(mdo, data_val, mdo);
2770 }
2771 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2772 __ ldub(flags_addr, data_val);
2773 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2774 __ stb(data_val, flags_addr);
2775 __ ba_short(done);
2776 __ bind(not_null);
2777 } else {
2778 __ br_null_short(value, Assembler::pn, done);
2779 }
2780 add_debug_info_for_null_check_here(op->info_for_exception());
2781 __ load_klass(array, k_RInfo);
2782 __ load_klass(value, klass_RInfo);
2784 // get instance klass
2785 __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo);
2786 // perform the fast part of the checking logic
2787 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
2789 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2790 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2791 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2792 __ delayed()->nop();
2793 __ cmp(G3, 0);
2794 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2795 __ delayed()->nop();
2796 // fall through to the success case
2798 if (op->should_profile()) {
2799 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2800 assert_different_registers(value, mdo, recv, tmp1);
2801 __ bind(profile_cast_success);
2802 metadata2reg(md->constant_encoding(), mdo);
2803 if (mdo_offset_bias > 0) {
2804 __ set(mdo_offset_bias, tmp1);
2805 __ add(mdo, tmp1, mdo);
2806 }
2807 __ load_klass(value, recv);
2808 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2809 __ ba_short(done);
2810 // Cast failure case
2811 __ bind(profile_cast_failure);
2812 metadata2reg(md->constant_encoding(), mdo);
2813 if (mdo_offset_bias > 0) {
2814 __ set(mdo_offset_bias, tmp1);
2815 __ add(mdo, tmp1, mdo);
2816 }
2817 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2818 __ ld_ptr(data_addr, tmp1);
2819 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2820 __ st_ptr(tmp1, data_addr);
2821 __ ba(*stub->entry());
2822 __ delayed()->nop();
2823 }
2824 __ bind(done);
2825 } else if (code == lir_checkcast) {
2826 Register obj = op->object()->as_register();
2827 Register dst = op->result_opr()->as_register();
2828 Label success;
2829 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
2830 __ bind(success);
2831 __ mov(obj, dst);
2832 } else if (code == lir_instanceof) {
2833 Register obj = op->object()->as_register();
2834 Register dst = op->result_opr()->as_register();
2835 Label success, failure, done;
2836 emit_typecheck_helper(op, &success, &failure, &failure);
2837 __ bind(failure);
2838 __ set(0, dst);
2839 __ ba_short(done);
2840 __ bind(success);
2841 __ set(1, dst);
2842 __ bind(done);
2843 } else {
2844 ShouldNotReachHere();
2845 }
2847 }
2850 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2851 if (op->code() == lir_cas_long) {
2852 assert(VM_Version::supports_cx8(), "wrong machine");
2853 Register addr = op->addr()->as_pointer_register();
2854 Register cmp_value_lo = op->cmp_value()->as_register_lo();
2855 Register cmp_value_hi = op->cmp_value()->as_register_hi();
2856 Register new_value_lo = op->new_value()->as_register_lo();
2857 Register new_value_hi = op->new_value()->as_register_hi();
2858 Register t1 = op->tmp1()->as_register();
2859 Register t2 = op->tmp2()->as_register();
2860 #ifdef _LP64
2861 __ mov(cmp_value_lo, t1);
2862 __ mov(new_value_lo, t2);
2863 // perform the compare and swap operation
2864 __ casx(addr, t1, t2);
2865 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2866 // overwritten with the original value in "addr" and will be equal to t1.
2867 __ cmp(t1, t2);
2868 #else
2869 // move high and low halves of long values into single registers
2870 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
2871 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2872 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
2873 __ sllx(new_value_hi, 32, t2);
2874 __ srl(new_value_lo, 0, new_value_lo);
2875 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
2876 // perform the compare and swap operation
2877 __ casx(addr, t1, t2);
2878 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2879 // overwritten with the original value in "addr" and will be equal to t1.
2880 // Produce icc flag for 32bit.
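// (t1 - t2) is zero iff the swap succeeded; or-ing its two 32-bit halves
// into G0 sets icc.Z exactly when the full 64-bit difference is zero.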
2881 __ sub(t1, t2, t2);
2882 __ srlx(t2, 32, t1);
2883 __ orcc(t2, t1, G0);
2884 #endif
2885 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2886 Register addr = op->addr()->as_pointer_register();
2887 Register cmp_value = op->cmp_value()->as_register();
2888 Register new_value = op->new_value()->as_register();
2889 Register t1 = op->tmp1()->as_register();
2890 Register t2 = op->tmp2()->as_register();
2891 __ mov(cmp_value, t1);
2892 __ mov(new_value, t2);
2893 if (op->code() == lir_cas_obj) {
2894 if (UseCompressedOops) {
2895 __ encode_heap_oop(t1);
2896 __ encode_heap_oop(t2);
2897 __ cas(addr, t1, t2);
2898 } else {
2899 __ cas_ptr(addr, t1, t2);
2900 }
2901 } else {
2902 __ cas(addr, t1, t2);
2903 }
2904 __ cmp(t1, t2);
2905 } else {
2906 Unimplemented();
2907 }
2908 }
2910 void LIR_Assembler::set_24bit_FPU() {
2911 Unimplemented();
2912 }
2915 void LIR_Assembler::reset_FPU() {
2916 Unimplemented();
2917 }
2920 void LIR_Assembler::breakpoint() {
2921 __ breakpoint_trap();
2922 }
2925 void LIR_Assembler::push(LIR_Opr opr) {
2926 Unimplemented();
2927 }
2930 void LIR_Assembler::pop(LIR_Opr opr) {
2931 Unimplemented();
2932 }
2935 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2936 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2937 Register dst = dst_opr->as_register();
2938 Register reg = mon_addr.base();
2939 int offset = mon_addr.disp();
2940 // compute pointer to BasicLock
2941 if (mon_addr.is_simm13()) {
2942 __ add(reg, offset, dst);
2943 } else {
2944 __ set(offset, dst);
2945 __ add(dst, reg, dst);
2946 }
2947 }
2949 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2950 fatal("CRC32 intrinsic is not implemented on this platform");
2951 }
2953 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2954 Register obj = op->obj_opr()->as_register();
2955 Register hdr = op->hdr_opr()->as_register();
2956 Register lock = op->lock_opr()->as_register();
2958 // obj may not be an oop
2959 if (op->code() == lir_lock) {
2960 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2961 if (UseFastLocking) {
2962 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2963 // add debug info for NullPointerException only if one is possible
2964 if (op->info() != NULL) {
2965 add_debug_info_for_null_check_here(op->info());
2966 }
2967 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2968 } else {
2969 // always do slow locking
2970 // note: the slow locking code could be inlined here, however if we use
2971 // slow locking, speed doesn't matter anyway and this solution is
2972 // simpler and requires less duplicated code - additionally, the
2973 // slow locking code is the same in either case which simplifies
2974 // debugging
2975 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2976 __ delayed()->nop();
2977 }
2978 } else {
2979 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2980 if (UseFastLocking) {
2981 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2982 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2983 } else {
2984 // always do slow unlocking
2985 // note: the slow unlocking code could be inlined here, however if we use
2986 // slow unlocking, speed doesn't matter anyway and this solution is
2987 // simpler and requires less duplicated code - additionally, the
2988 // slow unlocking code is the same in either case which simplifies
2989 // debugging
2990 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2991 __ delayed()->nop();
2992 }
2993 }
2994 __ bind(*op->stub()->continuation());
2995 }
2998 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2999 ciMethod* method = op->profiled_method();
3000 int bci = op->profiled_bci();
3001 ciMethod* callee = op->profiled_callee();
3003 // Update counter for all call types
3004 ciMethodData* md = method->method_data_or_null();
3005 assert(md != NULL, "Sanity");
3006 ciProfileData* data = md->bci_to_data(bci);
3007 assert(data->is_CounterData(), "need CounterData for calls");
3008 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3009 Register mdo = op->mdo()->as_register();
3010 #ifdef _LP64
3011 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
3012 Register tmp1 = op->tmp1()->as_register_lo();
3013 #else
3014 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
3015 Register tmp1 = op->tmp1()->as_register();
3016 #endif
3017 metadata2reg(md->constant_encoding(), mdo);
3018 int mdo_offset_bias = 0;
3019 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
3020 data->size_in_bytes())) {
3021 // The offset is large so bias the mdo by the base of the slot so
3022 // that the ld can use simm13s to reference the slots of the data
3023 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
3024 __ set(mdo_offset_bias, O7);
3025 __ add(mdo, O7, mdo);
3026 }
3028 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
3029 Bytecodes::Code bc = method->java_code_at_bci(bci);
3030 const bool callee_is_static = callee->is_loaded() && callee->is_static();
3031 // Perform additional virtual call profiling for invokevirtual and
3032 // invokeinterface bytecodes
3033 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
3034 !callee_is_static && // required for optimized MH invokes
3035 C1ProfileVirtualCalls) {
3036 assert(op->recv()->is_single_cpu(), "recv must be allocated");
3037 Register recv = op->recv()->as_register();
3038 assert_different_registers(mdo, tmp1, recv);
3039 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3040 ciKlass* known_klass = op->known_holder();
3041 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3042 // We know the type that will be seen at this call site; we can
3043 // statically update the MethodData* rather than needing to do
3044 // dynamic tests on the receiver type
3046 // NOTE: we should probably put a lock around this search to
3047 // avoid collisions between concurrent compilations
3048 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3049 uint i;
3050 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3051 ciKlass* receiver = vc_data->receiver(i);
3052 if (known_klass->equals(receiver)) {
3053 Address data_addr(mdo, md->byte_offset_of_slot(data,
3054 VirtualCallData::receiver_count_offset(i)) -
3055 mdo_offset_bias);
3056 __ ld_ptr(data_addr, tmp1);
3057 __ add(tmp1, DataLayout::counter_increment, tmp1);
3058 __ st_ptr(tmp1, data_addr);
3059 return;
3060 }
3061 }
3063 // Receiver type not found in profile data; select an empty slot
3065 // Note that this is less efficient than it should be because it
3066 // always does a write to the receiver part of the
3067 // VirtualCallData rather than just the first time
3068 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3069 ciKlass* receiver = vc_data->receiver(i);
3070 if (receiver == NULL) {
3071 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
3072 mdo_offset_bias);
3073 metadata2reg(known_klass->constant_encoding(), tmp1);
3074 __ st_ptr(tmp1, recv_addr);
3075 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
3076 mdo_offset_bias);
3077 __ ld_ptr(data_addr, tmp1);
3078 __ add(tmp1, DataLayout::counter_increment, tmp1);
3079 __ st_ptr(tmp1, data_addr);
3080 return;
3081 }
3082 }
3083 } else {
3084 __ load_klass(recv, recv);
3085 Label update_done;
3086 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
3087 // Receiver did not match any saved receiver and there is no empty row for it.
3088 // Increment total counter to indicate polymorphic case.
3089 __ ld_ptr(counter_addr, tmp1);
3090 __ add(tmp1, DataLayout::counter_increment, tmp1);
3091 __ st_ptr(tmp1, counter_addr);
3093 __ bind(update_done);
3094 }
3095 } else {
3096 // Static call
3097 __ ld_ptr(counter_addr, tmp1);
3098 __ add(tmp1, DataLayout::counter_increment, tmp1);
3099 __ st_ptr(tmp1, counter_addr);
3100 }
3101 }
3103 void LIR_Assembler::align_backward_branch_target() {
3104 __ align(OptoLoopAlignment);
3105 }
3108 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
3109 // make sure we are expecting a delay
3110 // this has the side effect of clearing the delay state
3111 // so we can use _masm instead of _masm->delayed() to do the
3112 // code generation.
3113 __ delayed();
3115 // make sure we only emit one instruction
3116 int offset = code_offset();
3117 op->delay_op()->emit_code(this);
3118 #ifdef ASSERT
3119 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
3120 op->delay_op()->print();
3121 }
3122 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
3123 "only one instruction can go in a delay slot");
3124 #endif
3126 // we may also be emitting the call info for the instruction
3127 // which we are the delay slot of.
3128 CodeEmitInfo* call_info = op->call_info();
3129 if (call_info) {
3130 add_call_info(code_offset(), call_info);
3131 }
3133 if (VerifyStackAtCalls) {
3134 _masm->sub(FP, SP, O7);
3135 _masm->cmp(O7, initial_frame_size_in_bytes());
3136 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
3137 }
3138 }
3141 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3142 assert(left->is_register(), "can only handle registers");
3144 if (left->is_single_cpu()) {
3145 __ neg(left->as_register(), dest->as_register());
3146 } else if (left->is_single_fpu()) {
3147 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
3148 } else if (left->is_double_fpu()) {
3149 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
3150 } else {
3151 assert (left->is_double_cpu(), "Must be a long");
3152 Register Rlow = left->as_register_lo();
3153 Register Rhi = left->as_register_hi();
3154 #ifdef _LP64
3155 __ sub(G0, Rlow, dest->as_register_lo());
3156 #else
3157 __ subcc(G0, Rlow, dest->as_register_lo());
3158 __ subc (G0, Rhi, dest->as_register_hi());
3159 #endif
3160 }
3161 }
3164 void LIR_Assembler::fxch(int i) {
3165 Unimplemented();
3166 }
3168 void LIR_Assembler::fld(int i) {
3169 Unimplemented();
3170 }
3172 void LIR_Assembler::ffree(int i) {
3173 Unimplemented();
3174 }
3176 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
3177 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3179 // if tmp is invalid, then the function being called doesn't destroy the thread
3180 if (tmp->is_valid()) {
3181 __ save_thread(tmp->as_register());
3182 }
3183 __ call(dest, relocInfo::runtime_call_type);
3184 __ delayed()->nop();
3185 if (info != NULL) {
3186 add_call_info_here(info);
3187 }
3188 if (tmp->is_valid()) {
3189 __ restore_thread(tmp->as_register());
3190 }
3192 #ifdef ASSERT
3193 __ verify_thread();
3194 #endif // ASSERT
3195 }
3198 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3199 #ifdef _LP64
3200 ShouldNotReachHere();
3201 #endif
3203 NEEDS_CLEANUP;
3204 if (type == T_LONG) {
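// On 32-bit SPARC a volatile long must be read or written with a single
// 64-bit ldx/stx, so the value is staged in one 64-bit register (G4/G5)
// around the memory access.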
3205 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
3207 // (extended to allow indexed as well as constant displaced for JSR-166)
3208 Register idx = noreg; // contains either constant offset or index
3210 int disp = mem_addr->disp();
3211 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3212 if (!Assembler::is_simm13(disp)) {
3213 idx = O7;
3214 __ set(disp, idx);
3215 }
3216 } else {
3217 assert(disp == 0, "not both indexed and disp");
3218 idx = mem_addr->index()->as_register();
3219 }
3221 int null_check_offset = -1;
3223 Register base = mem_addr->base()->as_register();
3224 if (src->is_register() && dest->is_address()) {
3225 // G4 is high half, G5 is low half
3226 // clear the top bits of G5, and scale up G4
3227 __ srl (src->as_register_lo(), 0, G5);
3228 __ sllx(src->as_register_hi(), 32, G4);
3229 // combine the two halves into the 64 bits of G4
3230 __ or3(G4, G5, G4);
3231 null_check_offset = __ offset();
3232 if (idx == noreg) {
3233 __ stx(G4, base, disp);
3234 } else {
3235 __ stx(G4, base, idx);
3236 }
3237 } else if (src->is_address() && dest->is_register()) {
3238 null_check_offset = __ offset();
3239 if (idx == noreg) {
3240 __ ldx(base, disp, G5);
3241 } else {
3242 __ ldx(base, idx, G5);
3243 }
3244 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3245 __ mov (G5, dest->as_register_lo()); // copy low half into lo
3246 } else {
3247 Unimplemented();
3248 }
3249 if (info != NULL) {
3250 add_debug_info_for_null_check(null_check_offset, info);
3251 }
3253 } else {
3254 // use normal move for all other volatiles since they don't need
3255 // special handling to remain atomic.
3256 move_op(src, dest, type, lir_patch_none, info, false, false, false);
3257 }
3258 }
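// For reference, the 32-bit volatile long store path above emits roughly
// (sketch):
//   srl  Rlo, 0,  %g5          ! zero-extend the low half
//   sllx Rhi, 32, %g4          ! move the high half into the upper bits
//   or   %g4, %g5, %g4         ! full 64-bit value in %g4
//   stx  %g4, [base + disp]    ! one atomic 64-bit store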
3260 void LIR_Assembler::membar() {
3261 // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
3262 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3263 }
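// Under TSO the hardware already preserves load-load, load-store and
// store-store ordering, so acquire/release and the weaker membars below
// are no-ops; only store-load reordering must be prevented explicitly
// (e.g. between a volatile store and a subsequent volatile load).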
3265 void LIR_Assembler::membar_acquire() {
3266 // no-op on TSO
3267 }
3269 void LIR_Assembler::membar_release() {
3270 // no-op on TSO
3271 }
3273 void LIR_Assembler::membar_loadload() {
3274 // no-op
3275 //__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad));
3276 }
3278 void LIR_Assembler::membar_storestore() {
3279 // no-op
3280 //__ membar(Assembler::Membar_mask_bits(Assembler::StoreStore));
3281 }
3283 void LIR_Assembler::membar_loadstore() {
3284 // no-op
3285 //__ membar(Assembler::Membar_mask_bits(Assembler::LoadStore));
3286 }
3288 void LIR_Assembler::membar_storeload() {
3289 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3290 }
3293 // Pack two sequential registers containing 32 bit values
3294 // into a single 64 bit register.
3295 // src and src->successor() are packed into dst
3296 // src and dst may be the same register.
3297 // Note: src is destroyed
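// e.g. with rs = O0 (so rs->successor() = O1), packing into rd (sketch):
//   sllx O0, 32, O0   ! high half into the upper 32 bits
//   srl  O1, 0,  O1   ! zero-extend the low half
//   or   O0, O1, rd   ! packed 64-bit result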
3298 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
3299 Register rs = src->as_register();
3300 Register rd = dst->as_register_lo();
3301 __ sllx(rs, 32, rs);
3302 __ srl(rs->successor(), 0, rs->successor());
3303 __ or3(rs, rs->successor(), rd);
3304 }
3306 // Unpack a 64 bit value in a register into
3307 // two sequential registers.
3308 // src is unpacked into dst and dst->successor()
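// e.g. with rs holding the 64-bit value and rd = dst's high register
// (sketch):
//   srlx rs, 32, rd               ! high 32 bits
//   srl  rs, 0,  rd->successor()  ! low 32 bits, zero-extended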
3309 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
3310 Register rs = src->as_register_lo();
3311 Register rd = dst->as_register_hi();
3312 assert_different_registers(rs, rd, rd->successor());
3313 __ srlx(rs, 32, rd);
3314 __ srl (rs, 0, rd->successor());
3315 }
3318 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
3319 LIR_Address* addr = addr_opr->as_address_ptr();
3320 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
3322 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
3323 }
3326 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3327 assert(result_reg->is_register(), "check");
3328 __ mov(G2_thread, result_reg->as_register());
3329 }
3331 #ifdef ASSERT
3332 // emit run-time assertion
3333 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3334 assert(op->code() == lir_assert, "must be");
3336 if (op->in_opr1()->is_valid()) {
3337 assert(op->in_opr2()->is_valid(), "both operands must be valid");
3338 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3339 } else {
3340 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3341 assert(op->condition() == lir_cond_always, "no other conditions allowed");
3342 }
3344 Label ok;
3345 if (op->condition() != lir_cond_always) {
3346 Assembler::Condition acond;
3347 switch (op->condition()) {
3348 case lir_cond_equal: acond = Assembler::equal; break;
3349 case lir_cond_notEqual: acond = Assembler::notEqual; break;
3350 case lir_cond_less: acond = Assembler::less; break;
3351 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
3352 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
3353 case lir_cond_greater: acond = Assembler::greater; break;
3354 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
3355 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
3356 default: ShouldNotReachHere();
3357 };
3358 __ br(acond, false, Assembler::pt, ok);
3359 __ delayed()->nop();
3360 }
3361 if (op->halt()) {
3362 const char* str = __ code_string(op->msg());
3363 __ stop(str);
3364 } else {
3365 breakpoint();
3366 }
3367 __ bind(ok);
3368 }
3369 #endif
3371 void LIR_Assembler::peephole(LIR_List* lir) {
3372 LIR_OpList* inst = lir->instructions_list();
3373 for (int i = 0; i < inst->length(); i++) {
3374 LIR_Op* op = inst->at(i);
3375 switch (op->code()) {
3376 case lir_cond_float_branch:
3377 case lir_branch: {
3378 LIR_OpBranch* branch = op->as_OpBranch();
3379 assert(branch->info() == NULL, "shouldn't be state on branches anymore");
3380 LIR_Op* delay_op = NULL;
3381 // We'd like to be able to pull following instructions into
3382 // this slot, but we don't know enough to do it safely yet, so
3383 // we only optimize block-to-block control flow.
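// e.g. (sketch):
//   before:  ... ; move t, r ; branch L
//   after:   ... ; branch L  ; delay slot: move t, r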
3384 if (LIRFillDelaySlots && branch->block()) {
3385 LIR_Op* prev = inst->at(i - 1);
3386 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
3387 // swap previous instruction into delay slot
3388 inst->at_put(i - 1, op);
3389 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3390 #ifndef PRODUCT
3391 if (LIRTracePeephole) {
3392 tty->print_cr("delayed");
3393 inst->at(i - 1)->print();
3394 inst->at(i)->print();
3395 tty->cr();
3396 }
3397 #endif
3398 continue;
3399 }
3400 }
3402 if (delay_op == NULL) {
3403 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
3404 }
3405 inst->insert_before(i + 1, delay_op);
3406 break;
3407 }
3408 case lir_static_call:
3409 case lir_virtual_call:
3410 case lir_icvirtual_call:
3411 case lir_optvirtual_call:
3412 case lir_dynamic_call: {
3413 LIR_Op* prev = inst->at(i - 1);
3414 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
3415 (op->code() != lir_virtual_call ||
3416 !prev->result_opr()->is_single_cpu() ||
3417 prev->result_opr()->as_register() != O0) &&
3418 LIR_Assembler::is_single_instruction(prev)) {
3419 // Only moves without info can be put into the delay slot.
3420 // Also don't allow the setup of the receiver in the delay
3421 // slot for vtable calls.
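// e.g. (sketch): "mov x, O1 ; call m" becomes "call m ; delay: mov x, O1",
// while "mov x, O0 ; virtual call" stays put, since the receiver must be
// in O0 before the call instruction executes.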
3422 inst->at_put(i - 1, op);
3423 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3424 #ifndef PRODUCT
3425 if (LIRTracePeephole) {
3426 tty->print_cr("delayed");
3427 inst->at(i - 1)->print();
3428 inst->at(i)->print();
3429 tty->cr();
3430 }
3431 #endif
3432 } else {
3433 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3434 inst->insert_before(i + 1, delay_op);
3435 i++;
3436 }
3438 #if defined(TIERED) && !defined(_LP64)
3439 // fix up the return value from G1 to O0/O1 for long returns.
3440 // It's done here instead of in LIRGenerator because there's
3441 // such a mismatch between the single-reg and double-reg
3442 // calling conventions.
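// e.g. (sketch): "call m -> O0/O1" is rewritten as
//   "call m -> G1 ; unpack64 G1 -> O0/O1"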
3443 LIR_OpJavaCall* callop = op->as_OpJavaCall();
3444 if (callop->result_opr() == FrameMap::out_long_opr) {
3445 LIR_OpJavaCall* call;
3446 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
3447 for (int a = 0; a < callop->arguments()->length(); a++) {
3448 arguments->append(callop->arguments()->at(a));
3449 }
3450 if (op->code() == lir_virtual_call) {
3451 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3452 callop->vtable_offset(), arguments, callop->info());
3453 } else {
3454 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3455 callop->addr(), arguments, callop->info());
3456 }
3457 inst->at_put(i - 1, call);
3458 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
3459 T_LONG, lir_patch_none, NULL));
3460 }
3461 #endif
3462 break;
3463 }
3464 }
3465 }
3466 }
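// Atomic exchange via the SPARC swap instruction. swap is only 32 bits
// wide, so exchanging an oop on 64-bit requires compressed oops, e.g.
// (sketch):
//   encode_heap_oop obj -> narrow   ! compress to 32 bits
//   swap [addr], narrow             ! atomic 32-bit exchange
//   decode_heap_oop narrow -> obj   ! decompress the old value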
3468 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3469 LIR_Address* addr = src->as_address_ptr();
3471 assert(data == dest, "swap uses only 2 operands");
3472 assert(code == lir_xchg, "no xadd on sparc");
3474 if (data->type() == T_INT) {
3475 __ swap(as_Address(addr), data->as_register());
3476 } else if (data->is_oop()) {
3477 Register obj = data->as_register();
3478 Register narrow = tmp->as_register();
3479 #ifdef _LP64
3480 assert(UseCompressedOops, "swap is 32bit only");
3481 __ encode_heap_oop(obj, narrow);
3482 __ swap(as_Address(addr), narrow);
3483 __ decode_heap_oop(narrow, obj);
3484 #else
3485 __ swap(as_Address(addr), obj);
3486 #endif
3487 } else {
3488 ShouldNotReachHere();
3489 }
3490 }
3492 #undef __