/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "nativeInst_mips.hpp"
# include "vmreg_mips.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
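
// Pick the patching stub kind for an unresolved constant load: bytecodes that
// carry an optional appendix (JSR 292 call sites) need load_appendix_id,
// everything else loads a class mirror.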
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}
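

// Bail out of the compilation if the current code section is nearly full;
// continuing to emit would overflow the CodeBuffer.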
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}
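
// Emit the out-of-line code for every queued slow-case stub, checking for
// code-buffer overflow and bailout before each one.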
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
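
// Emit any not-yet-emitted exception-handler adapter code and record the PC
// offset at which each handler entry starts.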
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
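

// Main emission driver: lower every block of the high-level IR to machine
// code, then flush any pending debug info.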
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
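
// Track debug info for non-safepoint instructions: keep at most one pending
// (instruction, pc-offset) pair, and flush the previous one once code for a
// different state has actually been emitted past its offset.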
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns, in bci_result, the caller bci of the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
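
// Flush the pending non-safepoint: describe its scope chain, oldest caller
// first, at the remembered pc offset.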
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false /*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
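

// Emit a Java call: align the site so it can be patched atomically on MP
// hardware, emit the static-call stub out of line, then dispatch on the call
// kind (static/dynamic, optimized-virtual, inline-cache, or vtable).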
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
#ifndef MIPS64
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;
#else
    case lir_null_check_for_branch:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      break;
#endif

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
#ifndef MIPS64
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
#endif
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
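

// Generic move: dispatch on the (source kind, destination kind) pair to the
// matching platform move routine. Patching and debug info are only allowed
// for the combinations that can reference unresolved constants or memory.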
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}