/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "nativeInst_mips.hpp"
# include "vmreg_mips.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
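
// Note (added for clarity, not in the original file): the nop padding above
// establishes the invariant
//
//   (intx) _masm->pc() - (intx) patch->pc_start() >= NativeCall::instruction_size
//
// so that, when the stub resolves the field/klass/mirror at runtime, the
// instructions starting at pc_start can be overwritten in place with a
// native call without clobbering code beyond the reserved patch site.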

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}

void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K) LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}
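
// Added note: check_codespace() is invoked before each LIR op and each slow
// case stub is emitted (see emit_lir_list() and emit_stubs() below), so the
// 1K (2K on LP64) threshold is the headroom a single emission may assume;
// falling below it bails out the compile instead of overflowing the
// CodeBuffer mid-emission.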

void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}

bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
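
// Worked example (illustrative numbers only): if the compiled frame needs
// 96 bytes but the equivalent interpreter frames after a deoptimization
// would need 160, bang_size_in_bytes() returns 160. Banging the larger size
// up front lets the deoptimization blob skip its own stack bang, as the
// comment above explains.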

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
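
// Added note: an "Exception adapter block" is emitted only when the handler
// has real entry code (more than the trailing branch); otherwise the
// handler's entry pco simply aliases the exception_handler_pco already
// recorded for the handler block itself in emit_block() below.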

void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}

void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
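
// Sketch of the protocol above (added comment): debug info for ops that
// cannot trap is not recorded eagerly. The youngest candidate is parked in
// _pending_non_safepoint/_pending_non_safepoint_offset, and consecutive ops
// that share the same debug state merely advance the offset; only when an op
// with a different state shows up is record_non_safepoint_debug_info()
// (below) called to flush a single coalesced non-safepoint entry.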

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
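
// Illustrative walk-through (hypothetical inline chain): for a() calling
// b() calling c(), with s being c's state,
//   nth_oldest(s, 0, bci)  returns a's state (the oldest),
//   nth_oldest(s, 1, bci)  returns b's state,
//   nth_oldest(s, 2, bci)  returns c's state (the youngest),
//   nth_oldest(s, 3, bci)  returns NULL,
// which is the termination condition of the scope loop in
// record_non_safepoint_debug_info() below.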

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}

void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}
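
// Added note on the alignment above: a static call is later repatched by
// storing a new displacement into the instruction stream. On x86, for
// example, align_call() pads with nops so the displacement word of the call
// is word aligned, making that store a single atomic write on MP hardware.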

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}

void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
#ifndef MIPS64
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;
#else
    case lir_null_check_for_branch:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      break;
#endif

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
#ifndef MIPS64
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
#endif
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}

void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
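
// Summary of the dispatch above (added comment):
//
//   src \ dest | register    stack        address
//   -----------+---------------------------------------
//   register   | reg2reg     reg2stack    reg2mem
//   stack      | stack2reg   stack2stack  -
//   constant   | const2reg   const2stack  const2mem
//   address    | mem2reg     -            -
//
// Patching is only legal on the paths that may touch a not-yet-resolved
// field or klass: reg2mem, mem2reg and const2reg; const2mem may carry a
// CodeEmitInfo but no patch code, as the asserts above enforce.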

void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}