/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
31 #include "precompiled.hpp"
32 #include "c1/c1_Compilation.hpp"
33 #include "c1/c1_Instruction.hpp"
34 #include "c1/c1_InstructionPrinter.hpp"
35 #include "c1/c1_LIRAssembler.hpp"
36 #include "c1/c1_MacroAssembler.hpp"
37 #include "c1/c1_ValueStack.hpp"
38 #include "ci/ciInstance.hpp"
39 #ifdef TARGET_ARCH_x86
40 # include "nativeInst_x86.hpp"
41 # include "vmreg_x86.inline.hpp"
42 #endif
43 #ifdef TARGET_ARCH_mips
44 # include "nativeInst_mips.hpp"
45 # include "vmreg_mips.inline.hpp"
46 #endif
47 #ifdef TARGET_ARCH_sparc
48 # include "nativeInst_sparc.hpp"
49 # include "vmreg_sparc.inline.hpp"
50 #endif
51 #ifdef TARGET_ARCH_zero
52 # include "nativeInst_zero.hpp"
53 # include "vmreg_zero.inline.hpp"
54 #endif
55 #ifdef TARGET_ARCH_arm
56 # include "nativeInst_arm.hpp"
57 # include "vmreg_arm.inline.hpp"
58 #endif
59 #ifdef TARGET_ARCH_ppc
60 # include "nativeInst_ppc.hpp"
61 # include "vmreg_ppc.inline.hpp"
62 #endif
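

// Pad the code emitted for a patchable instruction with nops until it is at
// least NativeCall::instruction_size bytes long, so that a native call can
// later be written over the site by the runtime patching machinery (see
// PatchingStub). The ASSERT block cross-checks that the requested patch kind
// is plausible for the bytecode being compiled.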
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
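
// Pick the patch kind for a constant load: call sites that carry an optional
// appendix argument (JSR 292) need load_appendix_id; everything else that
// reaches this path loads a class mirror.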
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}
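

// Safety margin: bail out on the whole compilation if the current code
// section is nearly full (less than 1K remaining on 32-bit, 2K on 64-bit)
// rather than risk overflowing the CodeBuffer during emission.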
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
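

// Emit the out-of-line "exception adapter blocks", i.e. the entry code that
// LinearScan attached to exception handlers: handlers with non-trivial entry
// code get it emitted here and their entry PC offset recorded; handlers
// whose entry code is just a branch reuse the handler block's own offset.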
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
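
// Non-safepoint debug info is recorded lazily: while successive LIR ops map
// back to the same source state, only the most recent PC offset is kept in
// _pending_non_safepoint/_pending_non_safepoint_offset; the pending entry is
// flushed via record_non_safepoint_debug_info() once an op with a different
// state is seen.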
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, via bci_result, the caller bci of the next-younger state.
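// For example (hypothetical inlining chain): if a() inlines b() which in
// turn inlines c(), and s is c's state, then nth_oldest(s, 0, bci) returns
// a's state and sets bci to the bci in a() of the call that entered b().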
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}
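
// Implicit null checks emit no explicit compare: the memory access itself is
// allowed to fault, and the ImplicitNullCheckStub registered here lets the
// faulting PC be mapped back to the exception-throwing slow path.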
void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up here.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
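

// Note on the MIPS (Loongson) variant below: MIPS has no condition-code
// register, so a standalone lir_cmp cannot leave its result in flags for a
// later branch; compares are presumably folded into the branch operations
// elsewhere in the port, and only the null-check side effect
// (lir_null_check_for_branch) is handled here.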
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
#ifndef MIPS
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;
#else
    case lir_null_check_for_branch:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      break;
#endif

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
#ifndef MIPS
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
#endif
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}
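

// roundfp narrows an FPU register value to its IEEE 32/64-bit representation
// by storing it to a stack slot; this matters only for the x87 FPU on x86
// (UseSSE < 2), where intermediate results are kept in 80-bit precision.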
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
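
// move_op dispatches on the (source kind, destination kind) pair:
//
//   src \ dest | register  | stack       | address (memory)
//   -----------+-----------+-------------+-----------------
//   register   | reg2reg   | reg2stack   | reg2mem
//   stack      | stack2reg | stack2stack | -
//   constant   | const2reg | const2stack | const2mem
//   address    | mem2reg   | -           | -
//
// Patching is possible only for const2reg, reg2mem and mem2reg; all other
// combinations assert patch_code == lir_patch_none.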
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}