src/share/vm/c1/c1_LIRAssembler.cpp

author      dlong
date        Tue, 24 Feb 2015 15:04:52 -0500
changeset   7598 ddce0b7cee93
parent      6723 0bf37f737702
child       6876 710a3c8b516e
child       8427 c3d0bd36ab28
permissions -rw-r--r--

8072383: resolve conflicts between open and closed ports
Summary: refactor close to remove references to closed ports
Reviewed-by: kvn, simonis, sgehwolf, dholmes

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
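
// Pad the code emitted so far with nops until there is room to insert a
// native call over it, then install the PatchingStub and queue it with the
// other slow-case stubs. In debug builds, also check that the bytecode at
// the patch site is one that can legitimately require this kind of patching.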
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
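
// Select the patch kind for a resolving load: bytecodes that carry an
// optional appendix (JSR 292 call sites) need load_appendix_id, everything
// else loads the class mirror.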
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}
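
// Bail out of the compilation if the remaining space in the code buffer
// drops below a safety margin (1K on 32-bit, 2K on 64-bit targets).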
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}

void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}
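
// Emit the accumulated out-of-line stubs, checking for code space and
// bailouts before each one. In debug builds, each stub must leave no
// unbound labels behind.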
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}

bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
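
// Emit the out-of-line adapter code for exception handlers and record each
// handler's entry PC offset for the ExceptionHandlerTable. Handlers with no
// adapter code (or only a branch) reuse the handler block's own entry.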
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}

void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}

void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
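
// Emit one LIR list: run the peephole pass first, then emit each operation
// in order, checking code space before every op and collecting debug
// information for non-safepoint ops as they are emitted.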
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
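
// Track debug info across ops that are not safepoints. Consecutive ops that
// share the same source instruction or the same debug state simply extend
// the pending record; once the state changes, the pending record is flushed
// via record_non_safepoint_debug_info().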
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, in bci_result, the caller bci of the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
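
// Flush the pending non-safepoint: emit a non-safepoint debug record at the
// pending PC offset, describing the scopes from the oldest caller inward.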
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}

void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
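
// Emit a Java call: align the call site on MP systems so it can later be
// patched atomically, emit the static call stub out of line, dispatch on
// the call kind, and record whether this method contains JSR 292 method
// handle invokes.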
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for (i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}
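
// Dispatch a one-operand LIR operation to the matching platform emitter.
// Moves get extra handling for volatile, unaligned, and wide variants.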
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
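
// Dispatch a zero-operand LIR operation: entry points, frame building,
// alignment, FPU state changes, and memory barriers.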
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
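
// Dispatch a two-operand LIR operation: compares, conditional moves, shifts,
// arithmetic, intrinsics, logical ops, throws, and atomic updates.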
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
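
// Dispatch a move according to the kinds of its source and destination
// (register, stack slot, constant, or memory address). Only register<->memory
// moves and constant-to-register moves may carry patch information; all
// other combinations assert lir_patch_none.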
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
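
// With +VerifyOops, walk the oop map attached to a call site and verify
// every value recorded as an oop, whether it lives in a register or in a
// stack slot.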
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}
