src/share/vm/c1/c1_LIRAssembler.cpp

changeset 0: f90c822e73f8
child 1: 2d8a650513c2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,888 @@
     1.4 +/*
     1.5 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "c1/c1_Compilation.hpp"
    1.30 +#include "c1/c1_Instruction.hpp"
    1.31 +#include "c1/c1_InstructionPrinter.hpp"
    1.32 +#include "c1/c1_LIRAssembler.hpp"
    1.33 +#include "c1/c1_MacroAssembler.hpp"
    1.34 +#include "c1/c1_ValueStack.hpp"
    1.35 +#include "ci/ciInstance.hpp"
    1.36 +#ifdef TARGET_ARCH_x86
    1.37 +# include "nativeInst_x86.hpp"
    1.38 +# include "vmreg_x86.inline.hpp"
    1.39 +#endif
    1.40 +#ifdef TARGET_ARCH_sparc
    1.41 +# include "nativeInst_sparc.hpp"
    1.42 +# include "vmreg_sparc.inline.hpp"
    1.43 +#endif
    1.44 +#ifdef TARGET_ARCH_zero
    1.45 +# include "nativeInst_zero.hpp"
    1.46 +# include "vmreg_zero.inline.hpp"
    1.47 +#endif
    1.48 +#ifdef TARGET_ARCH_arm
    1.49 +# include "nativeInst_arm.hpp"
    1.50 +# include "vmreg_arm.inline.hpp"
    1.51 +#endif
    1.52 +#ifdef TARGET_ARCH_ppc
    1.53 +# include "nativeInst_ppc.hpp"
    1.54 +# include "vmreg_ppc.inline.hpp"
    1.55 +#endif
    1.56 +
    1.57 +
    1.58 +void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
     1.59 +  // we must have enough patching space so that a call can be inserted
    1.60 +  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    1.61 +    _masm->nop();
    1.62 +  }
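          +  // install() records the patch-site state in the stub; the stub's code is
          +  // emitted later, with the other slow-case stubs, via append_code_stub()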
    1.63 +  patch->install(_masm, patch_code, obj, info);
    1.64 +  append_code_stub(patch);
    1.65 +
    1.66 +#ifdef ASSERT
    1.67 +  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
    1.68 +  if (patch->id() == PatchingStub::access_field_id) {
    1.69 +    switch (code) {
    1.70 +      case Bytecodes::_putstatic:
    1.71 +      case Bytecodes::_getstatic:
    1.72 +      case Bytecodes::_putfield:
    1.73 +      case Bytecodes::_getfield:
    1.74 +        break;
    1.75 +      default:
    1.76 +        ShouldNotReachHere();
    1.77 +    }
    1.78 +  } else if (patch->id() == PatchingStub::load_klass_id) {
    1.79 +    switch (code) {
    1.80 +      case Bytecodes::_new:
    1.81 +      case Bytecodes::_anewarray:
    1.82 +      case Bytecodes::_multianewarray:
    1.83 +      case Bytecodes::_instanceof:
    1.84 +      case Bytecodes::_checkcast:
    1.85 +        break;
    1.86 +      default:
    1.87 +        ShouldNotReachHere();
    1.88 +    }
    1.89 +  } else if (patch->id() == PatchingStub::load_mirror_id) {
    1.90 +    switch (code) {
    1.91 +      case Bytecodes::_putstatic:
    1.92 +      case Bytecodes::_getstatic:
    1.93 +      case Bytecodes::_ldc:
    1.94 +      case Bytecodes::_ldc_w:
    1.95 +        break;
    1.96 +      default:
    1.97 +        ShouldNotReachHere();
    1.98 +    }
    1.99 +  } else if (patch->id() == PatchingStub::load_appendix_id) {
   1.100 +    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
   1.101 +    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   1.102 +  } else {
   1.103 +    ShouldNotReachHere();
   1.104 +  }
   1.105 +#endif
   1.106 +}
   1.107 +
   1.108 +PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
   1.109 +  IRScope* scope = info->scope();
   1.110 +  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
   1.111 +  if (Bytecodes::has_optional_appendix(bc_raw)) {
   1.112 +    return PatchingStub::load_appendix_id;
   1.113 +  }
   1.114 +  return PatchingStub::load_mirror_id;
   1.115 +}
   1.116 +
   1.117 +//---------------------------------------------------------------
   1.118 +
   1.119 +
   1.120 +LIR_Assembler::LIR_Assembler(Compilation* c):
   1.121 +   _compilation(c)
   1.122 + , _masm(c->masm())
   1.123 + , _bs(Universe::heap()->barrier_set())
   1.124 + , _frame_map(c->frame_map())
   1.125 + , _current_block(NULL)
   1.126 + , _pending_non_safepoint(NULL)
   1.127 + , _pending_non_safepoint_offset(0)
   1.128 +{
   1.129 +  _slow_case_stubs = new CodeStubList();
   1.130 +}
   1.131 +
   1.132 +
   1.133 +LIR_Assembler::~LIR_Assembler() {
   1.134 +}
   1.135 +
   1.136 +
   1.137 +void LIR_Assembler::check_codespace() {
   1.138 +  CodeSection* cs = _masm->code_section();
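          +  // abort the compilation if less than 1K (2K on LP64) of code space remains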
   1.139 +  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
   1.140 +    BAILOUT("CodeBuffer overflow");
   1.141 +  }
   1.142 +}
   1.143 +
   1.144 +
   1.145 +void LIR_Assembler::append_code_stub(CodeStub* stub) {
   1.146 +  _slow_case_stubs->append(stub);
   1.147 +}
   1.148 +
   1.149 +void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
   1.150 +  for (int m = 0; m < stub_list->length(); m++) {
   1.151 +    CodeStub* s = (*stub_list)[m];
   1.152 +
   1.153 +    check_codespace();
   1.154 +    CHECK_BAILOUT();
   1.155 +
   1.156 +#ifndef PRODUCT
   1.157 +    if (CommentedAssembly) {
   1.158 +      stringStream st;
   1.159 +      s->print_name(&st);
   1.160 +      st.print(" slow case");
   1.161 +      _masm->block_comment(st.as_string());
   1.162 +    }
   1.163 +#endif
   1.164 +    s->emit_code(this);
   1.165 +#ifdef ASSERT
   1.166 +    s->assert_no_unbound_labels();
   1.167 +#endif
   1.168 +  }
   1.169 +}
   1.170 +
   1.171 +
   1.172 +void LIR_Assembler::emit_slow_case_stubs() {
   1.173 +  emit_stubs(_slow_case_stubs);
   1.174 +}
   1.175 +
   1.176 +
   1.177 +bool LIR_Assembler::needs_icache(ciMethod* method) const {
   1.178 +  return !method->is_static();
   1.179 +}
   1.180 +
   1.181 +
   1.182 +int LIR_Assembler::code_offset() const {
   1.183 +  return _masm->offset();
   1.184 +}
   1.185 +
   1.186 +
   1.187 +address LIR_Assembler::pc() const {
   1.188 +  return _masm->pc();
   1.189 +}
   1.190 +
   1.191 +// To bang the stack of this compiled method we use the stack size
   1.192 +// that the interpreter would need in case of a deoptimization. This
    1.193 +// removes the need to bang the stack in the deoptimization blob, which
   1.194 +// in turn simplifies stack overflow handling.
   1.195 +int LIR_Assembler::bang_size_in_bytes() const {
   1.196 +  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
   1.197 +}
   1.198 +
   1.199 +void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
   1.200 +  for (int i = 0; i < info_list->length(); i++) {
   1.201 +    XHandlers* handlers = info_list->at(i)->exception_handlers();
   1.202 +
   1.203 +    for (int j = 0; j < handlers->length(); j++) {
   1.204 +      XHandler* handler = handlers->handler_at(j);
   1.205 +      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
   1.206 +      assert(handler->entry_code() == NULL ||
   1.207 +             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
   1.208 +             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");
   1.209 +
   1.210 +      if (handler->entry_pco() == -1) {
   1.211 +        // entry code not emitted yet
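          +        // entry code that consists only of its terminating branch needs no adapter;
          +        // the handler block's own entry offset is used instead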
   1.212 +        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
   1.213 +          handler->set_entry_pco(code_offset());
   1.214 +          if (CommentedAssembly) {
   1.215 +            _masm->block_comment("Exception adapter block");
   1.216 +          }
   1.217 +          emit_lir_list(handler->entry_code());
   1.218 +        } else {
   1.219 +          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
   1.220 +        }
   1.221 +
   1.222 +        assert(handler->entry_pco() != -1, "must be set now");
   1.223 +      }
   1.224 +    }
   1.225 +  }
   1.226 +}
   1.227 +
   1.228 +
   1.229 +void LIR_Assembler::emit_code(BlockList* hir) {
   1.230 +  if (PrintLIR) {
   1.231 +    print_LIR(hir);
   1.232 +  }
   1.233 +
   1.234 +  int n = hir->length();
   1.235 +  for (int i = 0; i < n; i++) {
   1.236 +    emit_block(hir->at(i));
   1.237 +    CHECK_BAILOUT();
   1.238 +  }
   1.239 +
   1.240 +  flush_debug_info(code_offset());
   1.241 +
   1.242 +  DEBUG_ONLY(check_no_unbound_labels());
   1.243 +}
   1.244 +
   1.245 +
   1.246 +void LIR_Assembler::emit_block(BlockBegin* block) {
   1.247 +  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
   1.248 +    align_backward_branch_target();
   1.249 +  }
   1.250 +
   1.251 +  // if this block is the start of an exception handler, record the
   1.252 +  // PC offset of the first instruction for later construction of
   1.253 +  // the ExceptionHandlerTable
   1.254 +  if (block->is_set(BlockBegin::exception_entry_flag)) {
   1.255 +    block->set_exception_handler_pco(code_offset());
   1.256 +  }
   1.257 +
   1.258 +#ifndef PRODUCT
   1.259 +  if (PrintLIRWithAssembly) {
    1.260 +    // don't print Phis
   1.261 +    InstructionPrinter ip(false);
   1.262 +    block->print(ip);
   1.263 +  }
   1.264 +#endif /* PRODUCT */
   1.265 +
   1.266 +  assert(block->lir() != NULL, "must have LIR");
   1.267 +  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
   1.268 +
   1.269 +#ifndef PRODUCT
   1.270 +  if (CommentedAssembly) {
   1.271 +    stringStream st;
   1.272 +    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
   1.273 +    _masm->block_comment(st.as_string());
   1.274 +  }
   1.275 +#endif
   1.276 +
   1.277 +  emit_lir_list(block->lir());
   1.278 +
   1.279 +  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
   1.280 +}
   1.281 +
   1.282 +
   1.283 +void LIR_Assembler::emit_lir_list(LIR_List* list) {
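          +  // give the platform-specific back end a chance to simplify adjacent LIR ops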
   1.284 +  peephole(list);
   1.285 +
   1.286 +  int n = list->length();
   1.287 +  for (int i = 0; i < n; i++) {
   1.288 +    LIR_Op* op = list->at(i);
   1.289 +
   1.290 +    check_codespace();
   1.291 +    CHECK_BAILOUT();
   1.292 +
   1.293 +#ifndef PRODUCT
   1.294 +    if (CommentedAssembly) {
    1.295 +      // Don't print out every op since that's too verbose.  Print
   1.296 +      // branches since they include block and stub names.  Also print
   1.297 +      // patching moves since they generate funny looking code.
   1.298 +      if (op->code() == lir_branch ||
   1.299 +          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
   1.300 +        stringStream st;
   1.301 +        op->print_on(&st);
   1.302 +        _masm->block_comment(st.as_string());
   1.303 +      }
   1.304 +    }
   1.305 +    if (PrintLIRWithAssembly) {
   1.306 +      // print out the LIR operation followed by the resulting assembly
   1.307 +      list->at(i)->print(); tty->cr();
   1.308 +    }
   1.309 +#endif /* PRODUCT */
   1.310 +
   1.311 +    op->emit_code(this);
   1.312 +
   1.313 +    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
   1.314 +      process_debug_info(op);
   1.315 +    }
   1.316 +
   1.317 +#ifndef PRODUCT
   1.318 +    if (PrintLIRWithAssembly) {
   1.319 +      _masm->code()->decode();
   1.320 +    }
   1.321 +#endif /* PRODUCT */
   1.322 +  }
   1.323 +}
   1.324 +
   1.325 +#ifdef ASSERT
   1.326 +void LIR_Assembler::check_no_unbound_labels() {
   1.327 +  CHECK_BAILOUT();
   1.328 +
   1.329 +  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
   1.330 +    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
   1.331 +      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
   1.332 +      assert(false, "unbound label");
   1.333 +    }
   1.334 +  }
   1.335 +}
   1.336 +#endif
   1.337 +
   1.338 +//----------------------------------debug info--------------------------------
   1.339 +
   1.340 +
   1.341 +void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
   1.342 +  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
   1.343 +  int pc_offset = code_offset();
   1.344 +  flush_debug_info(pc_offset);
   1.345 +  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   1.346 +  if (info->exception_handlers() != NULL) {
   1.347 +    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
   1.348 +  }
   1.349 +}
   1.350 +
   1.351 +
   1.352 +void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
   1.353 +  flush_debug_info(pc_offset);
   1.354 +  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   1.355 +  if (cinfo->exception_handlers() != NULL) {
   1.356 +    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   1.357 +  }
   1.358 +}
   1.359 +
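          +// Pick the state used for debug info: the state after a StateSplit,
          +// otherwise the instruction's state_before().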
   1.360 +static ValueStack* debug_info(Instruction* ins) {
   1.361 +  StateSplit* ss = ins->as_StateSplit();
   1.362 +  if (ss != NULL) return ss->state();
   1.363 +  return ins->state_before();
   1.364 +}
   1.365 +
   1.366 +void LIR_Assembler::process_debug_info(LIR_Op* op) {
   1.367 +  Instruction* src = op->source();
   1.368 +  if (src == NULL)  return;
   1.369 +  int pc_offset = code_offset();
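          +  // same source instruction as the pending entry: just extend its range to this pc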
   1.370 +  if (_pending_non_safepoint == src) {
   1.371 +    _pending_non_safepoint_offset = pc_offset;
   1.372 +    return;
   1.373 +  }
   1.374 +  ValueStack* vstack = debug_info(src);
   1.375 +  if (vstack == NULL)  return;
   1.376 +  if (_pending_non_safepoint != NULL) {
   1.377 +    // Got some old debug info.  Get rid of it.
   1.378 +    if (debug_info(_pending_non_safepoint) == vstack) {
   1.379 +      _pending_non_safepoint_offset = pc_offset;
   1.380 +      return;
   1.381 +    }
   1.382 +    if (_pending_non_safepoint_offset < pc_offset) {
   1.383 +      record_non_safepoint_debug_info();
   1.384 +    }
   1.385 +    _pending_non_safepoint = NULL;
   1.386 +  }
   1.387 +  // Remember the debug info.
   1.388 +  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
   1.389 +    _pending_non_safepoint = src;
   1.390 +    _pending_non_safepoint_offset = pc_offset;
   1.391 +  }
   1.392 +}
   1.393 +
   1.394 +// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
    1.395 +// Returns NULL if n is too large.
    1.396 +// Also returns, in bci_result, the caller_bci of the next-younger state.
   1.397 +static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
   1.398 +  ValueStack* t = s;
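          +  // advance t by n frames, then walk s and t toward the root in lockstep;
          +  // when t reaches the oldest frame, s is the n-th oldest state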
   1.399 +  for (int i = 0; i < n; i++) {
   1.400 +    if (t == NULL)  break;
   1.401 +    t = t->caller_state();
   1.402 +  }
   1.403 +  if (t == NULL)  return NULL;
   1.404 +  for (;;) {
   1.405 +    ValueStack* tc = t->caller_state();
   1.406 +    if (tc == NULL)  return s;
   1.407 +    t = tc;
   1.408 +    bci_result = tc->bci();
   1.409 +    s = s->caller_state();
   1.410 +  }
   1.411 +}
   1.412 +
   1.413 +void LIR_Assembler::record_non_safepoint_debug_info() {
   1.414 +  int         pc_offset = _pending_non_safepoint_offset;
   1.415 +  ValueStack* vstack    = debug_info(_pending_non_safepoint);
   1.416 +  int         bci       = vstack->bci();
   1.417 +
   1.418 +  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
   1.419 +  assert(debug_info->recording_non_safepoints(), "sanity");
   1.420 +
   1.421 +  debug_info->add_non_safepoint(pc_offset);
   1.422 +
   1.423 +  // Visit scopes from oldest to youngest.
   1.424 +  for (int n = 0; ; n++) {
   1.425 +    int s_bci = bci;
   1.426 +    ValueStack* s = nth_oldest(vstack, n, s_bci);
   1.427 +    if (s == NULL)  break;
   1.428 +    IRScope* scope = s->scope();
    1.429 +    // Always pass false for reexecute since these ScopeDescs are never used for deopt
   1.430 +    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
   1.431 +  }
   1.432 +
   1.433 +  debug_info->end_non_safepoint(pc_offset);
   1.434 +}
   1.435 +
   1.436 +
   1.437 +void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
   1.438 +  add_debug_info_for_null_check(code_offset(), cinfo);
   1.439 +}
   1.440 +
   1.441 +void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
   1.442 +  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
   1.443 +  append_code_stub(stub);
   1.444 +}
   1.445 +
   1.446 +void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
   1.447 +  add_debug_info_for_div0(code_offset(), info);
   1.448 +}
   1.449 +
   1.450 +void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
   1.451 +  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
   1.452 +  append_code_stub(stub);
   1.453 +}
   1.454 +
   1.455 +void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
   1.456 +  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
   1.457 +}
   1.458 +
   1.459 +
   1.460 +void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   1.461 +  verify_oop_map(op->info());
   1.462 +
   1.463 +  if (os::is_MP()) {
    1.464 +    // must align call sites; otherwise they can't be updated atomically on MP hardware
   1.465 +    align_call(op->code());
   1.466 +  }
   1.467 +
    1.468 +  // emit the static call stub out of line
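          +  // (it provides the patchable target used when the call is resolved)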
   1.469 +  emit_static_call_stub();
   1.470 +
   1.471 +  switch (op->code()) {
   1.472 +  case lir_static_call:
   1.473 +  case lir_dynamic_call:
   1.474 +    call(op, relocInfo::static_call_type);
   1.475 +    break;
   1.476 +  case lir_optvirtual_call:
   1.477 +    call(op, relocInfo::opt_virtual_call_type);
   1.478 +    break;
   1.479 +  case lir_icvirtual_call:
   1.480 +    ic_call(op);
   1.481 +    break;
   1.482 +  case lir_virtual_call:
   1.483 +    vtable_call(op);
   1.484 +    break;
   1.485 +  default:
   1.486 +    fatal(err_msg_res("unexpected op code: %s", op->name()));
   1.487 +    break;
   1.488 +  }
   1.489 +
   1.490 +  // JSR 292
   1.491 +  // Record if this method has MethodHandle invokes.
   1.492 +  if (op->is_method_handle_invoke()) {
   1.493 +    compilation()->set_has_method_handle_invokes(true);
   1.494 +  }
   1.495 +
   1.496 +#if defined(X86) && defined(TIERED)
    1.497 +  // C2 leaves the FPU stack dirty; clean it
   1.498 +  if (UseSSE < 2) {
   1.499 +    int i;
   1.500 +    for ( i = 1; i <= 7 ; i++ ) {
   1.501 +      ffree(i);
   1.502 +    }
   1.503 +    if (!op->result_opr()->is_float_kind()) {
   1.504 +      ffree(0);
   1.505 +    }
   1.506 +  }
   1.507 +#endif // X86 && TIERED
   1.508 +}
   1.509 +
   1.510 +
   1.511 +void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
   1.512 +  _masm->bind (*(op->label()));
   1.513 +}
   1.514 +
   1.515 +
   1.516 +void LIR_Assembler::emit_op1(LIR_Op1* op) {
   1.517 +  switch (op->code()) {
   1.518 +    case lir_move:
   1.519 +      if (op->move_kind() == lir_move_volatile) {
   1.520 +        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
   1.521 +        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
   1.522 +      } else {
   1.523 +        move_op(op->in_opr(), op->result_opr(), op->type(),
   1.524 +                op->patch_code(), op->info(), op->pop_fpu_stack(),
   1.525 +                op->move_kind() == lir_move_unaligned,
   1.526 +                op->move_kind() == lir_move_wide);
   1.527 +      }
   1.528 +      break;
   1.529 +
   1.530 +    case lir_prefetchr:
   1.531 +      prefetchr(op->in_opr());
   1.532 +      break;
   1.533 +
   1.534 +    case lir_prefetchw:
   1.535 +      prefetchw(op->in_opr());
   1.536 +      break;
   1.537 +
   1.538 +    case lir_roundfp: {
   1.539 +      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
   1.540 +      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
   1.541 +      break;
   1.542 +    }
   1.543 +
   1.544 +    case lir_return:
   1.545 +      return_op(op->in_opr());
   1.546 +      break;
   1.547 +
   1.548 +    case lir_safepoint:
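          +      // pad with a nop if the last debug-info entry ends exactly here, so the
          +      // safepoint gets a distinct pc offset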
   1.549 +      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
   1.550 +        _masm->nop();
   1.551 +      }
   1.552 +      safepoint_poll(op->in_opr(), op->info());
   1.553 +      break;
   1.554 +
   1.555 +    case lir_fxch:
   1.556 +      fxch(op->in_opr()->as_jint());
   1.557 +      break;
   1.558 +
   1.559 +    case lir_fld:
   1.560 +      fld(op->in_opr()->as_jint());
   1.561 +      break;
   1.562 +
   1.563 +    case lir_ffree:
   1.564 +      ffree(op->in_opr()->as_jint());
   1.565 +      break;
   1.566 +
   1.567 +    case lir_branch:
   1.568 +      break;
   1.569 +
   1.570 +    case lir_push:
   1.571 +      push(op->in_opr());
   1.572 +      break;
   1.573 +
   1.574 +    case lir_pop:
   1.575 +      pop(op->in_opr());
   1.576 +      break;
   1.577 +
   1.578 +    case lir_neg:
   1.579 +      negate(op->in_opr(), op->result_opr());
   1.580 +      break;
   1.581 +
   1.582 +    case lir_leal:
   1.583 +      leal(op->in_opr(), op->result_opr());
   1.584 +      break;
   1.585 +
   1.586 +    case lir_null_check:
   1.587 +      if (GenerateCompilerNullChecks) {
   1.588 +        add_debug_info_for_null_check_here(op->info());
   1.589 +
   1.590 +        if (op->in_opr()->is_single_cpu()) {
   1.591 +          _masm->null_check(op->in_opr()->as_register());
   1.592 +        } else {
   1.593 +          Unimplemented();
   1.594 +        }
   1.595 +      }
   1.596 +      break;
   1.597 +
   1.598 +    case lir_monaddr:
   1.599 +      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
   1.600 +      break;
   1.601 +
   1.602 +#ifdef SPARC
   1.603 +    case lir_pack64:
   1.604 +      pack64(op->in_opr(), op->result_opr());
   1.605 +      break;
   1.606 +
   1.607 +    case lir_unpack64:
   1.608 +      unpack64(op->in_opr(), op->result_opr());
   1.609 +      break;
   1.610 +#endif
   1.611 +
   1.612 +    case lir_unwind:
   1.613 +      unwind_op(op->in_opr());
   1.614 +      break;
   1.615 +
   1.616 +    default:
   1.617 +      Unimplemented();
   1.618 +      break;
   1.619 +  }
   1.620 +}
   1.621 +
   1.622 +
   1.623 +void LIR_Assembler::emit_op0(LIR_Op0* op) {
   1.624 +  switch (op->code()) {
   1.625 +    case lir_word_align: {
   1.626 +      while (code_offset() % BytesPerWord != 0) {
   1.627 +        _masm->nop();
   1.628 +      }
   1.629 +      break;
   1.630 +    }
   1.631 +
   1.632 +    case lir_nop:
   1.633 +      assert(op->info() == NULL, "not supported");
   1.634 +      _masm->nop();
   1.635 +      break;
   1.636 +
   1.637 +    case lir_label:
   1.638 +      Unimplemented();
   1.639 +      break;
   1.640 +
   1.641 +    case lir_build_frame:
   1.642 +      build_frame();
   1.643 +      break;
   1.644 +
   1.645 +    case lir_std_entry:
   1.646 +      // init offsets
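          +      // OSR_Entry defaults to the method entry; a later lir_osr_entry op overwrites it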
   1.647 +      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
   1.648 +      _masm->align(CodeEntryAlignment);
   1.649 +      if (needs_icache(compilation()->method())) {
   1.650 +        check_icache();
   1.651 +      }
   1.652 +      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
   1.653 +      _masm->verified_entry();
   1.654 +      build_frame();
   1.655 +      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
   1.656 +      break;
   1.657 +
   1.658 +    case lir_osr_entry:
   1.659 +      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
   1.660 +      osr_entry();
   1.661 +      break;
   1.662 +
   1.663 +    case lir_24bit_FPU:
   1.664 +      set_24bit_FPU();
   1.665 +      break;
   1.666 +
   1.667 +    case lir_reset_FPU:
   1.668 +      reset_FPU();
   1.669 +      break;
   1.670 +
   1.671 +    case lir_breakpoint:
   1.672 +      breakpoint();
   1.673 +      break;
   1.674 +
   1.675 +    case lir_fpop_raw:
   1.676 +      fpop();
   1.677 +      break;
   1.678 +
   1.679 +    case lir_membar:
   1.680 +      membar();
   1.681 +      break;
   1.682 +
   1.683 +    case lir_membar_acquire:
   1.684 +      membar_acquire();
   1.685 +      break;
   1.686 +
   1.687 +    case lir_membar_release:
   1.688 +      membar_release();
   1.689 +      break;
   1.690 +
   1.691 +    case lir_membar_loadload:
   1.692 +      membar_loadload();
   1.693 +      break;
   1.694 +
   1.695 +    case lir_membar_storestore:
   1.696 +      membar_storestore();
   1.697 +      break;
   1.698 +
   1.699 +    case lir_membar_loadstore:
   1.700 +      membar_loadstore();
   1.701 +      break;
   1.702 +
   1.703 +    case lir_membar_storeload:
   1.704 +      membar_storeload();
   1.705 +      break;
   1.706 +
   1.707 +    case lir_get_thread:
   1.708 +      get_thread(op->result_opr());
   1.709 +      break;
   1.710 +
   1.711 +    default:
   1.712 +      ShouldNotReachHere();
   1.713 +      break;
   1.714 +  }
   1.715 +}
   1.716 +
   1.717 +
   1.718 +void LIR_Assembler::emit_op2(LIR_Op2* op) {
   1.719 +  switch (op->code()) {
   1.720 +    case lir_cmp:
   1.721 +      if (op->info() != NULL) {
   1.722 +        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
   1.723 +               "shouldn't be codeemitinfo for non-address operands");
   1.724 +        add_debug_info_for_null_check_here(op->info()); // exception possible
   1.725 +      }
   1.726 +      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
   1.727 +      break;
   1.728 +
   1.729 +    case lir_cmp_l2i:
   1.730 +    case lir_cmp_fd2i:
   1.731 +    case lir_ucmp_fd2i:
   1.732 +      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
   1.733 +      break;
   1.734 +
   1.735 +    case lir_cmove:
   1.736 +      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
   1.737 +      break;
   1.738 +
   1.739 +    case lir_shl:
   1.740 +    case lir_shr:
   1.741 +    case lir_ushr:
   1.742 +      if (op->in_opr2()->is_constant()) {
   1.743 +        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
   1.744 +      } else {
   1.745 +        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
   1.746 +      }
   1.747 +      break;
   1.748 +
   1.749 +    case lir_add:
   1.750 +    case lir_sub:
   1.751 +    case lir_mul:
   1.752 +    case lir_mul_strictfp:
   1.753 +    case lir_div:
   1.754 +    case lir_div_strictfp:
   1.755 +    case lir_rem:
   1.756 +      assert(op->fpu_pop_count() < 2, "");
   1.757 +      arith_op(
   1.758 +        op->code(),
   1.759 +        op->in_opr1(),
   1.760 +        op->in_opr2(),
   1.761 +        op->result_opr(),
   1.762 +        op->info(),
   1.763 +        op->fpu_pop_count() == 1);
   1.764 +      break;
   1.765 +
   1.766 +    case lir_abs:
   1.767 +    case lir_sqrt:
   1.768 +    case lir_sin:
   1.769 +    case lir_tan:
   1.770 +    case lir_cos:
   1.771 +    case lir_log:
   1.772 +    case lir_log10:
   1.773 +    case lir_exp:
   1.774 +    case lir_pow:
   1.775 +      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
   1.776 +      break;
   1.777 +
   1.778 +    case lir_logic_and:
   1.779 +    case lir_logic_or:
   1.780 +    case lir_logic_xor:
   1.781 +      logic_op(
   1.782 +        op->code(),
   1.783 +        op->in_opr1(),
   1.784 +        op->in_opr2(),
   1.785 +        op->result_opr());
   1.786 +      break;
   1.787 +
   1.788 +    case lir_throw:
   1.789 +      throw_op(op->in_opr1(), op->in_opr2(), op->info());
   1.790 +      break;
   1.791 +
   1.792 +    case lir_xadd:
   1.793 +    case lir_xchg:
   1.794 +      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
   1.795 +      break;
   1.796 +
   1.797 +    default:
   1.798 +      Unimplemented();
   1.799 +      break;
   1.800 +  }
   1.801 +}
   1.802 +
   1.803 +
   1.804 +void LIR_Assembler::build_frame() {
   1.805 +  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
   1.806 +}
   1.807 +
   1.808 +
   1.809 +void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
   1.810 +  assert((src->is_single_fpu() && dest->is_single_stack()) ||
   1.811 +         (src->is_double_fpu() && dest->is_double_stack()),
   1.812 +         "round_fp: rounds register -> stack location");
   1.813 +
   1.814 +  reg2stack (src, dest, src->type(), pop_fpu_stack);
   1.815 +}
   1.816 +
   1.817 +
   1.818 +void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
   1.819 +  if (src->is_register()) {
   1.820 +    if (dest->is_register()) {
   1.821 +      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
   1.822 +      reg2reg(src,  dest);
   1.823 +    } else if (dest->is_stack()) {
   1.824 +      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
   1.825 +      reg2stack(src, dest, type, pop_fpu_stack);
   1.826 +    } else if (dest->is_address()) {
   1.827 +      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
   1.828 +    } else {
   1.829 +      ShouldNotReachHere();
   1.830 +    }
   1.831 +
   1.832 +  } else if (src->is_stack()) {
   1.833 +    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
   1.834 +    if (dest->is_register()) {
   1.835 +      stack2reg(src, dest, type);
   1.836 +    } else if (dest->is_stack()) {
   1.837 +      stack2stack(src, dest, type);
   1.838 +    } else {
   1.839 +      ShouldNotReachHere();
   1.840 +    }
   1.841 +
   1.842 +  } else if (src->is_constant()) {
   1.843 +    if (dest->is_register()) {
   1.844 +      const2reg(src, dest, patch_code, info); // patching is possible
   1.845 +    } else if (dest->is_stack()) {
   1.846 +      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
   1.847 +      const2stack(src, dest);
   1.848 +    } else if (dest->is_address()) {
   1.849 +      assert(patch_code == lir_patch_none, "no patching allowed here");
   1.850 +      const2mem(src, dest, type, info, wide);
   1.851 +    } else {
   1.852 +      ShouldNotReachHere();
   1.853 +    }
   1.854 +
   1.855 +  } else if (src->is_address()) {
   1.856 +    mem2reg(src, dest, type, patch_code, info, wide, unaligned);
   1.857 +
   1.858 +  } else {
   1.859 +    ShouldNotReachHere();
   1.860 +  }
   1.861 +}
   1.862 +
   1.863 +
   1.864 +void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
   1.865 +#ifndef PRODUCT
   1.866 +  if (VerifyOops) {
   1.867 +    OopMapStream s(info->oop_map());
   1.868 +    while (!s.is_done()) {
   1.869 +      OopMapValue v = s.current();
   1.870 +      if (v.is_oop()) {
   1.871 +        VMReg r = v.reg();
   1.872 +        if (!r->is_stack()) {
   1.873 +          stringStream st;
   1.874 +          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
   1.875 +#ifdef SPARC
   1.876 +          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
   1.877 +#else
   1.878 +          _masm->verify_oop(r->as_Register());
   1.879 +#endif
   1.880 +        } else {
   1.881 +          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
   1.882 +        }
   1.883 +      }
   1.884 +      check_codespace();
   1.885 +      CHECK_BAILOUT();
   1.886 +
   1.887 +      s.next();
   1.888 +    }
   1.889 +  }
   1.890 +#endif
   1.891 +}
