src/share/vm/c1/c1_LIRAssembler.cpp

changeset 435
a61af66fc99e
child 739
dc7f315e41f7
child 777
37f87013dfd8
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,794 @@
     1.4 +/*
     1.5 + * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +# include "incls/_precompiled.incl"
    1.29 +# include "incls/_c1_LIRAssembler.cpp.incl"
    1.30 +
    1.31 +
// Finishes emission of a patched instruction sequence: pads the patched
// region so that a native call can later be written over it atomically,
// installs the stub, and queues it for out-of-line emission.
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  // Cross-check that the kind of patching stub is consistent with the
  // bytecode that required patching.
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    // field accesses are the only bytecodes that use access_field stubs
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    // klass-loading stubs cover allocation, type checks and class literals
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}
    1.72 +
    1.73 +
    1.74 +//---------------------------------------------------------------
    1.75 +
    1.76 +
// Sets up an assembler bound to the given compilation; the macro
// assembler and frame map are owned by the Compilation object.
LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)       // no buffered non-safepoint debug info yet
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}
    1.87 +
    1.88 +
// Nothing to release explicitly; all assembler state is allocated in
// the compilation's resource area.
LIR_Assembler::~LIR_Assembler() {
}
    1.91 +
    1.92 +
// Queues a patching stub for later emission with the other slow-case stubs.
void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
    1.96 +
    1.97 +
    1.98 +void LIR_Assembler::check_codespace() {
    1.99 +  CodeSection* cs = _masm->code_section();
   1.100 +  if (cs->remaining() < (int)(1*K)) {
   1.101 +    BAILOUT("CodeBuffer overflow");
   1.102 +  }
   1.103 +}
   1.104 +
   1.105 +
// Registers a slow-case stub; its code is emitted later by emit_slow_case_stubs().
void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}
   1.109 +
   1.110 +void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
   1.111 +  for (int m = 0; m < stub_list->length(); m++) {
   1.112 +    CodeStub* s = (*stub_list)[m];
   1.113 +
   1.114 +    check_codespace();
   1.115 +    CHECK_BAILOUT();
   1.116 +
   1.117 +#ifndef PRODUCT
   1.118 +    if (CommentedAssembly) {
   1.119 +      stringStream st;
   1.120 +      s->print_name(&st);
   1.121 +      st.print(" slow case");
   1.122 +      _masm->block_comment(st.as_string());
   1.123 +    }
   1.124 +#endif
   1.125 +    s->emit_code(this);
   1.126 +#ifdef ASSERT
   1.127 +    s->assert_no_unbound_labels();
   1.128 +#endif
   1.129 +  }
   1.130 +}
   1.131 +
   1.132 +
// Emits code for all slow-case stubs collected during LIR emission.
void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}
   1.136 +
   1.137 +
// Only non-static methods need an inline-cache check at the entry;
// static calls are never dispatched through an IC.
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}
   1.141 +
   1.142 +
// Current emission offset within the code buffer.
int LIR_Assembler::code_offset() const {
  return _masm->offset();
}
   1.146 +
   1.147 +
// Current emission address within the code buffer.
address LIR_Assembler::pc() const {
  return _masm->pc();
}
   1.151 +
   1.152 +
// Emits adapter code for exception handler entries whose entry code has
// not been generated yet and records the pc offset of each handler.
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          // non-trivial adapter code (more than just the trailing branch):
          // emit it here and let the handler enter through it
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          // no adapter needed: enter the handler block directly
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
   1.181 +
   1.182 +
   1.183 +void LIR_Assembler::emit_code(BlockList* hir) {
   1.184 +  if (PrintLIR) {
   1.185 +    print_LIR(hir);
   1.186 +  }
   1.187 +
   1.188 +  int n = hir->length();
   1.189 +  for (int i = 0; i < n; i++) {
   1.190 +    emit_block(hir->at(i));
   1.191 +    CHECK_BAILOUT();
   1.192 +  }
   1.193 +
   1.194 +  flush_debug_info(code_offset());
   1.195 +
   1.196 +  DEBUG_ONLY(check_no_unbound_labels());
   1.197 +}
   1.198 +
   1.199 +
// Emits machine code for a single basic block, including alignment for
// backward branch targets and bookkeeping for exception handler entries.
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  // on IA32 the stack pointer must not drift between blocks
  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
   1.235 +
   1.236 +
// Emits machine code for a LIR operation list, after giving the
// platform-specific peephole optimizer a pass over it.
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't record out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    // track pc ranges of non-safepoint instructions for precise profiles
    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}
   1.278 +
   1.279 +#ifdef ASSERT
   1.280 +void LIR_Assembler::check_no_unbound_labels() {
   1.281 +  CHECK_BAILOUT();
   1.282 +
   1.283 +  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
   1.284 +    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
   1.285 +      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
   1.286 +      assert(false, "unbound label");
   1.287 +    }
   1.288 +  }
   1.289 +}
   1.290 +#endif
   1.291 +
   1.292 +//----------------------------------debug info--------------------------------
   1.293 +
   1.294 +
   1.295 +void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
   1.296 +  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
   1.297 +  int pc_offset = code_offset();
   1.298 +  flush_debug_info(pc_offset);
   1.299 +  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   1.300 +  if (info->exception_handlers() != NULL) {
   1.301 +    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
   1.302 +  }
   1.303 +}
   1.304 +
   1.305 +
   1.306 +void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
   1.307 +  flush_debug_info(pc_offset);
   1.308 +  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   1.309 +  if (cinfo->exception_handlers() != NULL) {
   1.310 +    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   1.311 +  }
   1.312 +}
   1.313 +
   1.314 +static ValueStack* debug_info(Instruction* ins) {
   1.315 +  StateSplit* ss = ins->as_StateSplit();
   1.316 +  if (ss != NULL) return ss->state();
   1.317 +  return ins->lock_stack();
   1.318 +}
   1.319 +
// Tracks debug info for instructions between safepoints.  Rather than
// recording a non-safepoint entry per instruction, the most recent
// (source, value-stack) pair is buffered and flushed lazily once a
// different one appears at a later pc.
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    // same source instruction as before: just extend its pc range
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (_pending_non_safepoint->bci() == src->bci() &&
        debug_info(_pending_non_safepoint) == vstack) {
      // equivalent debug info: keep the pending entry, extend its range
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
   1.348 +
   1.349 +// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
   1.350 +// Return NULL if n is too large.
   1.351 +// Returns the caller_bci for the next-younger state, also.
// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  // walk t up n levels of the caller chain; NULL means n is out of range
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  // advance s and t in lock-step; since t leads s by exactly n levels,
  // when t reaches the oldest state, s is the n-th oldest state
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = s->scope()->caller_bci();
    s = s->caller_state();
  }
}
   1.367 +
// Flushes the buffered non-safepoint debug info to the recorder,
// describing every scope from the oldest caller inward.
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = _pending_non_safepoint->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    debug_info->describe_scope(pc_offset, scope->method(), s_bci);
  }

  debug_info->end_non_safepoint(pc_offset);
}
   1.389 +
   1.390 +
// Convenience wrapper: implicit null check at the current code offset.
void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}
   1.394 +
   1.395 +void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
   1.396 +  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
   1.397 +  emit_code_stub(stub);
   1.398 +}
   1.399 +
// Convenience wrapper: division-by-zero check at the current code offset.
void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}
   1.403 +
   1.404 +void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
   1.405 +  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
   1.406 +  emit_code_stub(stub);
   1.407 +}
   1.408 +
// Emits a runtime call described by the LIR_OpRTCall operation.
void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
   1.412 +
   1.413 +
// Emits a Java call (static, optimized virtual, inline-cache or vtable
// dispatch), including the out-of-line static call stub and the
// debug/oop-map bookkeeping carried in op->info().
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align calls sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
    call(op->addr(), relocInfo::static_call_type, op->info());
    break;
  case lir_optvirtual_call:
    call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
    break;
  case lir_icvirtual_call:
    ic_call(op->addr(), op->info());
    break;
  case lir_virtual_call:
    vtable_call(op->vtable_offset(), op->info());
    break;
  default: ShouldNotReachHere();
  }
#if defined(IA32) && defined(TIERED)
  // C2 leave fpu stack dirty clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      // only keep st0 if it holds a float/double result
      ffree(0);
    }
  }
#endif // IA32 && TIERED
}
   1.453 +
   1.454 +
   1.455 +void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
   1.456 +  _masm->bind (*(op->label()));
   1.457 +}
   1.458 +
   1.459 +
// Dispatches a one-operand LIR operation to the matching
// platform-specific emitter.
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      // make sure the safepoint pc is distinct from the previous debug entry
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      // branches are emitted via emit_opBranch, nothing to do here
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
   1.549 +
   1.550 +
// Dispatches a zero-operand LIR operation (entries, membars, FPU-mode
// changes, etc.) to the matching platform-specific emitter.
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      // pad with nops until the code offset is word aligned
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
   1.628 +
   1.629 +
// Dispatches a two-operand LIR operation (compares, arithmetic, shifts,
// logic ops, intrinsics, throw/unwind) to the platform-specific emitter.
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      // constant shift counts get the immediate form; variable counts
      // may need a temporary register
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
    case lir_unwind:
      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
      break;

    default:
      Unimplemented();
      break;
  }
}
   1.708 +
   1.709 +
// Emits the method prolog that allocates the fixed-size stack frame.
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}
   1.713 +
   1.714 +
   1.715 +void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
   1.716 +  assert((src->is_single_fpu() && dest->is_single_stack()) ||
   1.717 +         (src->is_double_fpu() && dest->is_double_stack()),
   1.718 +         "round_fp: rounds register -> stack location");
   1.719 +
   1.720 +  reg2stack (src, dest, src->type(), pop_fpu_stack);
   1.721 +}
   1.722 +
   1.723 +
// Central move dispatcher: routes a move between register, stack,
// constant and memory operands to the matching platform emitter.
// Only const->reg, const->mem and mem<->reg moves may carry patching
// or debug info.
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
   1.768 +
   1.769 +
   1.770 +void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
   1.771 +#ifndef PRODUCT
   1.772 +  if (VerifyOopMaps || VerifyOops) {
   1.773 +    bool v = VerifyOops;
   1.774 +    VerifyOops = true;
   1.775 +    OopMapStream s(info->oop_map());
   1.776 +    while (!s.is_done()) {
   1.777 +      OopMapValue v = s.current();
   1.778 +      if (v.is_oop()) {
   1.779 +        VMReg r = v.reg();
   1.780 +        if (!r->is_stack()) {
   1.781 +          stringStream st;
   1.782 +          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
   1.783 +#ifdef SPARC
   1.784 +          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
   1.785 +#else
   1.786 +          _masm->verify_oop(r->as_Register());
   1.787 +#endif
   1.788 +        } else {
   1.789 +          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
   1.790 +        }
   1.791 +      }
   1.792 +      s.next();
   1.793 +    }
   1.794 +    VerifyOops = v;
   1.795 +  }
   1.796 +#endif
   1.797 +}

mercurial