src/share/vm/c1/c1_GraphBuilder.cpp

changeset 435:a61af66fc99e
child     894:3a86a8dcf27c
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,3835 @@
     1.4 +/*
     1.5 + * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "incls/_precompiled.incl"
    1.29 +#include "incls/_c1_GraphBuilder.cpp.incl"
    1.30 +
    1.31 +class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
    1.32 + private:
    1.33 +  Compilation* _compilation;
    1.34 +  IRScope*     _scope;
    1.35 +
    1.36 +  BlockList    _blocks;                // internal list of all blocks
    1.37 +  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
    1.38 +
    1.39 +  // fields used by mark_loops
    1.40 +  BitMap       _active;                // for iteration of control flow graph
    1.41 +  BitMap       _visited;               // for iteration of control flow graph
    1.42 +  intArray     _loop_map;              // caches the information if a block is contained in a loop
    1.43 +  int          _next_loop_index;       // next free loop number
    1.44 +  int          _next_block_number;     // for reverse postorder numbering of blocks
    1.45 +
    1.46 +  // accessors
    1.47 +  Compilation*  compilation() const              { return _compilation; }
    1.48 +  IRScope*      scope() const                    { return _scope; }
    1.49 +  ciMethod*     method() const                   { return scope()->method(); }
    1.50 +  XHandlers*    xhandlers() const                { return scope()->xhandlers(); }
    1.51 +
    1.52 +  // unified bailout support
    1.53 +  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
    1.54 +  bool          bailed_out() const               { return compilation()->bailed_out(); }
    1.55 +
    1.56 +  // helper functions
    1.57 +  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
    1.58 +  void handle_exceptions(BlockBegin* current, int cur_bci);
    1.59 +  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
    1.60 +  void store_one(BlockBegin* current, int local);
    1.61 +  void store_two(BlockBegin* current, int local);
    1.62 +  void set_entries(int osr_bci);
    1.63 +  void set_leaders();
    1.64 +
    1.65 +  void make_loop_header(BlockBegin* block);
    1.66 +  void mark_loops();
    1.67 +  int  mark_loops(BlockBegin* b, bool in_subroutine);
    1.68 +
    1.69 +  // debugging
    1.70 +#ifndef PRODUCT
    1.71 +  void print();
    1.72 +#endif
    1.73 +
    1.74 + public:
    1.75 +  // creation
    1.76 +  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);
    1.77 +
    1.78 +  // accessors for GraphBuilder
    1.79 +  BlockList*    bci2block() const                { return _bci2block; }
    1.80 +};
    1.81 +
    1.82 +
    1.83 +// Implementation of BlockListBuilder
    1.84 +
    1.85 +BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
    1.86 + : _compilation(compilation)
    1.87 + , _scope(scope)
    1.88 + , _blocks(16)
    1.89 + , _bci2block(new BlockList(scope->method()->code_size(), NULL))
    1.90 + , _next_block_number(0)
    1.91 + , _active()         // size not known yet
    1.92 + , _visited()        // size not known yet
    1.93 + , _next_loop_index(0)
    1.94 + , _loop_map() // size not known yet
    1.95 +{
    1.96 +  set_entries(osr_bci);
    1.97 +  set_leaders();
    1.98 +  CHECK_BAILOUT();
    1.99 +
   1.100 +  mark_loops();
   1.101 +  NOT_PRODUCT(if (PrintInitialBlockList) print());
   1.102 +
   1.103 +#ifndef PRODUCT
   1.104 +  if (PrintCFGToFile) {
   1.105 +    stringStream title;
   1.106 +    title.print("BlockListBuilder ");
   1.107 +    scope->method()->print_name(&title);
   1.108 +    CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
   1.109 +  }
   1.110 +#endif
   1.111 +}
   1.112 +
   1.113 +
   1.114 +void BlockListBuilder::set_entries(int osr_bci) {
   1.115 +  // generate start blocks
   1.116 +  BlockBegin* std_entry = make_block_at(0, NULL);
   1.117 +  if (scope()->caller() == NULL) {
   1.118 +    std_entry->set(BlockBegin::std_entry_flag);
   1.119 +  }
   1.120 +  if (osr_bci != -1) {
   1.121 +    BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
   1.122 +    osr_entry->set(BlockBegin::osr_entry_flag);
   1.123 +  }
   1.124 +
   1.125 +  // generate exception entry blocks
   1.126 +  XHandlers* list = xhandlers();
   1.127 +  const int n = list->length();
   1.128 +  for (int i = 0; i < n; i++) {
   1.129 +    XHandler* h = list->handler_at(i);
   1.130 +    BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
   1.131 +    entry->set(BlockBegin::exception_entry_flag);
   1.132 +    h->set_entry_block(entry);
   1.133 +  }
   1.134 +}
   1.135 +
   1.136 +
   1.137 +BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
   1.138 +  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");
   1.139 +
   1.140 +  BlockBegin* block = _bci2block->at(cur_bci);
   1.141 +  if (block == NULL) {
   1.142 +    block = new BlockBegin(cur_bci);
   1.143 +    block->init_stores_to_locals(method()->max_locals());
   1.144 +    _bci2block->at_put(cur_bci, block);
   1.145 +    _blocks.append(block);
   1.146 +
   1.147 +    assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
   1.148 +  }
   1.149 +
   1.150 +  if (predecessor != NULL) {
   1.151 +    if (block->is_set(BlockBegin::exception_entry_flag)) {
   1.152 +      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
   1.153 +    }
   1.154 +
   1.155 +    predecessor->add_successor(block);
   1.156 +    block->increment_total_preds();
   1.157 +  }
   1.158 +
   1.159 +  return block;
   1.160 +}
   1.161 +
   1.162 +
   1.163 +inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
   1.164 +  current->stores_to_locals().set_bit(local);
   1.165 +}
   1.166 +inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
   1.167 +  store_one(current, local);
   1.168 +  store_one(current, local + 1);
   1.169 +}
   1.170 +
   1.171 +
   1.172 +void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
   1.173 +  // Draws edges from a block to its exception handlers
   1.174 +  XHandlers* list = xhandlers();
   1.175 +  const int n = list->length();
   1.176 +
   1.177 +  for (int i = 0; i < n; i++) {
   1.178 +    XHandler* h = list->handler_at(i);
   1.179 +
   1.180 +    if (h->covers(cur_bci)) {
   1.181 +      BlockBegin* entry = h->entry_block();
   1.182 +      assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
   1.183 +      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");
   1.184 +
   1.185 +      // add each exception handler only once
   1.186 +      if (!current->is_successor(entry)) {
   1.187 +        current->add_successor(entry);
   1.188 +        entry->increment_total_preds();
   1.189 +      }
   1.190 +
     1.191 +      // stop at a catch-all handler (catch_type == 0); later handlers can never be reached
   1.192 +      if (h->catch_type() == 0) break;
   1.193 +    }
   1.194 +  }
   1.195 +}
   1.196 +
   1.197 +void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
     1.198 +  // start a new block after the jsr bytecode and link it into the cfg
   1.199 +  make_block_at(next_bci, current);
   1.200 +
     1.201 +  // start a new block at the subroutine entry and mark it with a special flag
   1.202 +  BlockBegin* sr_block = make_block_at(sr_bci, current);
   1.203 +  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
   1.204 +    sr_block->set(BlockBegin::subroutine_entry_flag);
   1.205 +  }
   1.206 +}
   1.207 +
   1.208 +
   1.209 +void BlockListBuilder::set_leaders() {
   1.210 +  bool has_xhandlers = xhandlers()->has_handlers();
   1.211 +  BlockBegin* current = NULL;
   1.212 +
     1.213 +  // Knowing in advance which bcis start a new block simplifies the analysis:
     1.214 +  // without it, a backward branch could jump to a bci where no block was created
     1.215 +  // during bytecode iteration, which would require creating a new block at the
     1.216 +  // branch target and patching the successor lists.
   1.217 +  BitMap bci_block_start = method()->bci_block_start();
   1.218 +
   1.219 +  ciBytecodeStream s(method());
   1.220 +  while (s.next() != ciBytecodeStream::EOBC()) {
   1.221 +    int cur_bci = s.cur_bci();
   1.222 +
   1.223 +    if (bci_block_start.at(cur_bci)) {
   1.224 +      current = make_block_at(cur_bci, current);
   1.225 +    }
   1.226 +    assert(current != NULL, "must have current block");
   1.227 +
   1.228 +    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
   1.229 +      handle_exceptions(current, cur_bci);
   1.230 +    }
   1.231 +
   1.232 +    switch (s.cur_bc()) {
   1.233 +      // track stores to local variables for selective creation of phi functions
   1.234 +      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
   1.235 +      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
   1.236 +      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
   1.237 +      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
   1.238 +      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
   1.239 +      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
   1.240 +      case Bytecodes::_istore_0: store_one(current, 0); break;
   1.241 +      case Bytecodes::_istore_1: store_one(current, 1); break;
   1.242 +      case Bytecodes::_istore_2: store_one(current, 2); break;
   1.243 +      case Bytecodes::_istore_3: store_one(current, 3); break;
   1.244 +      case Bytecodes::_lstore_0: store_two(current, 0); break;
   1.245 +      case Bytecodes::_lstore_1: store_two(current, 1); break;
   1.246 +      case Bytecodes::_lstore_2: store_two(current, 2); break;
   1.247 +      case Bytecodes::_lstore_3: store_two(current, 3); break;
   1.248 +      case Bytecodes::_fstore_0: store_one(current, 0); break;
   1.249 +      case Bytecodes::_fstore_1: store_one(current, 1); break;
   1.250 +      case Bytecodes::_fstore_2: store_one(current, 2); break;
   1.251 +      case Bytecodes::_fstore_3: store_one(current, 3); break;
   1.252 +      case Bytecodes::_dstore_0: store_two(current, 0); break;
   1.253 +      case Bytecodes::_dstore_1: store_two(current, 1); break;
   1.254 +      case Bytecodes::_dstore_2: store_two(current, 2); break;
   1.255 +      case Bytecodes::_dstore_3: store_two(current, 3); break;
   1.256 +      case Bytecodes::_astore_0: store_one(current, 0); break;
   1.257 +      case Bytecodes::_astore_1: store_one(current, 1); break;
   1.258 +      case Bytecodes::_astore_2: store_one(current, 2); break;
   1.259 +      case Bytecodes::_astore_3: store_one(current, 3); break;
   1.260 +
   1.261 +      // track bytecodes that affect the control flow
   1.262 +      case Bytecodes::_athrow:  // fall through
   1.263 +      case Bytecodes::_ret:     // fall through
   1.264 +      case Bytecodes::_ireturn: // fall through
   1.265 +      case Bytecodes::_lreturn: // fall through
   1.266 +      case Bytecodes::_freturn: // fall through
   1.267 +      case Bytecodes::_dreturn: // fall through
   1.268 +      case Bytecodes::_areturn: // fall through
   1.269 +      case Bytecodes::_return:
   1.270 +        current = NULL;
   1.271 +        break;
   1.272 +
   1.273 +      case Bytecodes::_ifeq:      // fall through
   1.274 +      case Bytecodes::_ifne:      // fall through
   1.275 +      case Bytecodes::_iflt:      // fall through
   1.276 +      case Bytecodes::_ifge:      // fall through
   1.277 +      case Bytecodes::_ifgt:      // fall through
   1.278 +      case Bytecodes::_ifle:      // fall through
   1.279 +      case Bytecodes::_if_icmpeq: // fall through
   1.280 +      case Bytecodes::_if_icmpne: // fall through
   1.281 +      case Bytecodes::_if_icmplt: // fall through
   1.282 +      case Bytecodes::_if_icmpge: // fall through
   1.283 +      case Bytecodes::_if_icmpgt: // fall through
   1.284 +      case Bytecodes::_if_icmple: // fall through
   1.285 +      case Bytecodes::_if_acmpeq: // fall through
   1.286 +      case Bytecodes::_if_acmpne: // fall through
   1.287 +      case Bytecodes::_ifnull:    // fall through
   1.288 +      case Bytecodes::_ifnonnull:
   1.289 +        make_block_at(s.next_bci(), current);
   1.290 +        make_block_at(s.get_dest(), current);
   1.291 +        current = NULL;
   1.292 +        break;
   1.293 +
   1.294 +      case Bytecodes::_goto:
   1.295 +        make_block_at(s.get_dest(), current);
   1.296 +        current = NULL;
   1.297 +        break;
   1.298 +
   1.299 +      case Bytecodes::_goto_w:
   1.300 +        make_block_at(s.get_far_dest(), current);
   1.301 +        current = NULL;
   1.302 +        break;
   1.303 +
   1.304 +      case Bytecodes::_jsr:
   1.305 +        handle_jsr(current, s.get_dest(), s.next_bci());
   1.306 +        current = NULL;
   1.307 +        break;
   1.308 +
   1.309 +      case Bytecodes::_jsr_w:
   1.310 +        handle_jsr(current, s.get_far_dest(), s.next_bci());
   1.311 +        current = NULL;
   1.312 +        break;
   1.313 +
   1.314 +      case Bytecodes::_tableswitch: {
   1.315 +        // set block for each case
   1.316 +        Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp());
   1.317 +        int l = switch_->length();
   1.318 +        for (int i = 0; i < l; i++) {
   1.319 +          make_block_at(cur_bci + switch_->dest_offset_at(i), current);
   1.320 +        }
   1.321 +        make_block_at(cur_bci + switch_->default_offset(), current);
   1.322 +        current = NULL;
   1.323 +        break;
   1.324 +      }
   1.325 +
   1.326 +      case Bytecodes::_lookupswitch: {
   1.327 +        // set block for each case
   1.328 +        Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
   1.329 +        int l = switch_->number_of_pairs();
   1.330 +        for (int i = 0; i < l; i++) {
   1.331 +          make_block_at(cur_bci + switch_->pair_at(i)->offset(), current);
   1.332 +        }
   1.333 +        make_block_at(cur_bci + switch_->default_offset(), current);
   1.334 +        current = NULL;
   1.335 +        break;
   1.336 +      }
   1.337 +    }
   1.338 +  }
   1.339 +}
   1.340 +
   1.341 +
   1.342 +void BlockListBuilder::mark_loops() {
   1.343 +  ResourceMark rm;
   1.344 +
   1.345 +  _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
   1.346 +  _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
   1.347 +  _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
   1.348 +  _next_loop_index = 0;
   1.349 +  _next_block_number = _blocks.length();
   1.350 +
   1.351 +  // recursively iterate the control flow graph
   1.352 +  mark_loops(_bci2block->at(0), false);
   1.353 +  assert(_next_block_number >= 0, "invalid block numbers");
   1.354 +}
   1.355 +
   1.356 +void BlockListBuilder::make_loop_header(BlockBegin* block) {
   1.357 +  if (block->is_set(BlockBegin::exception_entry_flag)) {
   1.358 +    // exception edges may look like loops but don't mark them as such
   1.359 +    // since it screws up block ordering.
   1.360 +    return;
   1.361 +  }
   1.362 +  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
   1.363 +    block->set(BlockBegin::parser_loop_header_flag);
   1.364 +
   1.365 +    assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
   1.366 +    assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
   1.367 +    _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
   1.368 +    if (_next_loop_index < 31) _next_loop_index++;
   1.369 +  } else {
   1.370 +    // block already marked as loop header
   1.371 +    assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
   1.372 +  }
   1.373 +}
   1.374 +
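          +// Note on the encoding used by mark_loops: each loop header gets its own
          +// bit in _loop_map (see make_loop_header), and the depth-first walk below
          +// ORs together the bits of all loops that contain a block. When the walk
          +// leaves a loop header, it clears that header's bit again so the bit does
          +// not leak to blocks outside the loop. Bit 31 is shared by all loops past
          +// the 31st and is therefore never cleared (see the >= 0 check below).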
   1.375 +int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
   1.376 +  int block_id = block->block_id();
   1.377 +
   1.378 +  if (_visited.at(block_id)) {
   1.379 +    if (_active.at(block_id)) {
   1.380 +      // reached block via backward branch
   1.381 +      make_loop_header(block);
   1.382 +    }
   1.383 +    // return cached loop information for this block
   1.384 +    return _loop_map.at(block_id);
   1.385 +  }
   1.386 +
   1.387 +  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
   1.388 +    in_subroutine = true;
   1.389 +  }
   1.390 +
   1.391 +  // set active and visited bits before successors are processed
   1.392 +  _visited.set_bit(block_id);
   1.393 +  _active.set_bit(block_id);
   1.394 +
   1.395 +  intptr_t loop_state = 0;
   1.396 +  for (int i = block->number_of_sux() - 1; i >= 0; i--) {
   1.397 +    // recursively process all successors
   1.398 +    loop_state |= mark_loops(block->sux_at(i), in_subroutine);
   1.399 +  }
   1.400 +
   1.401 +  // clear active-bit after all successors are processed
   1.402 +  _active.clear_bit(block_id);
   1.403 +
   1.404 +  // reverse-post-order numbering of all blocks
   1.405 +  block->set_depth_first_number(_next_block_number);
   1.406 +  _next_block_number--;
   1.407 +
     1.408 +  if (loop_state != 0 || in_subroutine) {
   1.409 +    // block is contained at least in one loop, so phi functions are necessary
   1.410 +    // phi functions are also necessary for all locals stored in a subroutine
   1.411 +    scope()->requires_phi_function().set_union(block->stores_to_locals());
   1.412 +  }
   1.413 +
   1.414 +  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
   1.415 +    int header_loop_state = _loop_map.at(block_id);
   1.416 +    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");
   1.417 +
     1.418 +    // If the highest bit is set (i.e. the integer value is negative), the method
     1.419 +    // has 32 or more loops. That bit is shared by several loops, so it is never cleared.
   1.420 +    if (header_loop_state >= 0) {
   1.421 +      clear_bits(loop_state, header_loop_state);
   1.422 +    }
   1.423 +  }
   1.424 +
   1.425 +  // cache and return loop information for this block
   1.426 +  _loop_map.at_put(block_id, loop_state);
   1.427 +  return loop_state;
   1.428 +}
   1.429 +
   1.430 +
   1.431 +#ifndef PRODUCT
   1.432 +
   1.433 +int compare_depth_first(BlockBegin** a, BlockBegin** b) {
   1.434 +  return (*a)->depth_first_number() - (*b)->depth_first_number();
   1.435 +}
   1.436 +
   1.437 +void BlockListBuilder::print() {
   1.438 +  tty->print("----- initial block list of BlockListBuilder for method ");
   1.439 +  method()->print_short_name();
   1.440 +  tty->cr();
   1.441 +
   1.442 +  // better readability if blocks are sorted in processing order
   1.443 +  _blocks.sort(compare_depth_first);
   1.444 +
   1.445 +  for (int i = 0; i < _blocks.length(); i++) {
   1.446 +    BlockBegin* cur = _blocks.at(i);
   1.447 +    tty->print("%4d: B%-4d bci: %-4d  preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
   1.448 +
   1.449 +    tty->print(cur->is_set(BlockBegin::std_entry_flag)               ? " std" : "    ");
   1.450 +    tty->print(cur->is_set(BlockBegin::osr_entry_flag)               ? " osr" : "    ");
   1.451 +    tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
   1.452 +    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)        ? " sr" : "   ");
   1.453 +    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag)      ? " lh" : "   ");
   1.454 +
   1.455 +    if (cur->number_of_sux() > 0) {
   1.456 +      tty->print("    sux: ");
   1.457 +      for (int j = 0; j < cur->number_of_sux(); j++) {
   1.458 +        BlockBegin* sux = cur->sux_at(j);
   1.459 +        tty->print("B%d ", sux->block_id());
   1.460 +      }
   1.461 +    }
   1.462 +    tty->cr();
   1.463 +  }
   1.464 +}
   1.465 +
   1.466 +#endif
   1.467 +
   1.468 +
   1.469 +// A simple growable array of Values indexed by ciFields
   1.470 +class FieldBuffer: public CompilationResourceObj {
   1.471 + private:
   1.472 +  GrowableArray<Value> _values;
   1.473 +
   1.474 + public:
   1.475 +  FieldBuffer() {}
   1.476 +
   1.477 +  void kill() {
   1.478 +    _values.trunc_to(0);
   1.479 +  }
   1.480 +
   1.481 +  Value at(ciField* field) {
   1.482 +    assert(field->holder()->is_loaded(), "must be a loaded field");
   1.483 +    int offset = field->offset();
   1.484 +    if (offset < _values.length()) {
   1.485 +      return _values.at(offset);
   1.486 +    } else {
   1.487 +      return NULL;
   1.488 +    }
   1.489 +  }
   1.490 +
   1.491 +  void at_put(ciField* field, Value value) {
   1.492 +    assert(field->holder()->is_loaded(), "must be a loaded field");
   1.493 +    int offset = field->offset();
   1.494 +    _values.at_put_grow(offset, value, NULL);
   1.495 +  }
   1.496 +
   1.497 +};
   1.498 +
   1.499 +
     1.500 +// MemoryBuffer is a fairly simple model of the current state of memory.
   1.501 +// It partitions memory into several pieces.  The first piece is
   1.502 +// generic memory where little is known about the owner of the memory.
   1.503 +// This is conceptually represented by the tuple <O, F, V> which says
   1.504 +// that the field F of object O has value V.  This is flattened so
   1.505 +// that F is represented by the offset of the field and the parallel
   1.506 +// arrays _objects and _values are used for O and V.  Loads of O.F can
   1.507 +// simply use V.  Newly allocated objects are kept in a separate list
   1.508 +// along with a parallel array for each object which represents the
   1.509 +// current value of its fields.  Stores of the default value to fields
   1.510 +// which have never been stored to before are eliminated since they
   1.511 +// are redundant.  Once newly allocated objects are stored into
   1.512 +// another object or they are passed out of the current compile they
   1.513 +// are treated like generic memory.
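          +//
          +// Example (editor's illustration): for "p = new Point(); p.x = 3; v = p.x"
          +// the load of p.x is folded to the recorded value 3. A first store of a
          +// default value, e.g. "p.y = 0", is dropped entirely because a freshly
          +// allocated object's fields already hold their default values. Once p is
          +// stored into another object ("q.f = p") it escapes and is treated as
          +// generic memory again.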
   1.514 +
   1.515 +class MemoryBuffer: public CompilationResourceObj {
   1.516 + private:
   1.517 +  FieldBuffer                 _values;
   1.518 +  GrowableArray<Value>        _objects;
   1.519 +  GrowableArray<Value>        _newobjects;
   1.520 +  GrowableArray<FieldBuffer*> _fields;
   1.521 +
   1.522 + public:
   1.523 +  MemoryBuffer() {}
   1.524 +
   1.525 +  StoreField* store(StoreField* st) {
   1.526 +    if (!EliminateFieldAccess) {
   1.527 +      return st;
   1.528 +    }
   1.529 +
   1.530 +    Value object = st->obj();
   1.531 +    Value value = st->value();
   1.532 +    ciField* field = st->field();
   1.533 +    if (field->holder()->is_loaded()) {
   1.534 +      int offset = field->offset();
   1.535 +      int index = _newobjects.find(object);
   1.536 +      if (index != -1) {
   1.537 +        // newly allocated object with no other stores performed on this field
   1.538 +        FieldBuffer* buf = _fields.at(index);
   1.539 +        if (buf->at(field) == NULL && is_default_value(value)) {
   1.540 +#ifndef PRODUCT
   1.541 +          if (PrintIRDuringConstruction && Verbose) {
   1.542 +            tty->print_cr("Eliminated store for object %d:", index);
   1.543 +            st->print_line();
   1.544 +          }
   1.545 +#endif
   1.546 +          return NULL;
   1.547 +        } else {
   1.548 +          buf->at_put(field, value);
   1.549 +        }
   1.550 +      } else {
   1.551 +        _objects.at_put_grow(offset, object, NULL);
   1.552 +        _values.at_put(field, value);
   1.553 +      }
   1.554 +
   1.555 +      store_value(value);
   1.556 +    } else {
     1.557 +      // if we held onto field names we could alias based on names, but
     1.558 +      // we don't know what's being stored to, so kill it all.
   1.559 +      kill();
   1.560 +    }
   1.561 +    return st;
   1.562 +  }
   1.563 +
   1.564 +
     1.565 +  // return true if this value corresponds to the default value of a field.
   1.566 +  bool is_default_value(Value value) {
   1.567 +    Constant* con = value->as_Constant();
   1.568 +    if (con) {
   1.569 +      switch (con->type()->tag()) {
   1.570 +        case intTag:    return con->type()->as_IntConstant()->value() == 0;
   1.571 +        case longTag:   return con->type()->as_LongConstant()->value() == 0;
   1.572 +        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
   1.573 +        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
   1.574 +        case objectTag: return con->type() == objectNull;
   1.575 +        default:  ShouldNotReachHere();
   1.576 +      }
   1.577 +    }
   1.578 +    return false;
   1.579 +  }
   1.580 +
   1.581 +
   1.582 +  // return either the actual value of a load or the load itself
   1.583 +  Value load(LoadField* load) {
   1.584 +    if (!EliminateFieldAccess) {
   1.585 +      return load;
   1.586 +    }
   1.587 +
   1.588 +    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
   1.589 +      // can't skip load since value might get rounded as a side effect
   1.590 +      return load;
   1.591 +    }
   1.592 +
   1.593 +    ciField* field = load->field();
   1.594 +    Value object   = load->obj();
   1.595 +    if (field->holder()->is_loaded() && !field->is_volatile()) {
   1.596 +      int offset = field->offset();
   1.597 +      Value result = NULL;
   1.598 +      int index = _newobjects.find(object);
   1.599 +      if (index != -1) {
   1.600 +        result = _fields.at(index)->at(field);
   1.601 +      } else if (_objects.at_grow(offset, NULL) == object) {
   1.602 +        result = _values.at(field);
   1.603 +      }
   1.604 +      if (result != NULL) {
   1.605 +#ifndef PRODUCT
   1.606 +        if (PrintIRDuringConstruction && Verbose) {
   1.607 +          tty->print_cr("Eliminated load: ");
   1.608 +          load->print_line();
   1.609 +        }
   1.610 +#endif
   1.611 +        assert(result->type()->tag() == load->type()->tag(), "wrong types");
   1.612 +        return result;
   1.613 +      }
   1.614 +    }
   1.615 +    return load;
   1.616 +  }
   1.617 +
   1.618 +  // Record this newly allocated object
   1.619 +  void new_instance(NewInstance* object) {
   1.620 +    int index = _newobjects.length();
   1.621 +    _newobjects.append(object);
   1.622 +    if (_fields.at_grow(index, NULL) == NULL) {
   1.623 +      _fields.at_put(index, new FieldBuffer());
   1.624 +    } else {
   1.625 +      _fields.at(index)->kill();
   1.626 +    }
   1.627 +  }
   1.628 +
   1.629 +  void store_value(Value value) {
   1.630 +    int index = _newobjects.find(value);
   1.631 +    if (index != -1) {
   1.632 +      // stored a newly allocated object into another object.
     1.633 +      // Assume we've lost track of it as a separate slice of memory.
   1.634 +      // We could do better by keeping track of whether individual
   1.635 +      // fields could alias each other.
   1.636 +      _newobjects.remove_at(index);
     1.637 +      // pull out the field info and append it at the end of the field
     1.638 +      // info list so it can be reused later.
   1.639 +      _fields.append(_fields.at(index));
   1.640 +      _fields.remove_at(index);
   1.641 +    }
   1.642 +  }
   1.643 +
   1.644 +  void kill() {
   1.645 +    _newobjects.trunc_to(0);
   1.646 +    _objects.trunc_to(0);
   1.647 +    _values.kill();
   1.648 +  }
   1.649 +};
   1.650 +
   1.651 +
   1.652 +// Implementation of GraphBuilder's ScopeData
   1.653 +
   1.654 +GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
   1.655 +  : _parent(parent)
   1.656 +  , _bci2block(NULL)
   1.657 +  , _scope(NULL)
   1.658 +  , _has_handler(false)
   1.659 +  , _stream(NULL)
   1.660 +  , _work_list(NULL)
   1.661 +  , _parsing_jsr(false)
   1.662 +  , _jsr_xhandlers(NULL)
   1.663 +  , _caller_stack_size(-1)
   1.664 +  , _continuation(NULL)
   1.665 +  , _continuation_state(NULL)
   1.666 +  , _num_returns(0)
   1.667 +  , _cleanup_block(NULL)
   1.668 +  , _cleanup_return_prev(NULL)
   1.669 +  , _cleanup_state(NULL)
   1.670 +{
   1.671 +  if (parent != NULL) {
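          +    // a nested scope gets NestedInliningSizeRatio percent of its parent's
          +    // inlining budget (floored at MaxTrivialSize below)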
   1.672 +    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
   1.673 +  } else {
   1.674 +    _max_inline_size = MaxInlineSize;
   1.675 +  }
   1.676 +  if (_max_inline_size < MaxTrivialSize) {
   1.677 +    _max_inline_size = MaxTrivialSize;
   1.678 +  }
   1.679 +}
   1.680 +
   1.681 +
   1.682 +void GraphBuilder::kill_field(ciField* field) {
   1.683 +  if (UseLocalValueNumbering) {
   1.684 +    vmap()->kill_field(field);
   1.685 +  }
   1.686 +}
   1.687 +
   1.688 +
   1.689 +void GraphBuilder::kill_array(Value value) {
   1.690 +  if (UseLocalValueNumbering) {
   1.691 +    vmap()->kill_array(value->type());
   1.692 +  }
   1.693 +  _memory->store_value(value);
   1.694 +}
   1.695 +
   1.696 +
   1.697 +void GraphBuilder::kill_all() {
   1.698 +  if (UseLocalValueNumbering) {
   1.699 +    vmap()->kill_all();
   1.700 +  }
   1.701 +  _memory->kill();
   1.702 +}
   1.703 +
   1.704 +
   1.705 +BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
   1.706 +  if (parsing_jsr()) {
   1.707 +    // It is necessary to clone all blocks associated with a
   1.708 +    // subroutine, including those for exception handlers in the scope
   1.709 +    // of the method containing the jsr (because those exception
   1.710 +    // handlers may contain ret instructions in some cases).
   1.711 +    BlockBegin* block = bci2block()->at(bci);
   1.712 +    if (block != NULL && block == parent()->bci2block()->at(bci)) {
   1.713 +      BlockBegin* new_block = new BlockBegin(block->bci());
   1.714 +#ifndef PRODUCT
   1.715 +      if (PrintInitialBlockList) {
   1.716 +        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
   1.717 +                      block->block_id(), block->bci(), new_block->block_id());
   1.718 +      }
   1.719 +#endif
     1.720 +      // copy data from the cloned block
   1.721 +      new_block->set_depth_first_number(block->depth_first_number());
   1.722 +      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
   1.723 +      // Preserve certain flags for assertion checking
   1.724 +      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
   1.725 +      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);
   1.726 +
     1.727 +      // copy the was_visited_flag to allow early detection of bailouts:
     1.728 +      // if a block that is used in a jsr has already been visited before,
     1.729 +      // it is shared between the normal control flow and a subroutine.
     1.730 +      // BlockBegin::try_merge returns false when the flag is set, which
     1.731 +      // leads to a compilation bailout.
   1.732 +      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);
   1.733 +
   1.734 +      bci2block()->at_put(bci, new_block);
   1.735 +      block = new_block;
   1.736 +    }
   1.737 +    return block;
   1.738 +  } else {
   1.739 +    return bci2block()->at(bci);
   1.740 +  }
   1.741 +}
   1.742 +
   1.743 +
   1.744 +XHandlers* GraphBuilder::ScopeData::xhandlers() const {
   1.745 +  if (_jsr_xhandlers == NULL) {
   1.746 +    assert(!parsing_jsr(), "");
   1.747 +    return scope()->xhandlers();
   1.748 +  }
   1.749 +  assert(parsing_jsr(), "");
   1.750 +  return _jsr_xhandlers;
   1.751 +}
   1.752 +
   1.753 +
   1.754 +void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
   1.755 +  _scope = scope;
   1.756 +  bool parent_has_handler = false;
   1.757 +  if (parent() != NULL) {
   1.758 +    parent_has_handler = parent()->has_handler();
   1.759 +  }
   1.760 +  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
   1.761 +}
   1.762 +
   1.763 +
   1.764 +void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
   1.765 +                                                      Instruction* return_prev,
   1.766 +                                                      ValueStack* return_state) {
   1.767 +  _cleanup_block       = block;
   1.768 +  _cleanup_return_prev = return_prev;
   1.769 +  _cleanup_state       = return_state;
   1.770 +}
   1.771 +
   1.772 +
   1.773 +void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
   1.774 +  if (_work_list == NULL) {
   1.775 +    _work_list = new BlockList();
   1.776 +  }
   1.777 +
   1.778 +  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
   1.779 +    // Do not start parsing the continuation block while in a
   1.780 +    // sub-scope
   1.781 +    if (parsing_jsr()) {
   1.782 +      if (block == jsr_continuation()) {
   1.783 +        return;
   1.784 +      }
   1.785 +    } else {
   1.786 +      if (block == continuation()) {
   1.787 +        return;
   1.788 +      }
   1.789 +    }
   1.790 +    block->set(BlockBegin::is_on_work_list_flag);
   1.791 +    _work_list->push(block);
   1.792 +
   1.793 +    sort_top_into_worklist(_work_list, block);
   1.794 +  }
   1.795 +}
   1.796 +
   1.797 +
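          +// One insertion-sort step: the caller has just pushed "top"; sink it into
          +// the list so the list stays ordered by descending depth_first_number.
          +// pop() in remove_from_work_list then always yields the block with the
          +// smallest depth-first number, i.e. blocks are parsed in reverse post-order.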
   1.798 +void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
   1.799 +  assert(worklist->top() == top, "");
     1.800 +  // sort the block into the work list, descending by depth-first number
   1.801 +  const int dfn = top->depth_first_number();
   1.802 +  assert(dfn != -1, "unknown depth first number");
   1.803 +  int i = worklist->length()-2;
   1.804 +  while (i >= 0) {
   1.805 +    BlockBegin* b = worklist->at(i);
   1.806 +    if (b->depth_first_number() < dfn) {
   1.807 +      worklist->at_put(i+1, b);
   1.808 +    } else {
   1.809 +      break;
   1.810 +    }
   1.811 +    i --;
   1.812 +  }
   1.813 +  if (i >= -1) worklist->at_put(i + 1, top);
   1.814 +}
   1.815 +
   1.816 +int GraphBuilder::ScopeData::caller_stack_size() const {
   1.817 +  ValueStack* state = scope()->caller_state();
   1.818 +  if (state == NULL) {
   1.819 +    return 0;
   1.820 +  }
   1.821 +  return state->stack_size();
   1.822 +}
   1.823 +
   1.824 +
   1.825 +BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
   1.826 +  if (is_work_list_empty()) {
   1.827 +    return NULL;
   1.828 +  }
   1.829 +  return _work_list->pop();
   1.830 +}
   1.831 +
   1.832 +
   1.833 +bool GraphBuilder::ScopeData::is_work_list_empty() const {
   1.834 +  return (_work_list == NULL || _work_list->length() == 0);
   1.835 +}
   1.836 +
   1.837 +
   1.838 +void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
   1.839 +  assert(parsing_jsr(), "");
   1.840 +  // clone all the exception handlers from the scope
   1.841 +  XHandlers* handlers = new XHandlers(scope()->xhandlers());
   1.842 +  const int n = handlers->length();
   1.843 +  for (int i = 0; i < n; i++) {
   1.844 +    // The XHandlers need to be adjusted to dispatch to the cloned
   1.845 +    // handler block instead of the default one but the synthetic
   1.846 +    // unlocker needs to be handled specially.  The synthetic unlocker
   1.847 +    // should be left alone since there can be only one and all code
   1.848 +    // should dispatch to the same one.
   1.849 +    XHandler* h = handlers->handler_at(i);
   1.850 +    if (h->handler_bci() != SynchronizationEntryBCI) {
   1.851 +      h->set_entry_block(block_at(h->handler_bci()));
   1.852 +    } else {
   1.853 +      assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
   1.854 +             "should be the synthetic unlock block");
   1.855 +    }
   1.856 +  }
   1.857 +  _jsr_xhandlers = handlers;
   1.858 +}
   1.859 +
   1.860 +
   1.861 +int GraphBuilder::ScopeData::num_returns() {
   1.862 +  if (parsing_jsr()) {
   1.863 +    return parent()->num_returns();
   1.864 +  }
   1.865 +  return _num_returns;
   1.866 +}
   1.867 +
   1.868 +
   1.869 +void GraphBuilder::ScopeData::incr_num_returns() {
   1.870 +  if (parsing_jsr()) {
   1.871 +    parent()->incr_num_returns();
   1.872 +  } else {
   1.873 +    ++_num_returns;
   1.874 +  }
   1.875 +}
   1.876 +
   1.877 +
   1.878 +// Implementation of GraphBuilder
   1.879 +
   1.880 +#define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }
   1.881 +
   1.882 +
   1.883 +void GraphBuilder::load_constant() {
   1.884 +  ciConstant con = stream()->get_constant();
   1.885 +  if (con.basic_type() == T_ILLEGAL) {
   1.886 +    BAILOUT("could not resolve a constant");
   1.887 +  } else {
   1.888 +    ValueType* t = illegalType;
   1.889 +    ValueStack* patch_state = NULL;
   1.890 +    switch (con.basic_type()) {
   1.891 +      case T_BOOLEAN: t = new IntConstant     (con.as_boolean()); break;
   1.892 +      case T_BYTE   : t = new IntConstant     (con.as_byte   ()); break;
   1.893 +      case T_CHAR   : t = new IntConstant     (con.as_char   ()); break;
   1.894 +      case T_SHORT  : t = new IntConstant     (con.as_short  ()); break;
   1.895 +      case T_INT    : t = new IntConstant     (con.as_int    ()); break;
   1.896 +      case T_LONG   : t = new LongConstant    (con.as_long   ()); break;
   1.897 +      case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
   1.898 +      case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
   1.899 +      case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
   1.900 +      case T_OBJECT :
   1.901 +       {
   1.902 +        ciObject* obj = con.as_object();
   1.903 +        if (obj->is_klass()) {
   1.904 +          ciKlass* klass = obj->as_klass();
   1.905 +          if (!klass->is_loaded() || PatchALot) {
   1.906 +            patch_state = state()->copy();
   1.907 +            t = new ObjectConstant(obj);
   1.908 +          } else {
   1.909 +            t = new InstanceConstant(klass->java_mirror());
   1.910 +          }
   1.911 +        } else {
   1.912 +          t = new InstanceConstant(obj->as_instance());
   1.913 +        }
   1.914 +        break;
   1.915 +       }
   1.916 +      default       : ShouldNotReachHere();
   1.917 +    }
   1.918 +    Value x;
   1.919 +    if (patch_state != NULL) {
   1.920 +      x = new Constant(t, patch_state);
   1.921 +    } else {
   1.922 +      x = new Constant(t);
   1.923 +    }
   1.924 +    push(t, append(x));
   1.925 +  }
   1.926 +}
   1.927 +
   1.928 +
   1.929 +void GraphBuilder::load_local(ValueType* type, int index) {
   1.930 +  Value x = state()->load_local(index);
   1.931 +  push(type, x);
   1.932 +}
   1.933 +
   1.934 +
   1.935 +void GraphBuilder::store_local(ValueType* type, int index) {
   1.936 +  Value x = pop(type);
   1.937 +  store_local(state(), x, type, index);
   1.938 +}
   1.939 +
   1.940 +
   1.941 +void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
   1.942 +  if (parsing_jsr()) {
   1.943 +    // We need to do additional tracking of the location of the return
   1.944 +    // address for jsrs since we don't handle arbitrary jsr/ret
   1.945 +    // constructs. Here we are figuring out in which circumstances we
   1.946 +    // need to bail out.
   1.947 +    if (x->type()->is_address()) {
   1.948 +      scope_data()->set_jsr_return_address_local(index);
   1.949 +
   1.950 +      // Also check parent jsrs (if any) at this time to see whether
   1.951 +      // they are using this local. We don't handle skipping over a
   1.952 +      // ret.
   1.953 +      for (ScopeData* cur_scope_data = scope_data()->parent();
   1.954 +           cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
   1.955 +           cur_scope_data = cur_scope_data->parent()) {
   1.956 +        if (cur_scope_data->jsr_return_address_local() == index) {
   1.957 +          BAILOUT("subroutine overwrites return address from previous subroutine");
   1.958 +        }
   1.959 +      }
   1.960 +    } else if (index == scope_data()->jsr_return_address_local()) {
   1.961 +      scope_data()->set_jsr_return_address_local(-1);
   1.962 +    }
   1.963 +  }
   1.964 +
   1.965 +  state->store_local(index, round_fp(x));
   1.966 +}
   1.967 +
   1.968 +
   1.969 +void GraphBuilder::load_indexed(BasicType type) {
   1.970 +  Value index = ipop();
   1.971 +  Value array = apop();
   1.972 +  Value length = NULL;
   1.973 +  if (CSEArrayLength ||
   1.974 +      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   1.975 +      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   1.976 +    length = append(new ArrayLength(array, lock_stack()));
   1.977 +  }
   1.978 +  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, lock_stack())));
   1.979 +}
   1.980 +
   1.981 +
   1.982 +void GraphBuilder::store_indexed(BasicType type) {
   1.983 +  Value value = pop(as_ValueType(type));
   1.984 +  Value index = ipop();
   1.985 +  Value array = apop();
   1.986 +  Value length = NULL;
   1.987 +  if (CSEArrayLength ||
   1.988 +      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   1.989 +      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   1.990 +    length = append(new ArrayLength(array, lock_stack()));
   1.991 +  }
   1.992 +  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
   1.993 +  kill_array(value); // invalidate all CSEs that are memory accesses of the same type
   1.994 +  append(result);
   1.995 +}
   1.996 +
   1.997 +
   1.998 +void GraphBuilder::stack_op(Bytecodes::Code code) {
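          +  // Raw stack manipulation, in the stack-transfer notation of the JVM
          +  // specification (top of stack on the right), e.g.
          +  //   dup_x1: ..., w2, w1 -> ..., w1, w2, w1
          +  // raw_pop/raw_push work on single stack slots, so the two-slot variants
          +  // (pop2, dup2, ...) fall out of the same code without special cases.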
   1.999 +  switch (code) {
  1.1000 +    case Bytecodes::_pop:
  1.1001 +      { state()->raw_pop();
  1.1002 +      }
  1.1003 +      break;
  1.1004 +    case Bytecodes::_pop2:
  1.1005 +      { state()->raw_pop();
  1.1006 +        state()->raw_pop();
  1.1007 +      }
  1.1008 +      break;
  1.1009 +    case Bytecodes::_dup:
  1.1010 +      { Value w = state()->raw_pop();
  1.1011 +        state()->raw_push(w);
  1.1012 +        state()->raw_push(w);
  1.1013 +      }
  1.1014 +      break;
  1.1015 +    case Bytecodes::_dup_x1:
  1.1016 +      { Value w1 = state()->raw_pop();
  1.1017 +        Value w2 = state()->raw_pop();
  1.1018 +        state()->raw_push(w1);
  1.1019 +        state()->raw_push(w2);
  1.1020 +        state()->raw_push(w1);
  1.1021 +      }
  1.1022 +      break;
  1.1023 +    case Bytecodes::_dup_x2:
  1.1024 +      { Value w1 = state()->raw_pop();
  1.1025 +        Value w2 = state()->raw_pop();
  1.1026 +        Value w3 = state()->raw_pop();
  1.1027 +        state()->raw_push(w1);
  1.1028 +        state()->raw_push(w3);
  1.1029 +        state()->raw_push(w2);
  1.1030 +        state()->raw_push(w1);
  1.1031 +      }
  1.1032 +      break;
  1.1033 +    case Bytecodes::_dup2:
  1.1034 +      { Value w1 = state()->raw_pop();
  1.1035 +        Value w2 = state()->raw_pop();
  1.1036 +        state()->raw_push(w2);
  1.1037 +        state()->raw_push(w1);
  1.1038 +        state()->raw_push(w2);
  1.1039 +        state()->raw_push(w1);
  1.1040 +      }
  1.1041 +      break;
  1.1042 +    case Bytecodes::_dup2_x1:
  1.1043 +      { Value w1 = state()->raw_pop();
  1.1044 +        Value w2 = state()->raw_pop();
  1.1045 +        Value w3 = state()->raw_pop();
  1.1046 +        state()->raw_push(w2);
  1.1047 +        state()->raw_push(w1);
  1.1048 +        state()->raw_push(w3);
  1.1049 +        state()->raw_push(w2);
  1.1050 +        state()->raw_push(w1);
  1.1051 +      }
  1.1052 +      break;
  1.1053 +    case Bytecodes::_dup2_x2:
  1.1054 +      { Value w1 = state()->raw_pop();
  1.1055 +        Value w2 = state()->raw_pop();
  1.1056 +        Value w3 = state()->raw_pop();
  1.1057 +        Value w4 = state()->raw_pop();
  1.1058 +        state()->raw_push(w2);
  1.1059 +        state()->raw_push(w1);
  1.1060 +        state()->raw_push(w4);
  1.1061 +        state()->raw_push(w3);
  1.1062 +        state()->raw_push(w2);
  1.1063 +        state()->raw_push(w1);
  1.1064 +      }
  1.1065 +      break;
  1.1066 +    case Bytecodes::_swap:
  1.1067 +      { Value w1 = state()->raw_pop();
  1.1068 +        Value w2 = state()->raw_pop();
  1.1069 +        state()->raw_push(w1);
  1.1070 +        state()->raw_push(w2);
  1.1071 +      }
  1.1072 +      break;
  1.1073 +    default:
  1.1074 +      ShouldNotReachHere();
  1.1075 +      break;
  1.1076 +  }
  1.1077 +}
  1.1078 +
  1.1079 +
  1.1080 +void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* stack) {
  1.1081 +  Value y = pop(type);
  1.1082 +  Value x = pop(type);
  1.1083 +  // NOTE: strictfp can be queried from current method since we don't
  1.1084 +  // inline methods with differing strictfp bits
  1.1085 +  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), stack);
  1.1086 +  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  1.1087 +  res = append(res);
  1.1088 +  if (method()->is_strict()) {
  1.1089 +    res = round_fp(res);
  1.1090 +  }
  1.1091 +  push(type, res);
  1.1092 +}
  1.1093 +
  1.1094 +
  1.1095 +void GraphBuilder::negate_op(ValueType* type) {
  1.1096 +  push(type, append(new NegateOp(pop(type))));
  1.1097 +}
  1.1098 +
  1.1099 +
  1.1100 +void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  1.1101 +  Value s = ipop();
  1.1102 +  Value x = pop(type);
  1.1103 +  // try to simplify
   1.1104 +  // Note: This code should go into the canonicalizer as soon as it
   1.1105 +  //       can handle canonicalized forms that contain more than one node.
  1.1106 +  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
  1.1107 +    // pattern: x >>> s
  1.1108 +    IntConstant* s1 = s->type()->as_IntConstant();
  1.1109 +    if (s1 != NULL) {
  1.1110 +      // pattern: x >>> s1, with s1 constant
  1.1111 +      ShiftOp* l = x->as_ShiftOp();
  1.1112 +      if (l != NULL && l->op() == Bytecodes::_ishl) {
  1.1113 +        // pattern: (a << b) >>> s1
  1.1114 +        IntConstant* s0 = l->y()->type()->as_IntConstant();
  1.1115 +        if (s0 != NULL) {
  1.1116 +          // pattern: (a << s0) >>> s1
  1.1117 +          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
  1.1118 +          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
  1.1119 +          if (s0c == s1c) {
  1.1120 +            if (s0c == 0) {
  1.1121 +              // pattern: (a << 0) >>> 0 => simplify to: a
  1.1122 +              ipush(l->x());
  1.1123 +            } else {
  1.1124 +              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
  1.1125 +              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
  1.1126 +              const int m = (1 << (BitsPerInt - s0c)) - 1;
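          +              // e.g. for s0c == 24: m == (1 << 8) - 1 == 0xFF, so
          +              // "(a << 24) >>> 24" reduces to "a & 0xFF"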
  1.1127 +              Value s = append(new Constant(new IntConstant(m)));
  1.1128 +              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
  1.1129 +            }
  1.1130 +            return;
  1.1131 +          }
  1.1132 +        }
  1.1133 +      }
  1.1134 +    }
  1.1135 +  }
  1.1136 +  // could not simplify
  1.1137 +  push(type, append(new ShiftOp(code, x, s)));
  1.1138 +}
  1.1139 +
  1.1140 +
  1.1141 +void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  1.1142 +  Value y = pop(type);
  1.1143 +  Value x = pop(type);
  1.1144 +  push(type, append(new LogicOp(code, x, y)));
  1.1145 +}
  1.1146 +
  1.1147 +
  1.1148 +void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  1.1149 +  ValueStack* state_before = state()->copy();
  1.1150 +  Value y = pop(type);
  1.1151 +  Value x = pop(type);
  1.1152 +  ipush(append(new CompareOp(code, x, y, state_before)));
  1.1153 +}
  1.1154 +
  1.1155 +
  1.1156 +void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  1.1157 +  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
  1.1158 +}
  1.1159 +
  1.1160 +
  1.1161 +void GraphBuilder::increment() {
  1.1162 +  int index = stream()->get_index();
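          +  // operand layout per the JVM specification: narrow iinc is
          +  // "iinc, index, const" with an 8-bit delta at bcp + 2; wide iinc is
          +  // "wide, iinc, index16, const16" with a 16-bit delta at bcp + 4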
  1.1163 +  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  1.1164 +  load_local(intType, index);
  1.1165 +  ipush(append(new Constant(new IntConstant(delta))));
  1.1166 +  arithmetic_op(intType, Bytecodes::_iadd);
  1.1167 +  store_local(intType, index);
  1.1168 +}
  1.1169 +
  1.1170 +
  1.1171 +void GraphBuilder::_goto(int from_bci, int to_bci) {
  1.1172 +  profile_bci(from_bci);
  1.1173 +  append(new Goto(block_at(to_bci), to_bci <= from_bci));
  1.1174 +}
  1.1175 +
  1.1176 +
  1.1177 +void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  1.1178 +  BlockBegin* tsux = block_at(stream()->get_dest());
  1.1179 +  BlockBegin* fsux = block_at(stream()->next_bci());
  1.1180 +  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  1.1181 +  If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If();
  1.1182 +  if (profile_branches() && (if_node != NULL)) {
  1.1183 +    if_node->set_profiled_method(method());
  1.1184 +    if_node->set_profiled_bci(bci());
  1.1185 +    if_node->set_should_profile(true);
  1.1186 +  }
  1.1187 +}
  1.1188 +
  1.1189 +
  1.1190 +void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  1.1191 +  Value y = append(new Constant(intZero));
  1.1192 +  ValueStack* state_before = state()->copy();
  1.1193 +  Value x = ipop();
  1.1194 +  if_node(x, cond, y, state_before);
  1.1195 +}
  1.1196 +
  1.1197 +
  1.1198 +void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  1.1199 +  Value y = append(new Constant(objectNull));
  1.1200 +  ValueStack* state_before = state()->copy();
  1.1201 +  Value x = apop();
  1.1202 +  if_node(x, cond, y, state_before);
  1.1203 +}
  1.1204 +
  1.1205 +
  1.1206 +void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  1.1207 +  ValueStack* state_before = state()->copy();
  1.1208 +  Value y = pop(type);
  1.1209 +  Value x = pop(type);
  1.1210 +  if_node(x, cond, y, state_before);
  1.1211 +}
  1.1212 +
  1.1213 +
  1.1214 +void GraphBuilder::jsr(int dest) {
  1.1215 +  // We only handle well-formed jsrs (those which are "block-structured").
  1.1216 +  // If the bytecodes are strange (jumping out of a jsr block) then we
  1.1217 +  // might end up trying to re-parse a block containing a jsr which
  1.1218 +  // has already been activated. Watch for this case and bail out.
  1.1219 +  for (ScopeData* cur_scope_data = scope_data();
  1.1220 +       cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
  1.1221 +       cur_scope_data = cur_scope_data->parent()) {
  1.1222 +    if (cur_scope_data->jsr_entry_bci() == dest) {
  1.1223 +      BAILOUT("too-complicated jsr/ret structure");
  1.1224 +    }
  1.1225 +  }
  1.1226 +
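          +  // push the return address (the bci of the instruction following the jsr)
          +  // as an address-typed constant; the matching ret consumes it through the
          +  // tracked return-address local (see store_local)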
  1.1227 +  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  1.1228 +  if (!try_inline_jsr(dest)) {
  1.1229 +    return; // bailed out while parsing and inlining subroutine
  1.1230 +  }
  1.1231 +}
  1.1232 +
  1.1233 +
  1.1234 +void GraphBuilder::ret(int local_index) {
  1.1235 +  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
  1.1236 +
  1.1237 +  if (local_index != scope_data()->jsr_return_address_local()) {
  1.1238 +    BAILOUT("can not handle complicated jsr/ret constructs");
  1.1239 +  }
  1.1240 +
  1.1241 +  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  1.1242 +  append(new Goto(scope_data()->jsr_continuation(), false));
  1.1243 +}
  1.1244 +
  1.1245 +
  1.1246 +void GraphBuilder::table_switch() {
  1.1247 +  Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci());
  1.1248 +  const int l = switch_->length();
  1.1249 +  if (CanonicalizeNodes && l == 1) {
  1.1250 +    // total of 2 successors => use If instead of switch
   1.1251 +    // Note: This code should go into the canonicalizer as soon as it
   1.1252 +    //       can handle canonicalized forms that contain more than one node.
  1.1253 +    Value key = append(new Constant(new IntConstant(switch_->low_key())));
  1.1254 +    BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0));
  1.1255 +    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
  1.1256 +    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1.1257 +    ValueStack* state_before = is_bb ? state() : NULL;
  1.1258 +    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1.1259 +  } else {
  1.1260 +    // collect successors
  1.1261 +    BlockList* sux = new BlockList(l + 1, NULL);
  1.1262 +    int i;
  1.1263 +    bool has_bb = false;
  1.1264 +    for (i = 0; i < l; i++) {
  1.1265 +      sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i)));
  1.1266 +      if (switch_->dest_offset_at(i) < 0) has_bb = true;
  1.1267 +    }
  1.1268 +    // add default successor
  1.1269 +    sux->at_put(i, block_at(bci() + switch_->default_offset()));
  1.1270 +    ValueStack* state_before = has_bb ? state() : NULL;
  1.1271 +    append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb));
  1.1272 +  }
  1.1273 +}
  1.1274 +
  1.1275 +
  1.1276 +void GraphBuilder::lookup_switch() {
  1.1277 +  Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci());
  1.1278 +  const int l = switch_->number_of_pairs();
  1.1279 +  if (CanonicalizeNodes && l == 1) {
  1.1280 +    // total of 2 successors => use If instead of switch
   1.1281 +    // Note: This code should go into the canonicalizer as soon as it
   1.1282 +    //       can handle canonicalized forms that contain more than one node.
  1.1283 +    // simplify to If
  1.1284 +    LookupswitchPair* pair = switch_->pair_at(0);
  1.1285 +    Value key = append(new Constant(new IntConstant(pair->match())));
  1.1286 +    BlockBegin* tsux = block_at(bci() + pair->offset());
  1.1287 +    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
  1.1288 +    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1.1289 +    ValueStack* state_before = is_bb ? state() : NULL;
  1.1290 +    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1.1291 +  } else {
  1.1292 +    // collect successors & keys
  1.1293 +    BlockList* sux = new BlockList(l + 1, NULL);
  1.1294 +    intArray* keys = new intArray(l, 0);
  1.1295 +    int i;
  1.1296 +    bool has_bb = false;
  1.1297 +    for (i = 0; i < l; i++) {
  1.1298 +      LookupswitchPair* pair = switch_->pair_at(i);
  1.1299 +      if (pair->offset() < 0) has_bb = true;
  1.1300 +      sux->at_put(i, block_at(bci() + pair->offset()));
  1.1301 +      keys->at_put(i, pair->match());
  1.1302 +    }
  1.1303 +    // add default successor
  1.1304 +    sux->at_put(i, block_at(bci() + switch_->default_offset()));
  1.1305 +    ValueStack* state_before = has_bb ? state() : NULL;
  1.1306 +    append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
  1.1307 +  }
  1.1308 +}
  1.1309 +
  1.1310 +void GraphBuilder::call_register_finalizer() {
  1.1311 +  // If the receiver requires finalization then emit code to perform
  1.1312 +  // the registration on return.
  1.1313 +
  1.1314 +  // Gather some type information about the receiver
  1.1315 +  Value receiver = state()->load_local(0);
  1.1316 +  assert(receiver != NULL, "must have a receiver");
  1.1317 +  ciType* declared_type = receiver->declared_type();
  1.1318 +  ciType* exact_type = receiver->exact_type();
  1.1319 +  if (exact_type == NULL &&
  1.1320 +      receiver->as_Local() &&
  1.1321 +      receiver->as_Local()->java_index() == 0) {
  1.1322 +    ciInstanceKlass* ik = compilation()->method()->holder();
  1.1323 +    if (ik->is_final()) {
  1.1324 +      exact_type = ik;
  1.1325 +    } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  1.1326 +      // test class is leaf class
  1.1327 +      compilation()->dependency_recorder()->assert_leaf_type(ik);
  1.1328 +      exact_type = ik;
  1.1329 +    } else {
  1.1330 +      declared_type = ik;
  1.1331 +    }
  1.1332 +  }
  1.1333 +
  1.1334 +  // see if we know statically that registration isn't required
  1.1335 +  bool needs_check = true;
  1.1336 +  if (exact_type != NULL) {
  1.1337 +    needs_check = exact_type->as_instance_klass()->has_finalizer();
  1.1338 +  } else if (declared_type != NULL) {
  1.1339 +    ciInstanceKlass* ik = declared_type->as_instance_klass();
  1.1340 +    if (!Dependencies::has_finalizable_subclass(ik)) {
  1.1341 +      compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
  1.1342 +      needs_check = false;
  1.1343 +    }
  1.1344 +  }
  1.1345 +
  1.1346 +  if (needs_check) {
  1.1347 +    // Perform the registration of finalizable objects.
  1.1348 +    load_local(objectType, 0);
  1.1349 +    append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
  1.1350 +                               state()->pop_arguments(1),
  1.1351 +                               true, lock_stack(), true));
  1.1352 +  }
  1.1353 +}
  1.1354 +
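          +// For illustration: the analysis above folds away whenever finalizability
          +// can be decided statically, e.g.
          +//
          +//   final class Plain { }                              // exact type known,
          +//       // has_finalizer() is false => nothing is emitted
          +//   class Noisy { protected void finalize() { ... } }  // has_finalizer()
          +//       // is true => the registration intrinsic is emitted on return
          +//
          +// If neither the exact nor the declared type settles the question, the
          +// _Object_init intrinsic is emitted and the decision is left to the
          +// runtime.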
  1.1355 +
  1.1356 +void GraphBuilder::method_return(Value x) {
  1.1357 +  if (RegisterFinalizersAtInit &&
  1.1358 +      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
  1.1359 +    call_register_finalizer();
  1.1360 +  }
  1.1361 +
  1.1362 +  // Check to see whether we are inlining. If so, Return
  1.1363 +  // instructions become Gotos to the continuation point.
  1.1364 +  if (continuation() != NULL) {
  1.1365 +    assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
  1.1366 +
  1.1367 +    // If the inlined method is synchronized, the monitor must be
  1.1368 +    // released before we jump to the continuation block.
  1.1369 +    if (method()->is_synchronized()) {
  1.1370 +      int i = state()->caller_state()->locks_size();
  1.1371 +      assert(state()->locks_size() == i + 1, "receiver must be locked here");
  1.1372 +      monitorexit(state()->lock_at(i), SynchronizationEntryBCI);
  1.1373 +    }
  1.1374 +
  1.1375 +    state()->truncate_stack(caller_stack_size());
  1.1376 +    if (x != NULL) {
  1.1377 +      state()->push(x->type(), x);
  1.1378 +    }
  1.1379 +    Goto* goto_callee = new Goto(continuation(), false);
  1.1380 +
  1.1381 +    // See whether this is the first return; if so, store off some
  1.1382 +    // of the state for later examination
  1.1383 +    if (num_returns() == 0) {
  1.1384 +      set_inline_cleanup_info(_block, _last, state());
  1.1385 +    }
  1.1386 +
   1.1387 +    // The state at the end of the inlined method is the state of the
   1.1388 +    // caller without the method parameters on the stack, plus the
   1.1389 +    // return value of the inlined method, if any, on the operand stack.
  1.1390 +    set_state(scope_data()->continuation_state()->copy());
  1.1391 +    if (x) {
  1.1392 +      state()->push(x->type(), x);
  1.1393 +    }
  1.1394 +
  1.1395 +    // The current bci() is in the wrong scope, so use the bci() of
  1.1396 +    // the continuation point.
  1.1397 +    append_with_bci(goto_callee, scope_data()->continuation()->bci());
  1.1398 +    incr_num_returns();
  1.1399 +
  1.1400 +    return;
  1.1401 +  }
  1.1402 +
  1.1403 +  state()->truncate_stack(0);
  1.1404 +  if (method()->is_synchronized()) {
  1.1405 +    // perform the unlocking before exiting the method
  1.1406 +    Value receiver;
  1.1407 +    if (!method()->is_static()) {
  1.1408 +      receiver = _initial_state->local_at(0);
  1.1409 +    } else {
  1.1410 +      receiver = append(new Constant(new ClassConstant(method()->holder())));
  1.1411 +    }
  1.1412 +    append_split(new MonitorExit(receiver, state()->unlock()));
  1.1413 +  }
  1.1414 +
  1.1415 +  append(new Return(x));
  1.1416 +}
  1.1417 +
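          +// For illustration: when inlining "int f() { return g() + 1; }" into a
          +// caller, the ireturn inside f() does not emit a Return instruction; it
          +// truncates the stack back to the caller's depth, pushes the result, and
          +// appends a Goto to the continuation block, where parsing of the caller
          +// resumes with the result on the operand stack.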
  1.1418 +
  1.1419 +void GraphBuilder::access_field(Bytecodes::Code code) {
  1.1420 +  bool will_link;
  1.1421 +  ciField* field = stream()->get_field(will_link);
  1.1422 +  ciInstanceKlass* holder = field->holder();
  1.1423 +  BasicType field_type = field->type()->basic_type();
  1.1424 +  ValueType* type = as_ValueType(field_type);
  1.1425 +  // call will_link again to determine if the field is valid.
  1.1426 +  const bool is_loaded = holder->is_loaded() &&
  1.1427 +                         field->will_link(method()->holder(), code);
  1.1428 +  const bool is_initialized = is_loaded && holder->is_initialized();
  1.1429 +
  1.1430 +  ValueStack* state_copy = NULL;
  1.1431 +  if (!is_initialized || PatchALot) {
  1.1432 +    // save state before instruction for debug info when
  1.1433 +    // deoptimization happens during patching
  1.1434 +    state_copy = state()->copy();
  1.1435 +  }
  1.1436 +
  1.1437 +  Value obj = NULL;
  1.1438 +  if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
  1.1439 +    // commoning of class constants should only occur if the class is
  1.1440 +    // fully initialized and resolved in this constant pool.  The will_link test
  1.1441 +    // above essentially checks if this class is resolved in this constant pool
   1.1442 +    // so the is_initialized flag should be sufficient.
  1.1443 +    if (state_copy != NULL) {
  1.1444 +      // build a patching constant
  1.1445 +      obj = new Constant(new ClassConstant(holder), state_copy);
  1.1446 +    } else {
  1.1447 +      obj = new Constant(new ClassConstant(holder));
  1.1448 +    }
  1.1449 +  }
  1.1450 +
  1.1451 +
  1.1452 +  const int offset = is_loaded ? field->offset() : -1;
  1.1453 +  switch (code) {
  1.1454 +    case Bytecodes::_getstatic: {
  1.1455 +      // check for compile-time constants, i.e., initialized static final fields
  1.1456 +      Instruction* constant = NULL;
  1.1457 +      if (field->is_constant() && !PatchALot) {
  1.1458 +        ciConstant field_val = field->constant_value();
  1.1459 +        BasicType field_type = field_val.basic_type();
  1.1460 +        switch (field_type) {
  1.1461 +        case T_ARRAY:
  1.1462 +        case T_OBJECT:
  1.1463 +          if (field_val.as_object()->has_encoding()) {
   1.1464 +            constant = new Constant(as_ValueType(field_val));
  1.1465 +          }
  1.1466 +          break;
  1.1467 +
  1.1468 +        default:
  1.1469 +          constant = new Constant(as_ValueType(field_val));
  1.1470 +        }
  1.1471 +      }
  1.1472 +      if (constant != NULL) {
  1.1473 +        push(type, append(constant));
  1.1474 +        state_copy = NULL; // Not a potential deoptimization point (see set_state_before logic below)
  1.1475 +      } else {
  1.1476 +        push(type, append(new LoadField(append(obj), offset, field, true,
  1.1477 +                                        lock_stack(), state_copy, is_loaded, is_initialized)));
  1.1478 +      }
  1.1479 +      break;
  1.1480 +    }
  1.1481 +    case Bytecodes::_putstatic:
  1.1482 +      { Value val = pop(type);
  1.1483 +        append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized));
  1.1484 +        if (UseLocalValueNumbering) {
  1.1485 +          vmap()->kill_field(field);   // invalidate all CSEs that are memory accesses
  1.1486 +        }
  1.1487 +      }
  1.1488 +      break;
  1.1489 +    case Bytecodes::_getfield :
  1.1490 +      {
  1.1491 +        LoadField* load = new LoadField(apop(), offset, field, false, lock_stack(), state_copy, is_loaded, true);
  1.1492 +        Value replacement = is_loaded ? _memory->load(load) : load;
  1.1493 +        if (replacement != load) {
  1.1494 +          assert(replacement->bci() != -99 || replacement->as_Phi() || replacement->as_Local(),
  1.1495 +                 "should already by linked");
  1.1496 +          push(type, replacement);
  1.1497 +        } else {
  1.1498 +          push(type, append(load));
  1.1499 +        }
  1.1500 +        break;
  1.1501 +      }
  1.1502 +
  1.1503 +    case Bytecodes::_putfield :
  1.1504 +      { Value val = pop(type);
  1.1505 +        StoreField* store = new StoreField(apop(), offset, field, val, false, lock_stack(), state_copy, is_loaded, true);
  1.1506 +        if (is_loaded) store = _memory->store(store);
  1.1507 +        if (store != NULL) {
  1.1508 +          append(store);
  1.1509 +          kill_field(field);   // invalidate all CSEs that are accesses of this field
  1.1510 +        }
  1.1511 +      }
  1.1512 +      break;
  1.1513 +    default                   :
  1.1514 +      ShouldNotReachHere();
  1.1515 +      break;
  1.1516 +  }
  1.1517 +}
  1.1518 +
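          +// For illustration: for an initialized holder, a getstatic of
          +//
          +//   static final int LIMIT = 42;
          +//
          +// takes the is_constant() path above and pushes Constant(42) directly, so
          +// no LoadField is emitted and no patching/deoptimization point is
          +// recorded.  A getstatic of a not-yet-initialized class instead emits a
          +// LoadField carrying state_copy, so the site can be patched (and
          +// deoptimized) if class initialization has to run at execution time.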
  1.1519 +
  1.1520 +Dependencies* GraphBuilder::dependency_recorder() const {
  1.1521 +  assert(DeoptC1, "need debug information");
  1.1522 +  compilation()->set_needs_debug_information(true);
  1.1523 +  return compilation()->dependency_recorder();
  1.1524 +}
  1.1525 +
  1.1526 +
  1.1527 +void GraphBuilder::invoke(Bytecodes::Code code) {
  1.1528 +  bool will_link;
  1.1529 +  ciMethod* target = stream()->get_method(will_link);
  1.1530 +  // we have to make sure the argument size (incl. the receiver)
  1.1531 +  // is correct for compilation (the call would fail later during
  1.1532 +  // linkage anyway) - was bug (gri 7/28/99)
  1.1533 +  if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
  1.1534 +  ciInstanceKlass* klass = target->holder();
  1.1535 +
  1.1536 +  // check if CHA possible: if so, change the code to invoke_special
  1.1537 +  ciInstanceKlass* calling_klass = method()->holder();
  1.1538 +  ciKlass* holder = stream()->get_declared_method_holder();
  1.1539 +  ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  1.1540 +  ciInstanceKlass* actual_recv = callee_holder;
  1.1541 +
  1.1542 +  // some methods are obviously bindable without any type checks so
  1.1543 +  // convert them directly to an invokespecial.
  1.1544 +  if (target->is_loaded() && !target->is_abstract() &&
  1.1545 +      target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
  1.1546 +    code = Bytecodes::_invokespecial;
  1.1547 +  }
  1.1548 +
  1.1549 +  // NEEDS_CLEANUP
   1.1550 +  // I've added the target->is_loaded() test below but I don't really understand
  1.1551 +  // how klass->is_loaded() can be true and yet target->is_loaded() is false.
  1.1552 +  // this happened while running the JCK invokevirtual tests under doit.  TKR
  1.1553 +  ciMethod* cha_monomorphic_target = NULL;
  1.1554 +  ciMethod* exact_target = NULL;
  1.1555 +  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded()) {
  1.1556 +    Value receiver = NULL;
  1.1557 +    ciInstanceKlass* receiver_klass = NULL;
  1.1558 +    bool type_is_exact = false;
  1.1559 +    // try to find a precise receiver type
  1.1560 +    if (will_link && !target->is_static()) {
  1.1561 +      int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
  1.1562 +      receiver = state()->stack_at(index);
  1.1563 +      ciType* type = receiver->exact_type();
  1.1564 +      if (type != NULL && type->is_loaded() &&
  1.1565 +          type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1.1566 +        receiver_klass = (ciInstanceKlass*) type;
  1.1567 +        type_is_exact = true;
  1.1568 +      }
  1.1569 +      if (type == NULL) {
  1.1570 +        type = receiver->declared_type();
  1.1571 +        if (type != NULL && type->is_loaded() &&
  1.1572 +            type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1.1573 +          receiver_klass = (ciInstanceKlass*) type;
  1.1574 +          if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
  1.1575 +            // Insert a dependency on this type since
  1.1576 +            // find_monomorphic_target may assume it's already done.
  1.1577 +            dependency_recorder()->assert_leaf_type(receiver_klass);
  1.1578 +            type_is_exact = true;
  1.1579 +          }
  1.1580 +        }
  1.1581 +      }
  1.1582 +    }
  1.1583 +    if (receiver_klass != NULL && type_is_exact &&
  1.1584 +        receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
  1.1585 +      // If we have the exact receiver type we can bind directly to
  1.1586 +      // the method to call.
  1.1587 +      exact_target = target->resolve_invoke(calling_klass, receiver_klass);
  1.1588 +      if (exact_target != NULL) {
  1.1589 +        target = exact_target;
  1.1590 +        code = Bytecodes::_invokespecial;
  1.1591 +      }
  1.1592 +    }
  1.1593 +    if (receiver_klass != NULL &&
  1.1594 +        receiver_klass->is_subtype_of(actual_recv) &&
  1.1595 +        actual_recv->is_initialized()) {
  1.1596 +      actual_recv = receiver_klass;
  1.1597 +    }
  1.1598 +
  1.1599 +    if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
  1.1600 +        (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
  1.1601 +      // Use CHA on the receiver to select a more precise method.
  1.1602 +      cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
  1.1603 +    } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
  1.1604 +      // if there is only one implementor of this interface then we
   1.1605 +      // may be able to bind this invoke directly to the implementing
   1.1606 +      // klass but we need both a dependence on the single interface
   1.1607 +      // and on the method we bind to.  Additionally, since all we know
   1.1608 +      // about the receiver type is that it's supposed to implement the
   1.1609 +      // interface, we have to insert a check that it's the class we
  1.1610 +      // expect.  Interface types are not checked by the verifier so
  1.1611 +      // they are roughly equivalent to Object.
  1.1612 +      ciInstanceKlass* singleton = NULL;
  1.1613 +      if (target->holder()->nof_implementors() == 1) {
  1.1614 +        singleton = target->holder()->implementor(0);
  1.1615 +      }
  1.1616 +      if (singleton) {
  1.1617 +        cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
  1.1618 +        if (cha_monomorphic_target != NULL) {
  1.1619 +          // If CHA is able to bind this invoke then update the class
  1.1620 +          // to match that class, otherwise klass will refer to the
  1.1621 +          // interface.
  1.1622 +          klass = cha_monomorphic_target->holder();
  1.1623 +          actual_recv = target->holder();
  1.1624 +
   1.1625 +          // insert a check that it's really the expected class.
  1.1626 +          CheckCast* c = new CheckCast(klass, receiver, NULL);
  1.1627 +          c->set_incompatible_class_change_check();
  1.1628 +          c->set_direct_compare(klass->is_final());
  1.1629 +          append_split(c);
  1.1630 +        }
  1.1631 +      }
  1.1632 +    }
  1.1633 +  }
  1.1634 +
  1.1635 +  if (cha_monomorphic_target != NULL) {
  1.1636 +    if (cha_monomorphic_target->is_abstract()) {
  1.1637 +      // Do not optimize for abstract methods
  1.1638 +      cha_monomorphic_target = NULL;
  1.1639 +    }
  1.1640 +  }
  1.1641 +
  1.1642 +  if (cha_monomorphic_target != NULL) {
  1.1643 +    if (!(target->is_final_method())) {
  1.1644 +      // If we inlined because CHA revealed only a single target method,
  1.1645 +      // then we are dependent on that target method not getting overridden
  1.1646 +      // by dynamic class loading.  Be sure to test the "static" receiver
  1.1647 +      // dest_method here, as opposed to the actual receiver, which may
  1.1648 +      // falsely lead us to believe that the receiver is final or private.
  1.1649 +      dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
  1.1650 +    }
  1.1651 +    code = Bytecodes::_invokespecial;
  1.1652 +  }
  1.1653 +  // check if we could do inlining
  1.1654 +  if (!PatchALot && Inline && klass->is_loaded() &&
   1.1655 +      (klass->is_initialized() || (klass->is_interface() && target->holder()->is_initialized()))
  1.1656 +      && target->will_link(klass, callee_holder, code)) {
  1.1657 +    // callee is known => check if we have static binding
  1.1658 +    assert(target->is_loaded(), "callee must be known");
  1.1659 +    if (code == Bytecodes::_invokestatic
  1.1660 +     || code == Bytecodes::_invokespecial
   1.1661 +     || (code == Bytecodes::_invokevirtual && target->is_final_method())
  1.1662 +    ) {
  1.1663 +      // static binding => check if callee is ok
  1.1664 +      ciMethod* inline_target = (cha_monomorphic_target != NULL)
  1.1665 +                                  ? cha_monomorphic_target
  1.1666 +                                  : target;
  1.1667 +      bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
  1.1668 +      CHECK_BAILOUT();
  1.1669 +
  1.1670 +#ifndef PRODUCT
  1.1671 +      // printing
  1.1672 +      if (PrintInlining && !res) {
  1.1673 +        // if it was successfully inlined, then it was already printed.
  1.1674 +        print_inline_result(inline_target, res);
  1.1675 +      }
  1.1676 +#endif
  1.1677 +      clear_inline_bailout();
  1.1678 +      if (res) {
  1.1679 +        // Register dependence if JVMTI has either breakpoint
  1.1680 +        // setting or hotswapping of methods capabilities since they may
  1.1681 +        // cause deoptimization.
  1.1682 +        if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
  1.1683 +          dependency_recorder()->assert_evol_method(inline_target);
  1.1684 +        }
  1.1685 +        return;
  1.1686 +      }
  1.1687 +    }
  1.1688 +  }
  1.1689 +  // If we attempted an inline which did not succeed because of a
  1.1690 +  // bailout during construction of the callee graph, the entire
  1.1691 +  // compilation has to be aborted. This is fairly rare and currently
  1.1692 +  // seems to only occur for jasm-generated classes which contain
  1.1693 +  // jsr/ret pairs which are not associated with finally clauses and
  1.1694 +  // do not have exception handlers in the containing method, and are
  1.1695 +  // therefore not caught early enough to abort the inlining without
  1.1696 +  // corrupting the graph. (We currently bail out with a non-empty
  1.1697 +  // stack at a ret in these situations.)
  1.1698 +  CHECK_BAILOUT();
  1.1699 +
  1.1700 +  // inlining not successful => standard invoke
  1.1701 +  bool is_static = code == Bytecodes::_invokestatic;
  1.1702 +  ValueType* result_type = as_ValueType(target->return_type());
  1.1703 +  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
  1.1704 +  Value recv = is_static ? NULL : apop();
  1.1705 +  bool is_loaded = target->is_loaded();
  1.1706 +  int vtable_index = methodOopDesc::invalid_vtable_index;
  1.1707 +
  1.1708 +#ifdef SPARC
  1.1709 +  // Currently only supported on Sparc.
  1.1710 +  // The UseInlineCaches only controls dispatch to invokevirtuals for
  1.1711 +  // loaded classes which we weren't able to statically bind.
  1.1712 +  if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
  1.1713 +      && !target->can_be_statically_bound()) {
  1.1714 +    // Find a vtable index if one is available
  1.1715 +    vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
  1.1716 +  }
  1.1717 +#endif
  1.1718 +
  1.1719 +  if (recv != NULL &&
  1.1720 +      (code == Bytecodes::_invokespecial ||
  1.1721 +       !is_loaded || target->is_final() ||
  1.1722 +       profile_calls())) {
  1.1723 +    // invokespecial always needs a NULL check.  invokevirtual where
   1.1724 +    // the target is final or where it's not known whether the
   1.1725 +    // target is final requires a NULL check.  Otherwise a normal
  1.1726 +    // invokevirtual will perform the null check during the lookup
  1.1727 +    // logic or the unverified entry point.  Profiling of calls
  1.1728 +    // requires that the null check is performed in all cases.
  1.1729 +    null_check(recv);
  1.1730 +  }
  1.1731 +
  1.1732 +  if (profile_calls()) {
  1.1733 +    assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
  1.1734 +    ciKlass* target_klass = NULL;
  1.1735 +    if (cha_monomorphic_target != NULL) {
  1.1736 +      target_klass = cha_monomorphic_target->holder();
  1.1737 +    } else if (exact_target != NULL) {
  1.1738 +      target_klass = exact_target->holder();
  1.1739 +    }
  1.1740 +    profile_call(recv, target_klass);
  1.1741 +  }
  1.1742 +
  1.1743 +  Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target);
  1.1744 +  // push result
  1.1745 +  append_split(result);
  1.1746 +
  1.1747 +  if (result_type != voidType) {
  1.1748 +    if (method()->is_strict()) {
  1.1749 +      push(result_type, round_fp(result));
  1.1750 +    } else {
  1.1751 +      push(result_type, result);
  1.1752 +    }
  1.1753 +  }
  1.1754 +}
  1.1755 +
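          +// For illustration: the CHA logic above can devirtualize
          +//
          +//   interface Shape { double area(); }
          +//   class Circle implements Shape { public double area() { ... } }
          +//
          +//   double f(Shape s) { return s.area(); }   // invokeinterface
          +//
          +// If Circle is currently the only implementor, find_monomorphic_target
          +// binds the call to Circle.area, a CheckCast to Circle (flagged as an
          +// incompatible-class-change check) guards the receiver, and the call is
          +// issued as invokespecial.  The recorded dependencies invalidate the
          +// compiled code if another Shape implementor is ever loaded.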
  1.1756 +
  1.1757 +void GraphBuilder::new_instance(int klass_index) {
  1.1758 +  bool will_link;
  1.1759 +  ciKlass* klass = stream()->get_klass(will_link);
  1.1760 +  assert(klass->is_instance_klass(), "must be an instance klass");
  1.1761 +  NewInstance* new_instance = new NewInstance(klass->as_instance_klass());
  1.1762 +  _memory->new_instance(new_instance);
  1.1763 +  apush(append_split(new_instance));
  1.1764 +}
  1.1765 +
  1.1766 +
  1.1767 +void GraphBuilder::new_type_array() {
  1.1768 +  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index())));
  1.1769 +}
  1.1770 +
  1.1771 +
  1.1772 +void GraphBuilder::new_object_array() {
  1.1773 +  bool will_link;
  1.1774 +  ciKlass* klass = stream()->get_klass(will_link);
  1.1775 +  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
  1.1776 +  NewArray* n = new NewObjectArray(klass, ipop(), state_before);
  1.1777 +  apush(append_split(n));
  1.1778 +}
  1.1779 +
  1.1780 +
  1.1781 +bool GraphBuilder::direct_compare(ciKlass* k) {
  1.1782 +  if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
  1.1783 +    ciInstanceKlass* ik = k->as_instance_klass();
  1.1784 +    if (ik->is_final()) {
  1.1785 +      return true;
  1.1786 +    } else {
  1.1787 +      if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  1.1788 +        // test class is leaf class
  1.1789 +        dependency_recorder()->assert_leaf_type(ik);
  1.1790 +        return true;
  1.1791 +      }
  1.1792 +    }
  1.1793 +  }
  1.1794 +  return false;
  1.1795 +}
  1.1796 +
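          +// For illustration: direct_compare() answers whether "obj instanceof K"
          +// can be decided by a single klass-pointer comparison.  That holds for a
          +// final class and, with CHA, for a currently-leaf class, where an
          +// assert_leaf_type dependency triggers deoptimization if a subclass is
          +// loaded later.  For other klasses the slower subtype walk is required.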
  1.1797 +
  1.1798 +void GraphBuilder::check_cast(int klass_index) {
  1.1799 +  bool will_link;
  1.1800 +  ciKlass* klass = stream()->get_klass(will_link);
  1.1801 +  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
  1.1802 +  CheckCast* c = new CheckCast(klass, apop(), state_before);
  1.1803 +  apush(append_split(c));
  1.1804 +  c->set_direct_compare(direct_compare(klass));
  1.1805 +  if (profile_checkcasts()) {
  1.1806 +    c->set_profiled_method(method());
  1.1807 +    c->set_profiled_bci(bci());
  1.1808 +    c->set_should_profile(true);
  1.1809 +  }
  1.1810 +}
  1.1811 +
  1.1812 +
  1.1813 +void GraphBuilder::instance_of(int klass_index) {
  1.1814 +  bool will_link;
  1.1815 +  ciKlass* klass = stream()->get_klass(will_link);
  1.1816 +  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
  1.1817 +  InstanceOf* i = new InstanceOf(klass, apop(), state_before);
  1.1818 +  ipush(append_split(i));
  1.1819 +  i->set_direct_compare(direct_compare(klass));
  1.1820 +}
  1.1821 +
  1.1822 +
  1.1823 +void GraphBuilder::monitorenter(Value x, int bci) {
  1.1824 +  // save state before locking in case of deoptimization after a NullPointerException
  1.1825 +  ValueStack* lock_stack_before = lock_stack();
  1.1826 +  append_with_bci(new MonitorEnter(x, state()->lock(scope(), x), lock_stack_before), bci);
  1.1827 +  kill_all();
  1.1828 +}
  1.1829 +
  1.1830 +
  1.1831 +void GraphBuilder::monitorexit(Value x, int bci) {
  1.1832 +  // Note: the comment below is only relevant for the case where we do
  1.1833 +  // not deoptimize due to asynchronous exceptions (!(DeoptC1 &&
  1.1834 +  // DeoptOnAsyncException), which is not used anymore)
  1.1835 +
  1.1836 +  // Note: Potentially, the monitor state in an exception handler
  1.1837 +  //       can be wrong due to wrong 'initialization' of the handler
  1.1838 +  //       via a wrong asynchronous exception path. This can happen,
  1.1839 +  //       if the exception handler range for asynchronous exceptions
  1.1840 +  //       is too long (see also java bug 4327029, and comment in
  1.1841 +  //       GraphBuilder::handle_exception()). This may cause 'under-
  1.1842 +  //       flow' of the monitor stack => bailout instead.
  1.1843 +  if (state()->locks_size() < 1) BAILOUT("monitor stack underflow");
  1.1844 +  append_with_bci(new MonitorExit(x, state()->unlock()), bci);
  1.1845 +  kill_all();
  1.1846 +}
  1.1847 +
  1.1848 +
  1.1849 +void GraphBuilder::new_multi_array(int dimensions) {
  1.1850 +  bool will_link;
  1.1851 +  ciKlass* klass = stream()->get_klass(will_link);
  1.1852 +  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
  1.1853 +
  1.1854 +  Values* dims = new Values(dimensions, NULL);
  1.1855 +  // fill in all dimensions
  1.1856 +  int i = dimensions;
  1.1857 +  while (i-- > 0) dims->at_put(i, ipop());
  1.1858 +  // create array
  1.1859 +  NewArray* n = new NewMultiArray(klass, dims, state_before);
  1.1860 +  apush(append_split(n));
  1.1861 +}
  1.1862 +
  1.1863 +
  1.1864 +void GraphBuilder::throw_op(int bci) {
  1.1865 +  // We require that the debug info for a Throw be the "state before"
  1.1866 +  // the Throw (i.e., exception oop is still on TOS)
  1.1867 +  ValueStack* state_before = state()->copy();
  1.1868 +  Throw* t = new Throw(apop(), state_before);
  1.1869 +  append_with_bci(t, bci);
  1.1870 +}
  1.1871 +
  1.1872 +
  1.1873 +Value GraphBuilder::round_fp(Value fp_value) {
  1.1874 +  // no rounding needed if SSE2 is used
  1.1875 +  if (RoundFPResults && UseSSE < 2) {
  1.1876 +    // Must currently insert rounding node for doubleword values that
  1.1877 +    // are results of expressions (i.e., not loads from memory or
  1.1878 +    // constants)
  1.1879 +    if (fp_value->type()->tag() == doubleTag &&
  1.1880 +        fp_value->as_Constant() == NULL &&
  1.1881 +        fp_value->as_Local() == NULL &&       // method parameters need no rounding
  1.1882 +        fp_value->as_RoundFP() == NULL) {
  1.1883 +      return append(new RoundFP(fp_value));
  1.1884 +    }
  1.1885 +  }
  1.1886 +  return fp_value;
  1.1887 +}
  1.1888 +
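          +// For illustration: on x87 (UseSSE < 2) intermediate double results live
          +// in 80-bit registers, so a computed value such as d1 * d2 may carry
          +// extra precision; the RoundFP node forces the value back to 64-bit
          +// double precision so the Java-visible result is correctly rounded.
          +// Loads, constants, and parameters are already properly rounded, which
          +// is why they are excluded above.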
  1.1889 +
  1.1890 +Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  1.1891 +  Canonicalizer canon(instr, bci);
  1.1892 +  Instruction* i1 = canon.canonical();
  1.1893 +  if (i1->bci() != -99) {
  1.1894 +    // Canonicalizer returned an instruction which was already
  1.1895 +    // appended so simply return it.
  1.1896 +    return i1;
  1.1897 +  } else if (UseLocalValueNumbering) {
  1.1898 +    // Lookup the instruction in the ValueMap and add it to the map if
  1.1899 +    // it's not found.
  1.1900 +    Instruction* i2 = vmap()->find_insert(i1);
  1.1901 +    if (i2 != i1) {
  1.1902 +      // found an entry in the value map, so just return it.
  1.1903 +      assert(i2->bci() != -1, "should already be linked");
  1.1904 +      return i2;
  1.1905 +    }
  1.1906 +  }
  1.1907 +
  1.1908 +  if (i1->as_Phi() == NULL && i1->as_Local() == NULL) {
  1.1909 +    // i1 was not eliminated => append it
  1.1910 +    assert(i1->next() == NULL, "shouldn't already be linked");
  1.1911 +    _last = _last->set_next(i1, canon.bci());
  1.1912 +    if (++_instruction_count >= InstructionCountCutoff
  1.1913 +        && !bailed_out()) {
  1.1914 +      // set the bailout state but complete normal processing.  We
  1.1915 +      // might do a little more work before noticing the bailout so we
  1.1916 +      // want processing to continue normally until it's noticed.
  1.1917 +      bailout("Method and/or inlining is too large");
  1.1918 +    }
  1.1919 +
  1.1920 +#ifndef PRODUCT
  1.1921 +    if (PrintIRDuringConstruction) {
  1.1922 +      InstructionPrinter ip;
  1.1923 +      ip.print_line(i1);
  1.1924 +      if (Verbose) {
  1.1925 +        state()->print();
  1.1926 +      }
  1.1927 +    }
  1.1928 +#endif
  1.1929 +    assert(_last == i1, "adjust code below");
  1.1930 +    StateSplit* s = i1->as_StateSplit();
  1.1931 +    if (s != NULL && i1->as_BlockEnd() == NULL) {
  1.1932 +      // Continue CSE across certain intrinsics
  1.1933 +      Intrinsic* intrinsic = s->as_Intrinsic();
  1.1934 +      if (UseLocalValueNumbering) {
   1.1935 +        if (intrinsic == NULL || !intrinsic->preserves_state()) {
   1.1936 +          vmap()->kill_all();      // for now, hopefully we need this only for calls eventually
   1.1937 +        }
  1.1938 +      }
  1.1939 +      if (EliminateFieldAccess) {
  1.1940 +        if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
  1.1941 +          _memory->kill();
  1.1942 +        }
  1.1943 +      }
  1.1944 +      s->set_state(state()->copy());
  1.1945 +    }
  1.1946 +    // set up exception handlers for this instruction if necessary
  1.1947 +    if (i1->can_trap()) {
  1.1948 +      assert(exception_state() != NULL || !has_handler(), "must have setup exception state");
  1.1949 +      i1->set_exception_handlers(handle_exception(bci));
  1.1950 +    }
  1.1951 +  }
  1.1952 +  return i1;
  1.1953 +}
  1.1954 +
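          +// For illustration: append_with_bci() is where local value numbering takes
          +// effect.  In a sequence like
          +//
          +//   int a = v.x + 1;
          +//   int b = v.x + 1;
          +//
          +// the second load/add pair hits vmap()->find_insert() (or the _memory
          +// load cache) and the already-linked instruction is returned, so no
          +// duplicate node is appended.  A bci of -99 is the marker for "not yet
          +// linked into the instruction list".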
  1.1955 +
  1.1956 +Instruction* GraphBuilder::append(Instruction* instr) {
  1.1957 +  assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
  1.1958 +  return append_with_bci(instr, bci());
  1.1959 +}
  1.1960 +
  1.1961 +
  1.1962 +Instruction* GraphBuilder::append_split(StateSplit* instr) {
  1.1963 +  return append_with_bci(instr, bci());
  1.1964 +}
  1.1965 +
  1.1966 +
  1.1967 +void GraphBuilder::null_check(Value value) {
  1.1968 +  if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
  1.1969 +    return;
  1.1970 +  } else {
  1.1971 +    Constant* con = value->as_Constant();
  1.1972 +    if (con) {
  1.1973 +      ObjectType* c = con->type()->as_ObjectType();
  1.1974 +      if (c && c->is_loaded()) {
  1.1975 +        ObjectConstant* oc = c->as_ObjectConstant();
  1.1976 +        if (!oc || !oc->value()->is_null_object()) {
  1.1977 +          return;
  1.1978 +        }
  1.1979 +      }
  1.1980 +    }
  1.1981 +  }
  1.1982 +  append(new NullCheck(value, lock_stack()));
  1.1983 +}
  1.1984 +
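          +// For illustration: the tests above elide NullCheck nodes for values that
          +// are provably non-null, e.g. the receiver in
          +//
          +//   new Foo().bar();
          +//
          +// is a NewInstance and can never be null, and a loaded non-null object
          +// constant (such as a String literal) needs no check either.  Only the
          +// remaining cases get an explicit NullCheck carrying the current lock
          +// stack for debug info.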
  1.1985 +
  1.1986 +
  1.1987 +XHandlers* GraphBuilder::handle_exception(int cur_bci) {
  1.1988 +  // fast path if it is guaranteed that no exception handlers are present
  1.1989 +  if (!has_handler()) {
  1.1990 +    // TODO: check if return NULL is possible (avoids empty lists)
  1.1991 +    return new XHandlers();
  1.1992 +  }
  1.1993 +
  1.1994 +  XHandlers*  exception_handlers = new XHandlers();
  1.1995 +  ScopeData*  cur_scope_data = scope_data();
  1.1996 +  ValueStack* s = exception_state();
  1.1997 +  int scope_count = 0;
  1.1998 +
  1.1999 +  assert(s != NULL, "exception state must be set");
  1.2000 +  do {
  1.2001 +    assert(cur_scope_data->scope() == s->scope(), "scopes do not match");
  1.2002 +    assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
  1.2003 +
  1.2004 +    // join with all potential exception handlers
  1.2005 +    XHandlers* list = cur_scope_data->xhandlers();
  1.2006 +    const int n = list->length();
  1.2007 +    for (int i = 0; i < n; i++) {
  1.2008 +      XHandler* h = list->handler_at(i);
  1.2009 +      if (h->covers(cur_bci)) {
  1.2010 +        // h is a potential exception handler => join it
  1.2011 +        compilation()->set_has_exception_handlers(true);
  1.2012 +
  1.2013 +        BlockBegin* entry = h->entry_block();
  1.2014 +        if (entry == block()) {
  1.2015 +          // It's acceptable for an exception handler to cover itself
  1.2016 +          // but we don't handle that in the parser currently.  It's
   1.2017 +          // very rare so we bail out instead of trying to handle it.
  1.2018 +          BAILOUT_("exception handler covers itself", exception_handlers);
  1.2019 +        }
  1.2020 +        assert(entry->bci() == h->handler_bci(), "must match");
  1.2021 +        assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
  1.2022 +
  1.2023 +        // previously this was a BAILOUT, but this is not necessary
  1.2024 +        // now because asynchronous exceptions are not handled this way.
  1.2025 +        assert(entry->state() == NULL || s->locks_size() == entry->state()->locks_size(), "locks do not match");
  1.2026 +
   1.2027 +        // xhandlers start with an empty expression stack
  1.2028 +        s->truncate_stack(cur_scope_data->caller_stack_size());
  1.2029 +
  1.2030 +        // Note: Usually this join must work. However, very
  1.2031 +        // complicated jsr-ret structures where we don't ret from
  1.2032 +        // the subroutine can cause the objects on the monitor
  1.2033 +        // stacks to not match because blocks can be parsed twice.
  1.2034 +        // The only test case we've seen so far which exhibits this
  1.2035 +        // problem is caught by the infinite recursion test in
  1.2036 +        // GraphBuilder::jsr() if the join doesn't work.
  1.2037 +        if (!entry->try_merge(s)) {
  1.2038 +          BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
  1.2039 +        }
  1.2040 +
  1.2041 +        // add current state for correct handling of phi functions at begin of xhandler
  1.2042 +        int phi_operand = entry->add_exception_state(s);
  1.2043 +
  1.2044 +        // add entry to the list of xhandlers of this block
  1.2045 +        _block->add_exception_handler(entry);
  1.2046 +
  1.2047 +        // add back-edge from xhandler entry to this block
  1.2048 +        if (!entry->is_predecessor(_block)) {
  1.2049 +          entry->add_predecessor(_block);
  1.2050 +        }
  1.2051 +
  1.2052 +        // clone XHandler because phi_operand and scope_count can not be shared
  1.2053 +        XHandler* new_xhandler = new XHandler(h);
  1.2054 +        new_xhandler->set_phi_operand(phi_operand);
  1.2055 +        new_xhandler->set_scope_count(scope_count);
  1.2056 +        exception_handlers->append(new_xhandler);
  1.2057 +
  1.2058 +        // fill in exception handler subgraph lazily
  1.2059 +        assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
  1.2060 +        cur_scope_data->add_to_work_list(entry);
  1.2061 +
  1.2062 +        // stop when reaching catchall
  1.2063 +        if (h->catch_type() == 0) {
  1.2064 +          return exception_handlers;
  1.2065 +        }
  1.2066 +      }
  1.2067 +    }
  1.2068 +
  1.2069 +    // Set up iteration for next time.
  1.2070 +    // If parsing a jsr, do not grab exception handlers from the
  1.2071 +    // parent scopes for this method (already got them, and they
  1.2072 +    // needed to be cloned)
  1.2073 +    if (cur_scope_data->parsing_jsr()) {
  1.2074 +      IRScope* tmp_scope = cur_scope_data->scope();
  1.2075 +      while (cur_scope_data->parent() != NULL &&
  1.2076 +             cur_scope_data->parent()->scope() == tmp_scope) {
  1.2077 +        cur_scope_data = cur_scope_data->parent();
  1.2078 +      }
  1.2079 +    }
  1.2080 +    if (cur_scope_data != NULL) {
  1.2081 +      if (cur_scope_data->parent() != NULL) {
  1.2082 +        // must use pop_scope instead of caller_state to preserve all monitors
  1.2083 +        s = s->pop_scope();
  1.2084 +      }
  1.2085 +      cur_bci = cur_scope_data->scope()->caller_bci();
  1.2086 +      cur_scope_data = cur_scope_data->parent();
  1.2087 +      scope_count++;
  1.2088 +    }
  1.2089 +  } while (cur_scope_data != NULL);
  1.2090 +
  1.2091 +  return exception_handlers;
  1.2092 +}
  1.2093 +
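          +// For illustration: for a try block nested inside an inlined callee,
          +// handle_exception() walks from the innermost scope outward, linking the
          +// throwing block to every handler whose range covers the bci.  The walk
          +// stops early at a catch-all (catch_type == 0), e.g. the synthetic
          +// handler of a synchronized method or a finally clause, since nothing
          +// can propagate past it.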
  1.2094 +
  1.2095 +// Helper class for simplifying Phis.
  1.2096 +class PhiSimplifier : public BlockClosure {
  1.2097 + private:
  1.2098 +  bool _has_substitutions;
  1.2099 +  Value simplify(Value v);
  1.2100 +
  1.2101 + public:
  1.2102 +  PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
  1.2103 +    start->iterate_preorder(this);
  1.2104 +    if (_has_substitutions) {
  1.2105 +      SubstitutionResolver sr(start);
  1.2106 +    }
  1.2107 +  }
  1.2108 +  void block_do(BlockBegin* b);
  1.2109 +  bool has_substitutions() const { return _has_substitutions; }
  1.2110 +};
  1.2111 +
  1.2112 +
  1.2113 +Value PhiSimplifier::simplify(Value v) {
  1.2114 +  Phi* phi = v->as_Phi();
  1.2115 +
  1.2116 +  if (phi == NULL) {
  1.2117 +    // no phi function
  1.2118 +    return v;
  1.2119 +  } else if (v->has_subst()) {
  1.2120 +    // already substituted; subst can be phi itself -> simplify
  1.2121 +    return simplify(v->subst());
  1.2122 +  } else if (phi->is_set(Phi::cannot_simplify)) {
  1.2123 +    // already tried to simplify phi before
  1.2124 +    return phi;
  1.2125 +  } else if (phi->is_set(Phi::visited)) {
  1.2126 +    // break cycles in phi functions
  1.2127 +    return phi;
  1.2128 +  } else if (phi->type()->is_illegal()) {
  1.2129 +    // illegal phi functions are ignored anyway
  1.2130 +    return phi;
  1.2131 +
  1.2132 +  } else {
  1.2133 +    // mark phi function as processed to break cycles in phi functions
  1.2134 +    phi->set(Phi::visited);
  1.2135 +
  1.2136 +    // simplify x = [y, x] and x = [y, y] to y
  1.2137 +    Value subst = NULL;
  1.2138 +    int opd_count = phi->operand_count();
  1.2139 +    for (int i = 0; i < opd_count; i++) {
  1.2140 +      Value opd = phi->operand_at(i);
  1.2141 +      assert(opd != NULL, "Operand must exist!");
  1.2142 +
  1.2143 +      if (opd->type()->is_illegal()) {
  1.2144 +        // if one operand is illegal, the entire phi function is illegal
  1.2145 +        phi->make_illegal();
  1.2146 +        phi->clear(Phi::visited);
  1.2147 +        return phi;
  1.2148 +      }
  1.2149 +
  1.2150 +      Value new_opd = simplify(opd);
  1.2151 +      assert(new_opd != NULL, "Simplified operand must exist!");
  1.2152 +
  1.2153 +      if (new_opd != phi && new_opd != subst) {
  1.2154 +        if (subst == NULL) {
  1.2155 +          subst = new_opd;
  1.2156 +        } else {
  1.2157 +          // no simplification possible
  1.2158 +          phi->set(Phi::cannot_simplify);
  1.2159 +          phi->clear(Phi::visited);
  1.2160 +          return phi;
  1.2161 +        }
  1.2162 +      }
  1.2163 +    }
  1.2164 +
   1.2165 +    // successfully simplified phi function
  1.2166 +    assert(subst != NULL, "illegal phi function");
  1.2167 +    _has_substitutions = true;
  1.2168 +    phi->clear(Phi::visited);
  1.2169 +    phi->set_subst(subst);
  1.2170 +
  1.2171 +#ifndef PRODUCT
  1.2172 +    if (PrintPhiFunctions) {
  1.2173 +      tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
  1.2174 +    }
  1.2175 +#endif
  1.2176 +
  1.2177 +    return subst;
  1.2178 +  }
  1.2179 +}
  1.2180 +
  1.2181 +
  1.2182 +void PhiSimplifier::block_do(BlockBegin* b) {
  1.2183 +  for_each_phi_fun(b, phi,
  1.2184 +    simplify(phi);
  1.2185 +  );
  1.2186 +
  1.2187 +#ifdef ASSERT
  1.2188 +  for_each_phi_fun(b, phi,
  1.2189 +                   assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
  1.2190 +  );
  1.2191 +
  1.2192 +  ValueStack* state = b->state()->caller_state();
  1.2193 +  int index;
  1.2194 +  Value value;
  1.2195 +  for_each_state(state) {
  1.2196 +    for_each_local_value(state, index, value) {
  1.2197 +      Phi* phi = value->as_Phi();
  1.2198 +      assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
  1.2199 +    }
  1.2200 +  }
  1.2201 +#endif
  1.2202 +}
  1.2203 +
   1.2204 +// This method is called after all blocks are filled with HIR instructions.
   1.2205 +// It eliminates all Phi functions of the form x = [y, y] and x = [y, x].
  1.2206 +void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
  1.2207 +  PhiSimplifier simplifier(start);
  1.2208 +}
  1.2209 +
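          +// For illustration: a loop that never reassigns a variable still gets a
          +// phi for it at the loop header during parsing:
          +//
          +//   int y = f();
          +//   while (g()) { /* y untouched */ }
          +//   use(y);                 // header phi has the form x = [y, x]
          +//
          +// PhiSimplifier resolves such self-referential (and x = [y, y]) phis to
          +// plain y, and SubstitutionResolver then rewrites all uses.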
  1.2210 +
  1.2211 +void GraphBuilder::connect_to_end(BlockBegin* beg) {
  1.2212 +  // setup iteration
  1.2213 +  kill_all();
  1.2214 +  _block = beg;
  1.2215 +  _state = beg->state()->copy();
  1.2216 +  _last  = beg;
  1.2217 +  iterate_bytecodes_for_block(beg->bci());
  1.2218 +}
  1.2219 +
  1.2220 +
  1.2221 +BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
  1.2222 +#ifndef PRODUCT
  1.2223 +  if (PrintIRDuringConstruction) {
  1.2224 +    tty->cr();
  1.2225 +    InstructionPrinter ip;
  1.2226 +    ip.print_instr(_block); tty->cr();
  1.2227 +    ip.print_stack(_block->state()); tty->cr();
  1.2228 +    ip.print_inline_level(_block);
  1.2229 +    ip.print_head();
  1.2230 +    tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
  1.2231 +  }
  1.2232 +#endif
  1.2233 +  _skip_block = false;
  1.2234 +  assert(state() != NULL, "ValueStack missing!");
  1.2235 +  ciBytecodeStream s(method());
  1.2236 +  s.reset_to_bci(bci);
  1.2237 +  int prev_bci = bci;
  1.2238 +  scope_data()->set_stream(&s);
  1.2239 +  // iterate
  1.2240 +  Bytecodes::Code code = Bytecodes::_illegal;
  1.2241 +  bool push_exception = false;
  1.2242 +
  1.2243 +  if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
  1.2244 +    // first thing in the exception entry block should be the exception object.
  1.2245 +    push_exception = true;
  1.2246 +  }
  1.2247 +
  1.2248 +  while (!bailed_out() && last()->as_BlockEnd() == NULL &&
  1.2249 +         (code = stream()->next()) != ciBytecodeStream::EOBC() &&
  1.2250 +         (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
  1.2251 +
  1.2252 +    if (has_handler() && can_trap(method(), code)) {
  1.2253 +      // copy the state because it is modified before handle_exception is called
  1.2254 +      set_exception_state(state()->copy());
  1.2255 +    } else {
  1.2256 +      // handle_exception is not called for this bytecode
  1.2257 +      set_exception_state(NULL);
  1.2258 +    }
  1.2259 +
  1.2260 +    // Check for active jsr during OSR compilation
  1.2261 +    if (compilation()->is_osr_compile()
  1.2262 +        && scope()->is_top_scope()
  1.2263 +        && parsing_jsr()
  1.2264 +        && s.cur_bci() == compilation()->osr_bci()) {
  1.2265 +      bailout("OSR not supported while a jsr is active");
  1.2266 +    }
  1.2267 +
  1.2268 +    if (push_exception) {
  1.2269 +      apush(append(new ExceptionObject()));
  1.2270 +      push_exception = false;
  1.2271 +    }
  1.2272 +
  1.2273 +    // handle bytecode
  1.2274 +    switch (code) {
  1.2275 +      case Bytecodes::_nop            : /* nothing to do */ break;
  1.2276 +      case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
  1.2277 +      case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
  1.2278 +      case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
  1.2279 +      case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
  1.2280 +      case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
  1.2281 +      case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
  1.2282 +      case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
  1.2283 +      case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
  1.2284 +      case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
  1.2285 +      case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
  1.2286 +      case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
  1.2287 +      case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
  1.2288 +      case Bytecodes::_fconst_2       : fpush(append(new Constant(new FloatConstant ( 2)))); break;
  1.2289 +      case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
  1.2290 +      case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
  1.2291 +      case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
  1.2292 +      case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
  1.2293 +      case Bytecodes::_ldc            : // fall through
  1.2294 +      case Bytecodes::_ldc_w          : // fall through
  1.2295 +      case Bytecodes::_ldc2_w         : load_constant(); break;
  1.2296 +      case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
  1.2297 +      case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
  1.2298 +      case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
  1.2299 +      case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
  1.2300 +      case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
  1.2301 +      case Bytecodes::_iload_0        : load_local(intType   , 0); break;
  1.2302 +      case Bytecodes::_iload_1        : load_local(intType   , 1); break;
  1.2303 +      case Bytecodes::_iload_2        : load_local(intType   , 2); break;
  1.2304 +      case Bytecodes::_iload_3        : load_local(intType   , 3); break;
  1.2305 +      case Bytecodes::_lload_0        : load_local(longType  , 0); break;
  1.2306 +      case Bytecodes::_lload_1        : load_local(longType  , 1); break;
  1.2307 +      case Bytecodes::_lload_2        : load_local(longType  , 2); break;
  1.2308 +      case Bytecodes::_lload_3        : load_local(longType  , 3); break;
  1.2309 +      case Bytecodes::_fload_0        : load_local(floatType , 0); break;
  1.2310 +      case Bytecodes::_fload_1        : load_local(floatType , 1); break;
  1.2311 +      case Bytecodes::_fload_2        : load_local(floatType , 2); break;
  1.2312 +      case Bytecodes::_fload_3        : load_local(floatType , 3); break;
  1.2313 +      case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
  1.2314 +      case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
  1.2315 +      case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
  1.2316 +      case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
  1.2317 +      case Bytecodes::_aload_0        : load_local(objectType, 0); break;
  1.2318 +      case Bytecodes::_aload_1        : load_local(objectType, 1); break;
  1.2319 +      case Bytecodes::_aload_2        : load_local(objectType, 2); break;
  1.2320 +      case Bytecodes::_aload_3        : load_local(objectType, 3); break;
  1.2321 +      case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
  1.2322 +      case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
  1.2323 +      case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
  1.2324 +      case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
  1.2325 +      case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
  1.2326 +      case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
  1.2327 +      case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
  1.2328 +      case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
  1.2329 +      case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
  1.2330 +      case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
  1.2331 +      case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
  1.2332 +      case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
  1.2333 +      case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
  1.2334 +      case Bytecodes::_istore_0       : store_local(intType   , 0); break;
  1.2335 +      case Bytecodes::_istore_1       : store_local(intType   , 1); break;
  1.2336 +      case Bytecodes::_istore_2       : store_local(intType   , 2); break;
  1.2337 +      case Bytecodes::_istore_3       : store_local(intType   , 3); break;
  1.2338 +      case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
  1.2339 +      case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
  1.2340 +      case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
  1.2341 +      case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
  1.2342 +      case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
  1.2343 +      case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
  1.2344 +      case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
  1.2345 +      case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
  1.2346 +      case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
  1.2347 +      case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
  1.2348 +      case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
  1.2349 +      case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
  1.2350 +      case Bytecodes::_astore_0       : store_local(objectType, 0); break;
  1.2351 +      case Bytecodes::_astore_1       : store_local(objectType, 1); break;
  1.2352 +      case Bytecodes::_astore_2       : store_local(objectType, 2); break;
  1.2353 +      case Bytecodes::_astore_3       : store_local(objectType, 3); break;
  1.2354 +      case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
  1.2355 +      case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
  1.2356 +      case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
  1.2357 +      case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
  1.2358 +      case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
  1.2359 +      case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
  1.2360 +      case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
  1.2361 +      case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
  1.2362 +      case Bytecodes::_pop            : // fall through
  1.2363 +      case Bytecodes::_pop2           : // fall through
  1.2364 +      case Bytecodes::_dup            : // fall through
  1.2365 +      case Bytecodes::_dup_x1         : // fall through
  1.2366 +      case Bytecodes::_dup_x2         : // fall through
  1.2367 +      case Bytecodes::_dup2           : // fall through
  1.2368 +      case Bytecodes::_dup2_x1        : // fall through
  1.2369 +      case Bytecodes::_dup2_x2        : // fall through
  1.2370 +      case Bytecodes::_swap           : stack_op(code); break;
  1.2371 +      case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
  1.2372 +      case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
  1.2373 +      case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
  1.2374 +      case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
  1.2375 +      case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
  1.2376 +      case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
  1.2377 +      case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
  1.2378 +      case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
  1.2379 +      case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
  1.2380 +      case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
  1.2381 +      case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
  1.2382 +      case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
  1.2383 +      case Bytecodes::_idiv           : arithmetic_op(intType   , code, lock_stack()); break;
  1.2384 +      case Bytecodes::_ldiv           : arithmetic_op(longType  , code, lock_stack()); break;
  1.2385 +      case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
  1.2386 +      case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
  1.2387 +      case Bytecodes::_irem           : arithmetic_op(intType   , code, lock_stack()); break;
  1.2388 +      case Bytecodes::_lrem           : arithmetic_op(longType  , code, lock_stack()); break;
  1.2389 +      case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
  1.2390 +      case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
  1.2391 +      case Bytecodes::_ineg           : negate_op(intType   ); break;
  1.2392 +      case Bytecodes::_lneg           : negate_op(longType  ); break;
  1.2393 +      case Bytecodes::_fneg           : negate_op(floatType ); break;
  1.2394 +      case Bytecodes::_dneg           : negate_op(doubleType); break;
  1.2395 +      case Bytecodes::_ishl           : shift_op(intType , code); break;
  1.2396 +      case Bytecodes::_lshl           : shift_op(longType, code); break;
  1.2397 +      case Bytecodes::_ishr           : shift_op(intType , code); break;
  1.2398 +      case Bytecodes::_lshr           : shift_op(longType, code); break;
  1.2399 +      case Bytecodes::_iushr          : shift_op(intType , code); break;
  1.2400 +      case Bytecodes::_lushr          : shift_op(longType, code); break;
  1.2401 +      case Bytecodes::_iand           : logic_op(intType , code); break;
  1.2402 +      case Bytecodes::_land           : logic_op(longType, code); break;
  1.2403 +      case Bytecodes::_ior            : logic_op(intType , code); break;
  1.2404 +      case Bytecodes::_lor            : logic_op(longType, code); break;
  1.2405 +      case Bytecodes::_ixor           : logic_op(intType , code); break;
  1.2406 +      case Bytecodes::_lxor           : logic_op(longType, code); break;
  1.2407 +      case Bytecodes::_iinc           : increment(); break;
  1.2408 +      case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
  1.2409 +      case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
  1.2410 +      case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
  1.2411 +      case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
  1.2412 +      case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
  1.2413 +      case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
  1.2414 +      case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
  1.2415 +      case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
  1.2416 +      case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
  1.2417 +      case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
  1.2418 +      case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
  1.2419 +      case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
  1.2420 +      case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
  1.2421 +      case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
  1.2422 +      case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
  1.2423 +      case Bytecodes::_lcmp           : compare_op(longType  , code); break;
  1.2424 +      case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
  1.2425 +      case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
  1.2426 +      case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
  1.2427 +      case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
  1.2428 +      case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
  1.2429 +      case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
  1.2430 +      case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
  1.2431 +      case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
  1.2432 +      case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
  1.2433 +      case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
  1.2434 +      case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
  1.2435 +      case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
  1.2436 +      case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
  1.2437 +      case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
  1.2438 +      case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
  1.2439 +      case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
  1.2440 +      case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
  1.2441 +      case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
  1.2442 +      case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
  1.2443 +      case Bytecodes::_jsr            : jsr(s.get_dest()); break;
  1.2444 +      case Bytecodes::_ret            : ret(s.get_index()); break;
  1.2445 +      case Bytecodes::_tableswitch    : table_switch(); break;
  1.2446 +      case Bytecodes::_lookupswitch   : lookup_switch(); break;
  1.2447 +      case Bytecodes::_ireturn        : method_return(ipop()); break;
  1.2448 +      case Bytecodes::_lreturn        : method_return(lpop()); break;
  1.2449 +      case Bytecodes::_freturn        : method_return(fpop()); break;
  1.2450 +      case Bytecodes::_dreturn        : method_return(dpop()); break;
  1.2451 +      case Bytecodes::_areturn        : method_return(apop()); break;
  1.2452 +      case Bytecodes::_return         : method_return(NULL  ); break;
  1.2453 +      case Bytecodes::_getstatic      : // fall through
  1.2454 +      case Bytecodes::_putstatic      : // fall through
  1.2455 +      case Bytecodes::_getfield       : // fall through
  1.2456 +      case Bytecodes::_putfield       : access_field(code); break;
  1.2457 +      case Bytecodes::_invokevirtual  : // fall through
  1.2458 +      case Bytecodes::_invokespecial  : // fall through
  1.2459 +      case Bytecodes::_invokestatic   : // fall through
  1.2460 +      case Bytecodes::_invokeinterface: invoke(code); break;
  1.2461 +      case Bytecodes::_xxxunusedxxx   : ShouldNotReachHere(); break;
  1.2462 +      case Bytecodes::_new            : new_instance(s.get_index_big()); break;
  1.2463 +      case Bytecodes::_newarray       : new_type_array(); break;
  1.2464 +      case Bytecodes::_anewarray      : new_object_array(); break;
  1.2465 +      case Bytecodes::_arraylength    : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
  1.2466 +      case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
  1.2467 +      case Bytecodes::_checkcast      : check_cast(s.get_index_big()); break;
  1.2468 +      case Bytecodes::_instanceof     : instance_of(s.get_index_big()); break;
  1.2469 +      // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
  1.2470 +      case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
  1.2471 +      case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
  1.2472 +      case Bytecodes::_wide           : ShouldNotReachHere(); break;
  1.2473 +      case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
  1.2474 +      case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
  1.2475 +      case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
  1.2476 +      case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
  1.2477 +      case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
  1.2478 +      case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", NULL);
  1.2479 +      default                         : ShouldNotReachHere(); break;
  1.2480 +    }
  1.2481 +    // save current bci to setup Goto at the end
  1.2482 +    prev_bci = s.cur_bci();
  1.2483 +  }
  1.2484 +  CHECK_BAILOUT_(NULL);
  1.2485 +  // stop processing of this block (see try_inline_full)
  1.2486 +  if (_skip_block) {
  1.2487 +    _skip_block = false;
  1.2488 +    assert(_last && _last->as_BlockEnd(), "");
  1.2489 +    return _last->as_BlockEnd();
  1.2490 +  }
  1.2491 +  // check whether the last instruction, if any, is a BlockEnd instruction
  1.2492 +  BlockEnd* end = last()->as_BlockEnd();
  1.2493 +  if (end == NULL) {
  1.2494 +    // all blocks must end with a BlockEnd instruction => add a Goto
  1.2495 +    end = new Goto(block_at(s.cur_bci()), false);
  1.2496 +    _last = _last->set_next(end, prev_bci);
  1.2497 +  }
  1.2498 +  assert(end == last()->as_BlockEnd(), "inconsistency");
  1.2499 +
  1.2500 +  // if the method terminates, we don't need the stack anymore
  1.2501 +  if (end->as_Return() != NULL) {
  1.2502 +    state()->clear_stack();
  1.2503 +  } else if (end->as_Throw() != NULL) {
  1.2504 +    // May have exception handler in caller scopes
  1.2505 +    state()->truncate_stack(scope()->lock_stack_size());
  1.2506 +  }
  1.2507 +
  1.2508 +  // connect to begin & set state
  1.2509 +  // NOTE that inlining may have changed the block we are parsing
  1.2510 +  block()->set_end(end);
  1.2511 +  end->set_state(state());
  1.2512 +  // propagate state
  1.2513 +  for (int i = end->number_of_sux() - 1; i >= 0; i--) {
  1.2514 +    BlockBegin* sux = end->sux_at(i);
  1.2515 +    assert(sux->is_predecessor(block()), "predecessor missing");
  1.2516 +    // be careful, bailout if bytecodes are strange
  1.2517 +    if (!sux->try_merge(state())) BAILOUT_("block join failed", NULL);
  1.2518 +    scope_data()->add_to_work_list(end->sux_at(i));
  1.2519 +  }
  1.2520 +
  1.2521 +  scope_data()->set_stream(NULL);
  1.2522 +
  1.2523 +  // done
  1.2524 +  return end;
  1.2525 +}
  1.2526 +
  1.2527 +
  1.2528 +void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
  1.2529 +  do {
  1.2530 +    if (start_in_current_block_for_inlining && !bailed_out()) {
  1.2531 +      iterate_bytecodes_for_block(0);
  1.2532 +      start_in_current_block_for_inlining = false;
  1.2533 +    } else {
  1.2534 +      BlockBegin* b;
  1.2535 +      while ((b = scope_data()->remove_from_work_list()) != NULL) {
  1.2536 +        if (!b->is_set(BlockBegin::was_visited_flag)) {
  1.2537 +          if (b->is_set(BlockBegin::osr_entry_flag)) {
  1.2538 +            // we're about to parse the osr entry block, so make sure
  1.2539 +            // we set up the OSR edge leading into this block so that
  1.2540 +            // Phis get set up correctly.
  1.2541 +            setup_osr_entry_block();
  1.2542 +            // this is no longer the osr entry block, so clear it.
  1.2543 +            b->clear(BlockBegin::osr_entry_flag);
  1.2544 +          }
  1.2545 +          b->set(BlockBegin::was_visited_flag);
  1.2546 +          connect_to_end(b);
  1.2547 +        }
  1.2548 +      }
  1.2549 +    }
  1.2550 +  } while (!bailed_out() && !scope_data()->is_work_list_empty());
  1.2551 +}
  1.2552 +
  1.2553 +
  1.2554 +bool GraphBuilder::_is_initialized = false;
  1.2555 +bool GraphBuilder::_can_trap[Bytecodes::number_of_java_codes];
  1.2556 +bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
  1.2557 +
  1.2558 +void GraphBuilder::initialize() {
  1.2559 +  // make sure initialization happens only once (need a
  1.2560 +  // lock here, if we allow the compiler to be re-entrant)
  1.2561 +  if (is_initialized()) return;
  1.2562 +  _is_initialized = true;
  1.2563 +
  1.2564 +  // the following bytecodes are assumed to potentially
  1.2565 +  // throw exceptions in compiled code - note that e.g.
  1.2566 +  // monitorexit & the return bytecodes do not throw
  1.2567 +  // exceptions here because monitor pairing analysis has
  1.2568 +  // already proved that they will succeed
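         +  //
         +  // For example, _idiv and _irem appear in the list below because they
         +  // can throw an ArithmeticException on a zero divisor, while _fdiv and
         +  // _frem are absent because IEEE floating-point division produces
         +  // Infinity/NaN instead of trapping.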
  1.2569 +  Bytecodes::Code can_trap_list[] =
  1.2570 +    { Bytecodes::_ldc
  1.2571 +    , Bytecodes::_ldc_w
  1.2572 +    , Bytecodes::_ldc2_w
  1.2573 +    , Bytecodes::_iaload
  1.2574 +    , Bytecodes::_laload
  1.2575 +    , Bytecodes::_faload
  1.2576 +    , Bytecodes::_daload
  1.2577 +    , Bytecodes::_aaload
  1.2578 +    , Bytecodes::_baload
  1.2579 +    , Bytecodes::_caload
  1.2580 +    , Bytecodes::_saload
  1.2581 +    , Bytecodes::_iastore
  1.2582 +    , Bytecodes::_lastore
  1.2583 +    , Bytecodes::_fastore
  1.2584 +    , Bytecodes::_dastore
  1.2585 +    , Bytecodes::_aastore
  1.2586 +    , Bytecodes::_bastore
  1.2587 +    , Bytecodes::_castore
  1.2588 +    , Bytecodes::_sastore
  1.2589 +    , Bytecodes::_idiv
  1.2590 +    , Bytecodes::_ldiv
  1.2591 +    , Bytecodes::_irem
  1.2592 +    , Bytecodes::_lrem
  1.2593 +    , Bytecodes::_getstatic
  1.2594 +    , Bytecodes::_putstatic
  1.2595 +    , Bytecodes::_getfield
  1.2596 +    , Bytecodes::_putfield
  1.2597 +    , Bytecodes::_invokevirtual
  1.2598 +    , Bytecodes::_invokespecial
  1.2599 +    , Bytecodes::_invokestatic
  1.2600 +    , Bytecodes::_invokeinterface
  1.2601 +    , Bytecodes::_new
  1.2602 +    , Bytecodes::_newarray
  1.2603 +    , Bytecodes::_anewarray
  1.2604 +    , Bytecodes::_arraylength
  1.2605 +    , Bytecodes::_athrow
  1.2606 +    , Bytecodes::_checkcast
  1.2607 +    , Bytecodes::_instanceof
  1.2608 +    , Bytecodes::_monitorenter
  1.2609 +    , Bytecodes::_multianewarray
  1.2610 +    };
  1.2611 +
  1.2612 +  // the following bytecodes are assumed to potentially
  1.2613 +  // throw asynchronous exceptions in compiled code due
  1.2614 +  // to safepoints (note: these entries could be merged
  1.2615 +  // with the can_trap_list - however, we need to know
  1.2616 +  // which ones are asynchronous for now - see also the
  1.2617 +  // comment in GraphBuilder::handle_exception)
  1.2618 +  Bytecodes::Code is_async_list[] =
  1.2619 +    { Bytecodes::_ifeq
  1.2620 +    , Bytecodes::_ifne
  1.2621 +    , Bytecodes::_iflt
  1.2622 +    , Bytecodes::_ifge
  1.2623 +    , Bytecodes::_ifgt
  1.2624 +    , Bytecodes::_ifle
  1.2625 +    , Bytecodes::_if_icmpeq
  1.2626 +    , Bytecodes::_if_icmpne
  1.2627 +    , Bytecodes::_if_icmplt
  1.2628 +    , Bytecodes::_if_icmpge
  1.2629 +    , Bytecodes::_if_icmpgt
  1.2630 +    , Bytecodes::_if_icmple
  1.2631 +    , Bytecodes::_if_acmpeq
  1.2632 +    , Bytecodes::_if_acmpne
  1.2633 +    , Bytecodes::_goto
  1.2634 +    , Bytecodes::_jsr
  1.2635 +    , Bytecodes::_ret
  1.2636 +    , Bytecodes::_tableswitch
  1.2637 +    , Bytecodes::_lookupswitch
  1.2638 +    , Bytecodes::_ireturn
  1.2639 +    , Bytecodes::_lreturn
  1.2640 +    , Bytecodes::_freturn
  1.2641 +    , Bytecodes::_dreturn
  1.2642 +    , Bytecodes::_areturn
  1.2643 +    , Bytecodes::_return
  1.2644 +    , Bytecodes::_ifnull
  1.2645 +    , Bytecodes::_ifnonnull
  1.2646 +    , Bytecodes::_goto_w
  1.2647 +    , Bytecodes::_jsr_w
  1.2648 +    };
  1.2649 +
  1.2650 +  // initialize trap tables
  1.2651 +  for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
  1.2652 +    _can_trap[i] = false;
  1.2653 +    _is_async[i] = false;
  1.2654 +  }
  1.2655 +  // set standard trap info
  1.2656 +  for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
  1.2657 +    _can_trap[can_trap_list[j]] = true;
  1.2658 +  }
  1.2659 +
  1.2660 +  // We now deoptimize if an asynchronous exception is thrown. This
  1.2661 +  // considerably cleans up corner case issues related to javac's
  1.2662 +  // incorrect exception handler ranges for async exceptions and
  1.2663 +  // allows us to precisely analyze the types of exceptions from
  1.2664 +  // certain bytecodes.
  1.2665 +  if (!(DeoptC1 && DeoptOnAsyncException)) {
  1.2666 +    // set asynchronous trap info
  1.2667 +    for (uint k = 0; k < ARRAY_SIZE(is_async_list); k++) {
  1.2668 +      assert(!_can_trap[is_async_list[k]], "can_trap_list and is_async_list should be disjoint");
  1.2669 +      _can_trap[is_async_list[k]] = true;
  1.2670 +      _is_async[is_async_list[k]] = true;
  1.2671 +    }
  1.2672 +  }
  1.2673 +}
  1.2674 +
  1.2675 +
  1.2676 +BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  1.2677 +  assert(entry->is_set(f), "entry/flag mismatch");
  1.2678 +  // create header block
  1.2679 +  BlockBegin* h = new BlockBegin(entry->bci());
  1.2680 +  h->set_depth_first_number(0);
  1.2681 +
  1.2682 +  Value l = h;
  1.2683 +  if (profile_branches()) {
  1.2684 +    // Increment the invocation count on entry to the method.  We
  1.2685 +    // can't use profile_invocation here because append isn't set up to
  1.2686 +    // work properly at this point.  The instructions have to be
  1.2687 +    // appended to the instruction stream by hand.
  1.2688 +    Value m = new Constant(new ObjectConstant(compilation()->method()));
  1.2689 +    h->set_next(m, 0);
  1.2690 +    Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
  1.2691 +    m->set_next(p, 0);
  1.2692 +    l = p;
  1.2693 +  }
  1.2694 +
  1.2695 +  BlockEnd* g = new Goto(entry, false);
  1.2696 +  l->set_next(g, entry->bci());
  1.2697 +  h->set_end(g);
  1.2698 +  h->set(f);
  1.2699 +  // setup header block end state
  1.2700 +  ValueStack* s = state->copy(); // can use copy since stack is empty (=> no phis)
  1.2701 +  assert(s->stack_is_empty(), "must have empty stack at entry point");
  1.2702 +  g->set_state(s);
  1.2703 +  return h;
  1.2704 +}
  1.2705 +
  1.2706 +
  1.2708 +BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  1.2709 +  BlockBegin* start = new BlockBegin(0);
  1.2710 +
  1.2711 +  // This code eliminates the empty start block at the beginning of
  1.2712 +  // each method.  Previously, each method started with the
  1.2713 +  // start-block created below, and this block was followed by the
  1.2714 +  // header block that was always empty.  This header block is only
  1.2715 +  // necessary if std_entry is also a backward branch target because
  1.2716 +  // then phi functions may be necessary in the header block.  It's
  1.2717 +  // also necessary when profiling so that there's a single block that
  1.2718 +  // can increment the interpreter_invocation_count.
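         +  //
         +  // For example, if bci 0 is also the target of a backward branch, then
         +  // std_entry has predecessors inside the method and may need phi
         +  // functions, so the extra header block keeps the method entry edge
         +  // separate from the loop back edges.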
  1.2719 +  BlockBegin* new_header_block;
  1.2720 +  if (std_entry->number_of_preds() == 0 && !profile_branches()) {
  1.2721 +    new_header_block = std_entry;
  1.2722 +  } else {
  1.2723 +    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  1.2724 +  }
  1.2725 +
  1.2726 +  // setup start block (root for the IR graph)
  1.2727 +  Base* base =
  1.2728 +    new Base(
  1.2729 +      new_header_block,
  1.2730 +      osr_entry
  1.2731 +    );
  1.2732 +  start->set_next(base, 0);
  1.2733 +  start->set_end(base);
  1.2734 +  // create & setup state for start block
  1.2735 +  start->set_state(state->copy());
  1.2736 +  base->set_state(state->copy());
  1.2737 +
  1.2738 +  if (base->std_entry()->state() == NULL) {
  1.2739 +    // setup states for header blocks
  1.2740 +    base->std_entry()->merge(state);
  1.2741 +  }
  1.2742 +
  1.2743 +  assert(base->std_entry()->state() != NULL, "");
  1.2744 +  return start;
  1.2745 +}
  1.2746 +
  1.2747 +
  1.2748 +void GraphBuilder::setup_osr_entry_block() {
  1.2749 +  assert(compilation()->is_osr_compile(), "only for osrs");
  1.2750 +
  1.2751 +  int osr_bci = compilation()->osr_bci();
  1.2752 +  ciBytecodeStream s(method());
  1.2753 +  s.reset_to_bci(osr_bci);
  1.2754 +  s.next();
  1.2755 +  scope_data()->set_stream(&s);
  1.2756 +
  1.2757 +  // create a new block to be the osr setup code
  1.2758 +  _osr_entry = new BlockBegin(osr_bci);
  1.2759 +  _osr_entry->set(BlockBegin::osr_entry_flag);
  1.2760 +  _osr_entry->set_depth_first_number(0);
  1.2761 +  BlockBegin* target = bci2block()->at(osr_bci);
  1.2762 +  assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  1.2763 +  // the osr entry has no values for locals
  1.2764 +  ValueStack* state = target->state()->copy();
  1.2765 +  _osr_entry->set_state(state);
  1.2766 +
  1.2767 +  kill_all();
  1.2768 +  _block = _osr_entry;
  1.2769 +  _state = _osr_entry->state()->copy();
  1.2770 +  _last  = _osr_entry;
  1.2771 +  Value e = append(new OsrEntry());
  1.2772 +  e->set_needs_null_check(false);
  1.2773 +
  1.2774 +  // OSR buffer is
  1.2775 +  //
  1.2776 +  // locals[nlocals-1..0]
  1.2777 +  // monitors[number_of_locks-1..0]
  1.2778 +  //
  1.2779 +  // locals is a direct copy of the interpreter frame, so the first slot
  1.2780 +  // in the osr buffer's local array is the last local from the interpreter
  1.2781 +  // and the last slot is local[0] (the receiver) from the interpreter
  1.2782 +  //
  1.2783 +  // Similarly with locks: the first lock slot in the osr buffer is the nth
  1.2784 +  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  1.2785 +  // is the 0th lock in the interpreter frame (the method lock if synchronized)
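         +  //
         +  // Worked example (on a 64-bit VM with BytesPerWord == 8 and
         +  // max_locals == 3): locals_offset below is 8 * (3 - 1) = 16, so the
         +  // one-slot local 0 is read from the buffer at offset 16 - 0*8 = 16
         +  // and local 2 at offset 16 - 2*8 = 0, i.e. the reversed layout above.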
  1.2786 +
  1.2787 +  // Initialize monitors in the compiled activation.
  1.2788 +
  1.2789 +  int index;
  1.2790 +  Value local;
  1.2791 +
  1.2792 +  // find all the locals that the interpreter thinks contain live oops
  1.2793 +  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
  1.2794 +
  1.2795 +  // compute the offset into the locals so that we can treat the buffer
  1.2796 +  // as if the locals were still in the interpreter frame
  1.2797 +  int locals_offset = BytesPerWord * (method()->max_locals() - 1);
  1.2798 +  for_each_local_value(state, index, local) {
  1.2799 +    int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
  1.2800 +    Value get;
  1.2801 +    if (local->type()->is_object_kind() && !live_oops.at(index)) {
  1.2802 +      // The interpreter thinks this local is dead but the compiler
  1.2803 +      // doesn't, so pretend that the interpreter passed in null.
  1.2804 +      get = append(new Constant(objectNull));
  1.2805 +    } else {
  1.2806 +      get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
  1.2807 +                                    append(new Constant(new IntConstant(offset))),
  1.2808 +                                    0,
  1.2809 +                                    true));
  1.2810 +    }
  1.2811 +    _state->store_local(index, get);
  1.2812 +  }
  1.2813 +
  1.2814 +  // the storage for the OSR buffer is freed manually in the LIRGenerator.
  1.2815 +
  1.2816 +  assert(state->caller_state() == NULL, "should be top scope");
  1.2817 +  state->clear_locals();
  1.2818 +  Goto* g = new Goto(target, false);
  1.2819 +  g->set_state(_state->copy());
  1.2820 +  append(g);
  1.2821 +  _osr_entry->set_end(g);
  1.2822 +  target->merge(_osr_entry->end()->state());
  1.2823 +
  1.2824 +  scope_data()->set_stream(NULL);
  1.2825 +}
  1.2826 +
  1.2827 +
  1.2828 +ValueStack* GraphBuilder::state_at_entry() {
  1.2829 +  ValueStack* state = new ValueStack(scope(), method()->max_locals(), method()->max_stack());
  1.2830 +
  1.2831 +  // Set up locals for receiver
  1.2832 +  int idx = 0;
  1.2833 +  if (!method()->is_static()) {
  1.2834 +    // we should always see the receiver
  1.2835 +    state->store_local(idx, new Local(objectType, idx));
  1.2836 +    idx = 1;
  1.2837 +  }
  1.2838 +
  1.2839 +  // Set up locals for incoming arguments
  1.2840 +  ciSignature* sig = method()->signature();
  1.2841 +  for (int i = 0; i < sig->count(); i++) {
  1.2842 +    ciType* type = sig->type_at(i);
  1.2843 +    BasicType basic_type = type->basic_type();
  1.2844 +    // don't allow T_ARRAY to propagate into locals types
  1.2845 +    if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  1.2846 +    ValueType* vt = as_ValueType(basic_type);
  1.2847 +    state->store_local(idx, new Local(vt, idx));
  1.2848 +    idx += type->size();
  1.2849 +  }
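         +  // For example, for a virtual method int m(long a, int b): the
         +  // receiver occupies local 0, the two-slot long a occupies locals 1
         +  // and 2, and b occupies local 3 (idx advances by type->size()).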
  1.2850 +
  1.2851 +  // lock synchronized method
  1.2852 +  if (method()->is_synchronized()) {
  1.2853 +    state->lock(scope(), NULL);
  1.2854 +  }
  1.2855 +
  1.2856 +  return state;
  1.2857 +}
  1.2858 +
  1.2859 +
  1.2860 +GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  1.2861 +  : _scope_data(NULL)
  1.2862 +  , _exception_state(NULL)
  1.2863 +  , _instruction_count(0)
  1.2864 +  , _osr_entry(NULL)
  1.2865 +  , _memory(new MemoryBuffer())
  1.2866 +  , _compilation(compilation)
  1.2867 +  , _inline_bailout_msg(NULL)
  1.2868 +{
  1.2869 +  int osr_bci = compilation->osr_bci();
  1.2870 +
  1.2871 +  // determine entry points and bci2block mapping
  1.2872 +  BlockListBuilder blm(compilation, scope, osr_bci);
  1.2873 +  CHECK_BAILOUT();
  1.2874 +
  1.2875 +  BlockList* bci2block = blm.bci2block();
  1.2876 +  BlockBegin* start_block = bci2block->at(0);
  1.2877 +
  1.2878 +  assert(is_initialized(), "GraphBuilder must have been initialized");
  1.2879 +  push_root_scope(scope, bci2block, start_block);
  1.2880 +
  1.2881 +  // setup state for std entry
  1.2882 +  _initial_state = state_at_entry();
  1.2883 +  start_block->merge(_initial_state);
  1.2884 +
  1.2885 +  BlockBegin* sync_handler = NULL;
  1.2886 +  if (method()->is_synchronized() || DTraceMethodProbes) {
  1.2887 +    // setup an exception handler to do the unlocking and/or notification
  1.2888 +    sync_handler = new BlockBegin(-1);
  1.2889 +    sync_handler->set(BlockBegin::exception_entry_flag);
  1.2890 +    sync_handler->set(BlockBegin::is_on_work_list_flag);
  1.2891 +    sync_handler->set(BlockBegin::default_exception_handler_flag);
  1.2892 +
  1.2893 +    ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  1.2894 +    XHandler* h = new XHandler(desc);
  1.2895 +    h->set_entry_block(sync_handler);
  1.2896 +    scope_data()->xhandlers()->append(h);
  1.2897 +    scope_data()->set_has_handler();
  1.2898 +  }
  1.2899 +
  1.2900 +  // complete graph
  1.2901 +  _vmap        = new ValueMap();
  1.2902 +  scope->compute_lock_stack_size();
  1.2903 +  switch (scope->method()->intrinsic_id()) {
  1.2904 +  case vmIntrinsics::_dabs          : // fall through
  1.2905 +  case vmIntrinsics::_dsqrt         : // fall through
  1.2906 +  case vmIntrinsics::_dsin          : // fall through
  1.2907 +  case vmIntrinsics::_dcos          : // fall through
  1.2908 +  case vmIntrinsics::_dtan          : // fall through
  1.2909 +  case vmIntrinsics::_dlog          : // fall through
  1.2910 +  case vmIntrinsics::_dlog10        : // fall through
  1.2911 +    {
  1.2912 +      // Compiles where the root method is an intrinsic need a special
  1.2913 +      // compilation environment because the bytecodes for the method
  1.2914 +      // shouldn't be parsed during the compilation, only the special
  1.2915 +      // Intrinsic node should be emitted.  If this isn't done, the
  1.2916 +      // code for the inlined version will be different from the root
  1.2917 +      // compiled version, which could lead to monotonicity problems on
  1.2918 +      // intel.
  1.2919 +
  1.2920 +      // Set up a stream so that appending instructions works properly.
  1.2921 +      ciBytecodeStream s(scope->method());
  1.2922 +      s.reset_to_bci(0);
  1.2923 +      scope_data()->set_stream(&s);
  1.2924 +      s.next();
  1.2925 +
  1.2926 +      // setup the initial block state
  1.2927 +      _block = start_block;
  1.2928 +      _state = start_block->state()->copy();
  1.2929 +      _last  = start_block;
  1.2930 +      load_local(doubleType, 0);
  1.2931 +
  1.2932 +      // Emit the intrinsic node.
  1.2933 +      bool result = try_inline_intrinsics(scope->method());
  1.2934 +      if (!result) BAILOUT("failed to inline intrinsic");
  1.2935 +      method_return(dpop());
  1.2936 +
  1.2937 +      // connect the begin and end blocks and we're all done.
  1.2938 +      BlockEnd* end = last()->as_BlockEnd();
  1.2939 +      block()->set_end(end);
  1.2940 +      end->set_state(state());
  1.2941 +      break;
  1.2942 +    }
  1.2943 +  default:
  1.2944 +    scope_data()->add_to_work_list(start_block);
  1.2945 +    iterate_all_blocks();
  1.2946 +    break;
  1.2947 +  }
  1.2948 +  CHECK_BAILOUT();
  1.2949 +
  1.2950 +  if (sync_handler && sync_handler->state() != NULL) {
  1.2951 +    Value lock = NULL;
  1.2952 +    if (method()->is_synchronized()) {
  1.2953 +      lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
  1.2954 +                                     _initial_state->local_at(0);
  1.2955 +
  1.2956 +      sync_handler->state()->unlock();
  1.2957 +      sync_handler->state()->lock(scope, lock);
  1.2958 +
  1.2959 +    }
  1.2960 +    fill_sync_handler(lock, sync_handler, true);
  1.2961 +  }
  1.2962 +
  1.2963 +  _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
  1.2964 +
  1.2965 +  eliminate_redundant_phis(_start);
  1.2966 +
  1.2967 +  NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  1.2968 +  // for osr compile, bailout if some requirements are not fulfilled
  1.2969 +  if (osr_bci != -1) {
  1.2970 +    BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
  1.2971 +    assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
  1.2972 +
  1.2973 +    // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
  1.2974 +    if (!osr_block->state()->stack_is_empty()) {
  1.2975 +      BAILOUT("stack not empty at OSR entry point");
  1.2976 +    }
  1.2977 +  }
  1.2978 +#ifndef PRODUCT
  1.2979 +  if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
  1.2980 +#endif
  1.2981 +}
  1.2982 +
  1.2983 +
  1.2984 +ValueStack* GraphBuilder::lock_stack() {
  1.2985 +  // return a new ValueStack representing just the current lock stack
  1.2986 +  // (for debug info at safepoints in exception throwing or handling)
  1.2987 +  ValueStack* new_stack = state()->copy_locks();
  1.2988 +  return new_stack;
  1.2989 +}
  1.2990 +
  1.2991 +
  1.2992 +int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
  1.2993 +  int recur_level = 0;
  1.2994 +  for (IRScope* s = scope(); s != NULL; s = s->caller()) {
  1.2995 +    if (s->method() == cur_callee) {
  1.2996 +      ++recur_level;
  1.2997 +    }
  1.2998 +  }
  1.2999 +  return recur_level;
  1.3000 +}
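         +// For example, when compiling f and considering a call from f to
         +// itself, the scope chain holds one occurrence of f and the result is
         +// 1; once that copy has been inlined, a further call to f yields 2,
         +// and so on until the MaxRecursiveInlineLevel check in try_inline_full
         +// rejects the inline.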
  1.3001 +
  1.3002 +
  1.3003 +bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
  1.3004 +  // Clear out any existing inline bailout condition
  1.3005 +  clear_inline_bailout();
  1.3006 +
  1.3007 +  if (callee->should_exclude()) {
  1.3008 +    // callee is excluded
  1.3009 +    INLINE_BAILOUT("excluded by CompilerOracle")
  1.3010 +  } else if (!callee->can_be_compiled()) {
  1.3011 +    // callee is not compilable (prob. has breakpoints)
  1.3012 +    INLINE_BAILOUT("not compilable")
  1.3013 +  } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
  1.3014 +    // intrinsics can be native or not
  1.3015 +    return true;
  1.3016 +  } else if (callee->is_native()) {
  1.3017 +    // non-intrinsic natives cannot be inlined
  1.3018 +    INLINE_BAILOUT("non-intrinsic native")
  1.3019 +  } else if (callee->is_abstract()) {
  1.3020 +    INLINE_BAILOUT("abstract")
  1.3021 +  } else {
  1.3022 +    return try_inline_full(callee, holder_known);
  1.3023 +  }
  1.3024 +}
  1.3025 +
  1.3026 +
  1.3027 +bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  1.3028 +  if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
  1.3029 +  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
  1.3030 +  // callee seems like a good candidate
  1.3031 +  // determine id
  1.3032 +  bool preserves_state = false;
  1.3033 +  bool cantrap = true;
  1.3034 +  vmIntrinsics::ID id = callee->intrinsic_id();
  1.3035 +  switch (id) {
  1.3036 +    case vmIntrinsics::_arraycopy     :
  1.3037 +      if (!InlineArrayCopy) return false;
  1.3038 +      break;
  1.3039 +
  1.3040 +    case vmIntrinsics::_currentTimeMillis:
  1.3041 +    case vmIntrinsics::_nanoTime:
  1.3042 +      preserves_state = true;
  1.3043 +      cantrap = false;
  1.3044 +      break;
  1.3045 +
  1.3046 +    case vmIntrinsics::_floatToRawIntBits   :
  1.3047 +    case vmIntrinsics::_intBitsToFloat      :
  1.3048 +    case vmIntrinsics::_doubleToRawLongBits :
  1.3049 +    case vmIntrinsics::_longBitsToDouble    :
  1.3050 +      if (!InlineMathNatives) return false;
  1.3051 +      preserves_state = true;
  1.3052 +      cantrap = false;
  1.3053 +      break;
  1.3054 +
  1.3055 +    case vmIntrinsics::_getClass      :
  1.3056 +      if (!InlineClassNatives) return false;
  1.3057 +      preserves_state = true;
  1.3058 +      break;
  1.3059 +
  1.3060 +    case vmIntrinsics::_currentThread :
  1.3061 +      if (!InlineThreadNatives) return false;
  1.3062 +      preserves_state = true;
  1.3063 +      cantrap = false;
  1.3064 +      break;
  1.3065 +
  1.3066 +    case vmIntrinsics::_dabs          : // fall through
  1.3067 +    case vmIntrinsics::_dsqrt         : // fall through
  1.3068 +    case vmIntrinsics::_dsin          : // fall through
  1.3069 +    case vmIntrinsics::_dcos          : // fall through
  1.3070 +    case vmIntrinsics::_dtan          : // fall through
  1.3071 +    case vmIntrinsics::_dlog          : // fall through
  1.3072 +    case vmIntrinsics::_dlog10        : // fall through
  1.3073 +      if (!InlineMathNatives) return false;
  1.3074 +      cantrap = false;
  1.3075 +      preserves_state = true;
  1.3076 +      break;
  1.3077 +
  1.3078 +    // sun/misc/AtomicLong.attemptUpdate
  1.3079 +    case vmIntrinsics::_attemptUpdate :
  1.3080 +      if (!VM_Version::supports_cx8()) return false;
  1.3081 +      if (!InlineAtomicLong) return false;
  1.3082 +      preserves_state = true;
  1.3083 +      break;
  1.3084 +
  1.3085 +    // Use special nodes for Unsafe instructions so we can more easily
  1.3086 +    // perform an address-mode optimization on the raw variants
  1.3087 +    case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
  1.3088 +    case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
  1.3089 +    case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
  1.3090 +    case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
  1.3091 +    case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
  1.3092 +    case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
  1.3093 +    case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
  1.3094 +    case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
  1.3095 +    case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
  1.3096 +
  1.3097 +    case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
  1.3098 +    case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
  1.3099 +    case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
  1.3100 +    case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
  1.3101 +    case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
  1.3102 +    case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
  1.3103 +    case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
  1.3104 +    case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
  1.3105 +    case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
  1.3106 +
  1.3107 +    case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
  1.3108 +    case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
  1.3109 +    case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
  1.3110 +    case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
  1.3111 +    case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
  1.3112 +    case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
  1.3113 +    case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
  1.3114 +    case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
  1.3115 +    case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
  1.3116 +
  1.3117 +    case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  1.3118 +    case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
  1.3119 +    case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
  1.3120 +    case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
  1.3121 +    case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
  1.3122 +    case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
  1.3123 +    case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
  1.3124 +    case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
  1.3125 +    case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
  1.3126 +
  1.3127 +    case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
  1.3128 +    case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
  1.3129 +    case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
  1.3130 +    case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
  1.3131 +    case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
  1.3132 +    case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
  1.3133 +    case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);
  1.3134 +
  1.3135 +    case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
  1.3136 +    case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
  1.3137 +    case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
  1.3138 +    case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
  1.3139 +    case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
  1.3140 +    case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
  1.3141 +    case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);
  1.3142 +
  1.3143 +    case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
  1.3144 +    case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
  1.3145 +    case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true,  false);
  1.3146 +    case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true,  true);
  1.3147 +
  1.3148 +    case vmIntrinsics::_checkIndex    :
  1.3149 +      if (!InlineNIOCheckIndex) return false;
  1.3150 +      preserves_state = true;
  1.3151 +      break;
  1.3152 +    case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  1.3153 +    case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
  1.3154 +    case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
  1.3155 +
  1.3156 +    case vmIntrinsics::_compareAndSwapLong:
  1.3157 +      if (!VM_Version::supports_cx8()) return false;
  1.3158 +      // fall through
  1.3159 +    case vmIntrinsics::_compareAndSwapInt:
  1.3160 +    case vmIntrinsics::_compareAndSwapObject:
  1.3161 +      append_unsafe_CAS(callee);
  1.3162 +      return true;
  1.3163 +
  1.3164 +    default                       : return false; // do not inline
  1.3165 +  }
  1.3166 +  // create intrinsic node
  1.3167 +  const bool has_receiver = !callee->is_static();
  1.3168 +  ValueType* result_type = as_ValueType(callee->return_type());
  1.3169 +
  1.3170 +  Values* args = state()->pop_arguments(callee->arg_size());
  1.3171 +  ValueStack* locks = lock_stack();
  1.3172 +  if (profile_calls()) {
  1.3173 +    // Don't profile in the special case where the root method
  1.3174 +    // is the intrinsic
  1.3175 +    if (callee != method()) {
  1.3176 +      Value recv = NULL;
  1.3177 +      if (has_receiver) {
  1.3178 +        recv = args->at(0);
  1.3179 +        null_check(recv);
  1.3180 +      }
  1.3181 +      profile_call(recv, NULL);
  1.3182 +    }
  1.3183 +  }
  1.3184 +
  1.3185 +  Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, locks,
  1.3186 +                                    preserves_state, cantrap);
  1.3187 +  // append instruction & push result
  1.3188 +  Value value = append_split(result);
  1.3189 +  if (result_type != voidType) push(result_type, value);
  1.3190 +
  1.3191 +#ifndef PRODUCT
  1.3192 +  // printing
  1.3193 +  if (PrintInlining) {
  1.3194 +    print_inline_result(callee, true);
  1.3195 +  }
  1.3196 +#endif
  1.3197 +
  1.3198 +  // done
  1.3199 +  return true;
  1.3200 +}
  1.3201 +
  1.3202 +
  1.3203 +bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  1.3204 +  // Introduce a new callee continuation point - all Ret instructions
  1.3205 +  // will be replaced with Gotos to this point.
  1.3206 +  BlockBegin* cont = block_at(next_bci());
  1.3207 +  assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");
  1.3208 +
  1.3209 +  // Note: cannot assign state to continuation yet, as we have to
  1.3210 +  // pick up the state from the Ret instructions.
  1.3211 +
  1.3212 +  // Push callee scope
  1.3213 +  push_scope_for_jsr(cont, jsr_dest_bci);
  1.3214 +
  1.3215 +  // Temporarily set up bytecode stream so we can append instructions
  1.3216 +  // (only using the bci of this stream)
  1.3217 +  scope_data()->set_stream(scope_data()->parent()->stream());
  1.3218 +
  1.3219 +  BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  1.3220 +  assert(jsr_start_block != NULL, "jsr start block must exist");
  1.3221 +  assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  1.3222 +  Goto* goto_sub = new Goto(jsr_start_block, false);
  1.3223 +  goto_sub->set_state(state());
  1.3224 +  // Must copy state to avoid wrong sharing when parsing bytecodes
  1.3225 +  assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
  1.3226 +  jsr_start_block->set_state(state()->copy());
  1.3227 +  append(goto_sub);
  1.3228 +  _block->set_end(goto_sub);
  1.3229 +  _last = _block = jsr_start_block;
  1.3230 +
  1.3231 +  // Clear out bytecode stream
  1.3232 +  scope_data()->set_stream(NULL);
  1.3233 +
  1.3234 +  scope_data()->add_to_work_list(jsr_start_block);
  1.3235 +
  1.3236 +  // Ready to resume parsing in subroutine
  1.3237 +  iterate_all_blocks();
  1.3238 +
  1.3239 +  // If we bailed out during parsing, return immediately (this is bad news)
  1.3240 +  CHECK_BAILOUT_(false);
  1.3241 +
  1.3242 +  // Detect whether the continuation can actually be reached. If not,
  1.3243 +  // it has not had state set by the join() operations in
  1.3244 +  // iterate_bytecodes_for_block()/ret() and we should not touch the
  1.3245 +  // iteration state. The calling activation of
  1.3246 +  // iterate_bytecodes_for_block will then complete normally.
  1.3247 +  if (cont->state() != NULL) {
  1.3248 +    if (!cont->is_set(BlockBegin::was_visited_flag)) {
  1.3249 +      // add continuation to work list instead of parsing it immediately
  1.3250 +      scope_data()->parent()->add_to_work_list(cont);
  1.3251 +    }
  1.3252 +  }
  1.3253 +
  1.3254 +  assert(jsr_continuation() == cont, "continuation must not have changed");
  1.3255 +  assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
  1.3256 +         jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
  1.3257 +         "continuation can only be visited in case of backward branches");
  1.3258 +  assert(_last && _last->as_BlockEnd(), "block must have end");
  1.3259 +
  1.3260 +  // continuation is in work list, so end iteration of current block
  1.3261 +  _skip_block = true;
  1.3262 +  pop_scope_for_jsr();
  1.3263 +
  1.3264 +  return true;
  1.3265 +}
  1.3266 +
  1.3267 +
  1.3268 +// Inline the entry of a synchronized method as a monitor enter and
  1.3269 +// register the exception handler which releases the monitor if an
  1.3270 +// exception is thrown within the callee. Note that the monitor enter
  1.3271 +// cannot throw an exception itself, because the receiver is
  1.3272 +// guaranteed to be non-null by the explicit null check at the
  1.3273 +// beginning of inlining.
  1.3274 +void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
  1.3275 +  assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
  1.3276 +
  1.3277 +  set_exception_state(state()->copy());
  1.3278 +  monitorenter(lock, SynchronizationEntryBCI);
  1.3279 +  assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
  1.3280 +  _last->set_needs_null_check(false);
  1.3281 +
  1.3282 +  sync_handler->set(BlockBegin::exception_entry_flag);
  1.3283 +  sync_handler->set(BlockBegin::is_on_work_list_flag);
  1.3284 +
  1.3285 +  ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  1.3286 +  XHandler* h = new XHandler(desc);
  1.3287 +  h->set_entry_block(sync_handler);
  1.3288 +  scope_data()->xhandlers()->append(h);
  1.3289 +  scope_data()->set_has_handler();
  1.3290 +}
  1.3291 +
  1.3292 +
  1.3293 +// If an exception is thrown and not handled within an inlined
  1.3294 +// synchronized method, the monitor must be released before the
  1.3295 +// exception is rethrown in the outer scope. Generate the appropriate
  1.3296 +// instructions here.
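         +//
         +// In outline, the handler body generated below is
         +//   exception = <incoming exception object>
         +//   monitorexit(lock)      // in the scope of the synchronized method
         +//   throw exception        // in the caller's scope, at the call bci
         +// matching the ExceptionObject/monitorexit/throw_op sequence in the code.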
  1.3297 +void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
  1.3298 +  BlockBegin* orig_block = _block;
  1.3299 +  ValueStack* orig_state = _state;
  1.3300 +  Instruction* orig_last = _last;
  1.3301 +  _last = _block = sync_handler;
  1.3302 +  _state = sync_handler->state()->copy();
  1.3303 +
  1.3304 +  assert(sync_handler != NULL, "handler missing");
  1.3305 +  assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "sync handler should not have been visited yet");
  1.3306 +
  1.3307 +  assert(lock != NULL || default_handler, "lock or handler missing");
  1.3308 +
  1.3309 +  XHandler* h = scope_data()->xhandlers()->remove_last();
  1.3310 +  assert(h->entry_block() == sync_handler, "corrupt list of handlers");
  1.3311 +
  1.3312 +  block()->set(BlockBegin::was_visited_flag);
  1.3313 +  Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
  1.3314 +  assert(exception->is_pinned(), "must be");
  1.3315 +
  1.3316 +  int bci = SynchronizationEntryBCI;
  1.3317 +  if (lock) {
  1.3318 +    assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
  1.3319 +    if (lock->bci() == -99) {
  1.3320 +      lock = append_with_bci(lock, -1);
  1.3321 +    }
  1.3322 +
  1.3323 +    // exit the monitor in the context of the synchronized method
  1.3324 +    monitorexit(lock, SynchronizationEntryBCI);
  1.3325 +
  1.3326 +    // exit the context of the synchronized method
  1.3327 +    if (!default_handler) {
  1.3328 +      pop_scope();
  1.3329 +      _state = _state->copy();
  1.3330 +      bci = _state->scope()->caller_bci();
  1.3331 +      _state = _state->pop_scope()->copy();
  1.3332 +    }
  1.3333 +  }
  1.3334 +
  1.3335 +  // perform the throw as if at the call site
  1.3336 +  apush(exception);
  1.3337 +
  1.3338 +  set_exception_state(state()->copy());
  1.3339 +  throw_op(bci);
  1.3340 +
  1.3341 +  BlockEnd* end = last()->as_BlockEnd();
  1.3342 +  block()->set_end(end);
  1.3343 +  end->set_state(state());
  1.3344 +
  1.3345 +  _block = orig_block;
  1.3346 +  _state = orig_state;
  1.3347 +  _last = orig_last;
  1.3348 +}
  1.3349 +
  1.3350 +
  1.3351 +bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  1.3352 +  assert(!callee->is_native(), "callee must not be native");
  1.3353 +
  1.3354 +  // first test for conditions that make inlining impossible
  1.3355 +  if (callee->has_exception_handlers() &&
  1.3356 +      !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
  1.3357 +  if (callee->is_synchronized() &&
  1.3358 +      !InlineSynchronizedMethods         ) INLINE_BAILOUT("callee is synchronized");
  1.3359 +  if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
  1.3360 +  if (!callee->has_balanced_monitors())    INLINE_BAILOUT("callee's monitors do not match");
  1.3361 +
  1.3362 +  // Proper inlining of methods with jsrs requires a little more work.
  1.3363 +  if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
  1.3364 +
  1.3365 +  // now perform tests that are based on flag settings
  1.3366 +  if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
  1.3367 +  if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
  1.3368 +  if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
  1.3369 +
  1.3370 +  // don't inline throwable methods unless the inlining tree is rooted in a throwable class
  1.3371 +  if (callee->name() == ciSymbol::object_initializer_name() &&
  1.3372 +      callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  1.3373 +    // Throwable constructor call
  1.3374 +    IRScope* top = scope();
  1.3375 +    while (top->caller() != NULL) {
  1.3376 +      top = top->caller();
  1.3377 +    }
  1.3378 +    if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  1.3379 +      INLINE_BAILOUT("don't inline Throwable constructors");
  1.3380 +    }
  1.3381 +  }
  1.3382 +
  1.3383 +  // When SSE2 is used on intel, no special handling is needed
  1.3384 +  // for strictfp; but because the enum-constant is fixed at compile
  1.3385 +  // time, the runtime check for UseSSE < 2 is still needed here
  1.3386 +  if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
  1.3387 +    INLINE_BAILOUT("caller and callee have different strict fp requirements");
  1.3388 +  }
  1.3389 +
  1.3390 +  if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
  1.3391 +    INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
  1.3392 +  }
  1.3393 +
  1.3394 +#ifndef PRODUCT
  1.3395 +  // printing
  1.3396 +  if (PrintInlining) {
  1.3397 +    print_inline_result(callee, true);
  1.3398 +  }
  1.3399 +#endif
  1.3400 +
  1.3401 +  // NOTE: Bailouts from this point on, which occur at the
  1.3402 +  // GraphBuilder level, abort not just the inlining but
  1.3403 +  // the entire compilation.
  1.3404 +
  1.3405 +  BlockBegin* orig_block = block();
  1.3406 +
  1.3407 +  const int args_base = state()->stack_size() - callee->arg_size();
  1.3408 +  assert(args_base >= 0, "stack underflow during inlining");
  1.3409 +
  1.3410 +  // Insert null check if necessary
  1.3411 +  Value recv = NULL;
  1.3412 +  if (code() != Bytecodes::_invokestatic) {
  1.3413 +    // note: null check must happen even if first instruction of callee does
  1.3414 +    //       an implicit null check since the callee is in a different scope
  1.3415 +    //       and we must make sure exception handling does the right thing
  1.3416 +    assert(!callee->is_static(), "callee must not be static");
  1.3417 +    assert(callee->arg_size() > 0, "must have at least a receiver");
  1.3418 +    recv = state()->stack_at(args_base);
  1.3419 +    null_check(recv);
  1.3420 +  }
  1.3421 +
  1.3422 +  if (profile_inlined_calls()) {
  1.3423 +    profile_call(recv, holder_known ? callee->holder() : NULL);
  1.3424 +  }
  1.3425 +
  1.3426 +  profile_invocation(callee);
  1.3427 +
  1.3428 +  // Introduce a new callee continuation point - if the callee has
  1.3429 +  // more than one return instruction or the return does not allow
  1.3430 +  // fall-through of control flow, all return instructions of the
  1.3431 +  // callee will need to be replaced by Goto's pointing to this
  1.3432 +  // continuation point.
  1.3433 +  BlockBegin* cont = block_at(next_bci());
  1.3434 +  bool continuation_existed = true;
  1.3435 +  if (cont == NULL) {
  1.3436 +    cont = new BlockBegin(next_bci());
  1.3437 +    // low number so that continuation gets parsed as early as possible
  1.3438 +    cont->set_depth_first_number(0);
  1.3439 +#ifndef PRODUCT
  1.3440 +    if (PrintInitialBlockList) {
  1.3441 +      tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
  1.3442 +                    cont->block_id(), cont->bci(), bci());
  1.3443 +    }
  1.3444 +#endif
  1.3445 +    continuation_existed = false;
  1.3446 +  }
  1.3447 +  // Record number of predecessors of continuation block before
  1.3448 +  // inlining, to detect if the inlined method has edges to its
  1.3449 +  // continuation after inlining.
  1.3450 +  int continuation_preds = cont->number_of_preds();
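         +  // For example, if every path through the callee ends in an athrow,
         +  // no Goto to the continuation is ever appended, the predecessor count
         +  // stays unchanged, and the code after the call site becomes
         +  // unreachable (see the continuation_preds comparison further down).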
  1.3451 +
  1.3452 +  // Push callee scope
  1.3453 +  push_scope(callee, cont);
  1.3454 +
  1.3455 +  // the BlockListBuilder for the callee could have bailed out
  1.3456 +  CHECK_BAILOUT_(false);
  1.3457 +
  1.3458 +  // Temporarily set up bytecode stream so we can append instructions
  1.3459 +  // (only using the bci of this stream)
  1.3460 +  scope_data()->set_stream(scope_data()->parent()->stream());
  1.3461 +
  1.3462 +  // Pass parameters into callee state: add assignments
  1.3463 +  // note: this will also ensure that all arguments are computed before being passed
  1.3464 +  ValueStack* callee_state = state();
  1.3465 +  ValueStack* caller_state = scope()->caller_state();
  1.3466 +  { int i = args_base;
  1.3467 +    while (i < caller_state->stack_size()) {
  1.3468 +      const int par_no = i - args_base;
  1.3469 +      Value  arg = caller_state->stack_at_inc(i);
  1.3470 +      // NOTE: take base() of arg->type() to avoid problems storing
  1.3471 +      // constants
  1.3472 +      store_local(callee_state, arg, arg->type()->base(), par_no);
  1.3473 +    }
  1.3474 +  }
  1.3475 +
  1.3476 +  // Remove args from stack.
  1.3477 +  // Note that we preserve locals state in case we can use it later
  1.3478 +  // (see use of pop_scope() below)
  1.3479 +  caller_state->truncate_stack(args_base);
  1.3480 +  callee_state->truncate_stack(args_base);
  1.3481 +
  1.3482 +  // Set up the state that is used at returns from the inlined method.
  1.3483 +  // This is essentially the state of the continuation block, but
  1.3484 +  // without the return value on the stack, if any; the return value
  1.3485 +  // will be pushed at the return instruction (see method_return).
  1.3486 +  scope_data()->set_continuation_state(caller_state->copy());
  1.3487 +
  1.3488 +  // Compute lock stack size for callee scope now that args have been passed
  1.3489 +  scope()->compute_lock_stack_size();
  1.3490 +
  1.3491 +  Value lock;
  1.3492 +  BlockBegin* sync_handler;
  1.3493 +
  1.3494 +  // Inline the locking of the receiver if the callee is synchronized
  1.3495 +  if (callee->is_synchronized()) {
  1.3496 +    lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
  1.3497 +                               : state()->local_at(0);
  1.3498 +    sync_handler = new BlockBegin(-1);
  1.3499 +    inline_sync_entry(lock, sync_handler);
  1.3500 +
  1.3501 +    // recompute the lock stack size
  1.3502 +    scope()->compute_lock_stack_size();
  1.3503 +  }
  1.3504 +
  1.3505 +
  1.3506 +  BlockBegin* callee_start_block = block_at(0);
  1.3507 +  if (callee_start_block != NULL) {
  1.3508 +    assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
  1.3509 +    Goto* goto_callee = new Goto(callee_start_block, false);
  1.3510 +    goto_callee->set_state(state());
  1.3511 +    // The state for this goto is in the scope of the callee, so use
  1.3512 +    // the entry bci for the callee instead of the call site bci.
  1.3513 +    append_with_bci(goto_callee, 0);
  1.3514 +    _block->set_end(goto_callee);
  1.3515 +    callee_start_block->merge(callee_state);
  1.3516 +
  1.3517 +    _last = _block = callee_start_block;
  1.3518 +
  1.3519 +    scope_data()->add_to_work_list(callee_start_block);
  1.3520 +  }
  1.3521 +
  1.3522 +  // Clear out bytecode stream
  1.3523 +  scope_data()->set_stream(NULL);
  1.3524 +
  1.3525 +  // Ready to resume parsing in callee (either in the same block we
  1.3526 +  // were in before or in the callee's start block)
  1.3527 +  iterate_all_blocks(callee_start_block == NULL);
  1.3528 +
  1.3529 +  // If we bailed out during parsing, return immediately (this is bad news)
  1.3530 +  if (bailed_out()) return false;
  1.3531 +
  1.3532 +  // iterate_all_blocks theoretically traverses in random order; in
  1.3533 +  // practice, we have only traversed the continuation if we are
  1.3534 +  // inlining into a subroutine
  1.3535 +  assert(continuation_existed ||
  1.3536 +         !continuation()->is_set(BlockBegin::was_visited_flag),
  1.3537 +         "continuation should not have been parsed yet if we created it");
  1.3538 +
  1.3539 +  // If we bailed out during parsing, return immediately (this is bad news)
  1.3540 +  CHECK_BAILOUT_(false);
  1.3541 +
  1.3542 +  // At this point we are almost ready to return and resume parsing of
  1.3543 +  // the caller back in the GraphBuilder. The only thing we want to do
  1.3544 +  // first is an optimization: during parsing of the callee we
  1.3545 +  // generated at least one Goto to the continuation block. If we
  1.3546 +  // generated exactly one, and if the inlined method spanned exactly
  1.3547 +  // one block (and we didn't have to Goto its entry), then we snip
  1.3548 +  // off the Goto to the continuation, allowing control to fall
  1.3549 +  // through back into the caller block and effectively performing
  1.3550 +  // block merging. This allows load elimination and CSE to take place
  1.3551 +  // across multiple callee scopes if they are relatively simple, and
  1.3552 +  // is currently essential to making inlining profitable.
  1.3553 +  if (   num_returns() == 1
  1.3554 +      && block() == orig_block
  1.3555 +      && block() == inline_cleanup_block()) {
  1.3556 +    _last = inline_cleanup_return_prev();
  1.3557 +    _state = inline_cleanup_state()->pop_scope();
  1.3558 +  } else if (continuation_preds == cont->number_of_preds()) {
  1.3559 +    // Inlining has made the instructions after the invoke in the
  1.3560 +    // caller unreachable, so skip filling this block with
  1.3561 +    // instructions!
  1.3562 +    assert(cont == continuation(), "");
  1.3563 +    assert(_last && _last->as_BlockEnd(), "");
  1.3564 +    _skip_block = true;
  1.3565 +  } else {
  1.3566 +    // Resume parsing in continuation block unless it was already parsed.
  1.3567 +    // Note that if we don't change _last here, iteration in
  1.3568 +    // iterate_bytecodes_for_block will stop when we return.
  1.3569 +    if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
  1.3570 +      // add continuation to work list instead of parsing it immediately
  1.3571 +      assert(_last && _last->as_BlockEnd(), "");
  1.3572 +      scope_data()->parent()->add_to_work_list(continuation());
  1.3573 +      _skip_block = true;
  1.3574 +    }
  1.3575 +  }
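         +
         +  // Illustrative sketch (hypothetical caller/callee): for a trivial
         +  // getter  int f() { return _x; }  invoked as  y = o.f();  the callee
         +  // parses entirely within orig_block and produces exactly one return,
         +  // so the first branch above snips the Goto: the LoadField feeding
         +  // the return and the caller's uses of y share one block, where value
         +  // numbering and CSE can see both.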
  1.3576 +
  1.3577 +  // Fill the exception handler for synchronized methods with instructions
  1.3578 +  if (callee->is_synchronized() && sync_handler->state() != NULL) {
  1.3579 +    fill_sync_handler(lock, sync_handler);
  1.3580 +  } else {
  1.3581 +    pop_scope();
  1.3582 +  }
  1.3583 +
  1.3584 +  compilation()->notice_inlined_method(callee);
  1.3585 +
  1.3586 +  return true;
  1.3587 +}
  1.3588 +
  1.3589 +
  1.3590 +void GraphBuilder::inline_bailout(const char* msg) {
  1.3591 +  assert(msg != NULL, "inline bailout msg must exist");
  1.3592 +  _inline_bailout_msg = msg;
  1.3593 +}
  1.3594 +
  1.3595 +
  1.3596 +void GraphBuilder::clear_inline_bailout() {
  1.3597 +  _inline_bailout_msg = NULL;
  1.3598 +}
  1.3599 +
  1.3600 +
  1.3601 +void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
  1.3602 +  ScopeData* data = new ScopeData(NULL);
  1.3603 +  data->set_scope(scope);
  1.3604 +  data->set_bci2block(bci2block);
  1.3605 +  _scope_data = data;
  1.3606 +  _block = start;
  1.3607 +}
  1.3608 +
  1.3609 +
  1.3610 +void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  1.3611 +  IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  1.3612 +  scope()->add_callee(callee_scope);
  1.3613 +
  1.3614 +  BlockListBuilder blb(compilation(), callee_scope, -1);
  1.3615 +  CHECK_BAILOUT();
  1.3616 +
  1.3617 +  if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
  1.3618 +    // this scope can be inlined directly into the caller so remove
  1.3619 +    // the block at bci 0.
  1.3620 +    blb.bci2block()->at_put(0, NULL);
  1.3621 +  }
  1.3622 +
  1.3623 +  callee_scope->set_caller_state(state());
  1.3624 +  set_state(state()->push_scope(callee_scope));
  1.3625 +
  1.3626 +  ScopeData* data = new ScopeData(scope_data());
  1.3627 +  data->set_scope(callee_scope);
  1.3628 +  data->set_bci2block(blb.bci2block());
  1.3629 +  data->set_continuation(continuation);
  1.3630 +  _scope_data = data;
  1.3631 +}
  1.3632 +
  1.3633 +
  1.3634 +void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  1.3635 +  ScopeData* data = new ScopeData(scope_data());
  1.3636 +  data->set_parsing_jsr();
  1.3637 +  data->set_jsr_entry_bci(jsr_dest_bci);
  1.3638 +  data->set_jsr_return_address_local(-1);
  1.3639 +  // Must clone the bci2block list, as we will be mutating it in order
  1.3640 +  // to properly clone all blocks in the jsr region as well as the
  1.3641 +  // exception handlers containing rets
  1.3642 +  BlockList* new_bci2block = new BlockList(bci2block()->length());
  1.3643 +  new_bci2block->push_all(bci2block());
  1.3644 +  data->set_bci2block(new_bci2block);
  1.3645 +  data->set_scope(scope());
  1.3646 +  data->setup_jsr_xhandlers();
  1.3647 +  data->set_continuation(continuation());
  1.3648 +  if (continuation() != NULL) {
  1.3649 +    assert(continuation_state() != NULL, "");
  1.3650 +    data->set_continuation_state(continuation_state()->copy());
  1.3651 +  }
  1.3652 +  data->set_jsr_continuation(jsr_continuation);
  1.3653 +  _scope_data = data;
  1.3654 +}
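         +
         +// Illustrative bytecode sketch (hypothetical, for the jsr machinery):
         +//    0: jsr 10       // pushes the return address (bci 3) and jumps
         +//    3: ...          // jsr_continuation begins here
         +//   10: astore_1     // subroutine saves the return address; found
         +//                    // during parsing as jsr_return_address_local
         +//   15: ret 1        // returns to bci 3
         +// Each jsr site gets its own cloned ScopeData and bci2block list, so
         +// the subroutine body (and exception handlers containing rets) is
         +// re-parsed per call site.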
  1.3655 +
  1.3656 +
  1.3657 +void GraphBuilder::pop_scope() {
  1.3658 +  int number_of_locks = scope()->number_of_locks();
  1.3659 +  _scope_data = scope_data()->parent();
  1.3660 +  // accumulate minimum number of monitor slots to be reserved
  1.3661 +  scope()->set_min_number_of_locks(number_of_locks);
  1.3662 +}
  1.3663 +
  1.3664 +
  1.3665 +void GraphBuilder::pop_scope_for_jsr() {
  1.3666 +  _scope_data = scope_data()->parent();
  1.3667 +}
  1.3668 +
  1.3669 +bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  1.3670 +  if (InlineUnsafeOps) {
  1.3671 +    Values* args = state()->pop_arguments(callee->arg_size());
  1.3672 +    null_check(args->at(0));
  1.3673 +    Instruction* offset = args->at(2);
  1.3674 +#ifndef _LP64
  1.3675 +    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  1.3676 +#endif
  1.3677 +    Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
  1.3678 +    push(op->type(), op);
  1.3679 +    compilation()->set_has_unsafe_access(true);
  1.3680 +  }
  1.3681 +  return InlineUnsafeOps;
  1.3682 +}
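         +
         +// Illustrative sketch (user-level call, not part of this change): the
         +// intrinsic above covers calls such as
         +//   int v = unsafe.getIntVolatile(obj, offset);  // t = T_INT, volatile
         +// where args->at(0) is the Unsafe receiver (only null-checked),
         +// args->at(1) is obj, and args->at(2) is the long offset, which is
         +// narrowed to an int on 32-bit platforms before the UnsafeGetObject
         +// instruction is appended.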
  1.3683 +
  1.3684 +
  1.3685 +bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  1.3686 +  if (InlineUnsafeOps) {
  1.3687 +    Values* args = state()->pop_arguments(callee->arg_size());
  1.3688 +    null_check(args->at(0));
  1.3689 +    Instruction* offset = args->at(2);
  1.3690 +#ifndef _LP64
  1.3691 +    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  1.3692 +#endif
  1.3693 +    Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
  1.3694 +    compilation()->set_has_unsafe_access(true);
  1.3695 +    kill_all();
  1.3696 +  }
  1.3697 +  return InlineUnsafeOps;
  1.3698 +}
  1.3699 +
  1.3700 +
  1.3701 +bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
  1.3702 +  if (InlineUnsafeOps) {
  1.3703 +    Values* args = state()->pop_arguments(callee->arg_size());
  1.3704 +    null_check(args->at(0));
  1.3705 +    Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
  1.3706 +    push(op->type(), op);
  1.3707 +    compilation()->set_has_unsafe_access(true);
  1.3708 +  }
  1.3709 +  return InlineUnsafeOps;
  1.3710 +}
  1.3711 +
  1.3712 +
  1.3713 +bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
  1.3714 +  if (InlineUnsafeOps) {
  1.3715 +    Values* args = state()->pop_arguments(callee->arg_size());
  1.3716 +    null_check(args->at(0));
  1.3717 +    Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
  1.3718 +    compilation()->set_has_unsafe_access(true);
  1.3719 +  }
  1.3720 +  return InlineUnsafeOps;
  1.3721 +}
  1.3722 +
  1.3723 +
  1.3724 +bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
  1.3725 +  if (InlineUnsafeOps) {
  1.3726 +    Values* args = state()->pop_arguments(callee->arg_size());
  1.3727 +    int obj_arg_index = 1; // Assume non-static case
  1.3728 +    if (is_static) {
  1.3729 +      obj_arg_index = 0;
  1.3730 +    } else {
  1.3731 +      null_check(args->at(0));
  1.3732 +    }
  1.3733 +    Instruction* offset = args->at(obj_arg_index + 1);
  1.3734 +#ifndef _LP64
  1.3735 +    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  1.3736 +#endif
  1.3737 +    Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
  1.3738 +                               : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
  1.3739 +    compilation()->set_has_unsafe_access(true);
  1.3740 +  }
  1.3741 +  return InlineUnsafeOps;
  1.3742 +}
  1.3743 +
  1.3744 +
  1.3745 +void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  1.3746 +  ValueType* result_type = as_ValueType(callee->return_type());
  1.3747 +  assert(result_type->is_int(), "int result");
  1.3748 +  Values* args = state()->pop_arguments(callee->arg_size());
  1.3749 +
  1.3750 +  // Pop off some args to handle specially, then push them back
  1.3751 +  Value newval = args->pop();
  1.3752 +  Value cmpval = args->pop();
  1.3753 +  Value offset = args->pop();
  1.3754 +  Value src = args->pop();
  1.3755 +  Value unsafe_obj = args->pop();
  1.3756 +
  1.3757 +  // Separately handle the unsafe arg. It is not needed for code
  1.3758 +  // generation, but must be null checked
  1.3759 +  null_check(unsafe_obj);
  1.3760 +
  1.3761 +#ifndef _LP64
  1.3762 +  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  1.3763 +#endif
  1.3764 +
  1.3765 +  args->push(src);
  1.3766 +  args->push(offset);
  1.3767 +  args->push(cmpval);
  1.3768 +  args->push(newval);
  1.3769 +
  1.3770 +  // An unsafe CAS can alias with other field accesses, but we don't
  1.3771 +  // know which ones, so mark the state as not preserved.  This will
  1.3772 +  // cause CSE to invalidate memory across it.
  1.3773 +  bool preserves_state = false;
  1.3774 +  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, lock_stack(), preserves_state);
  1.3775 +  append_split(result);
  1.3776 +  push(result_type, result);
  1.3777 +  compilation()->set_has_unsafe_access(true);
  1.3778 +}
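         +
         +// Illustrative sketch (user-level call): this builds the intrinsic for
         +//   boolean ok = unsafe.compareAndSwapInt(obj, offset, expect, update);
         +// unsafe_obj (the receiver) is popped and null-checked but not passed
         +// to code generation; src/offset/cmpval/newval are pushed back in
         +// order. The boolean result is represented as an int in C1's type
         +// system, which is what the is_int() assert above relies on.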
  1.3779 +
  1.3780 +
  1.3781 +#ifndef PRODUCT
  1.3782 +void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
  1.3783 +  const char sync_char      = callee->is_synchronized()        ? 's' : ' ';
  1.3784 +  const char exception_char = callee->has_exception_handlers() ? '!' : ' ';
  1.3785 +  const char monitors_char  = callee->has_monitor_bytecodes()  ? 'm' : ' ';
  1.3786 +  tty->print("     %c%c%c ", sync_char, exception_char, monitors_char);
  1.3787 +  for (int i = 0; i < scope()->level(); i++) tty->print("  ");
  1.3788 +  if (res) {
  1.3789 +    tty->print("  ");
  1.3790 +  } else {
  1.3791 +    tty->print("- ");
  1.3792 +  }
  1.3793 +  tty->print("@ %d  ", bci());
  1.3794 +  callee->print_short_name();
  1.3795 +  tty->print(" (%d bytes)", callee->code_size());
  1.3796 +  if (_inline_bailout_msg) {
  1.3797 +    tty->print("  %s", _inline_bailout_msg);
  1.3798 +  }
  1.3799 +  tty->cr();
  1.3800 +
  1.3801 +  if (res && CIPrintMethodCodes) {
  1.3802 +    callee->print_codes();
  1.3803 +  }
  1.3804 +}
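         +
         +// A printed line might look like this (hypothetical example):
         +//      s!    - @ 12  java.lang.String::indexOf (166 bytes)  callee is too large
         +// where the leading flags are s = synchronized, ! = has exception
         +// handlers, m = has monitor bytecodes; '-' marks a failed inline, and
         +// any trailing text is the current _inline_bailout_msg.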
  1.3805 +
  1.3806 +
  1.3807 +void GraphBuilder::print_stats() {
  1.3808 +  vmap()->print();
  1.3809 +}
  1.3810 +#endif // PRODUCT
  1.3811 +
  1.3812 +
  1.3813 +void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  1.3814 +  append(new ProfileCall(method(), bci(), recv, known_holder));
  1.3815 +}
  1.3816 +
  1.3817 +
  1.3818 +void GraphBuilder::profile_invocation(ciMethod* callee) {
  1.3819 +  if (profile_calls()) {
  1.3820 +    // increment the interpreter_invocation_count for the inlinee
  1.3821 +    Value m = append(new Constant(new ObjectConstant(callee)));
  1.3822 +    append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
  1.3823 +  }
  1.3824 +}
  1.3825 +
  1.3826 +
  1.3827 +void GraphBuilder::profile_bci(int bci) {
  1.3828 +  if (profile_branches()) {
  1.3829 +    ciMethodData* md = method()->method_data();
  1.3830 +    if (md == NULL) {
  1.3831 +      BAILOUT("out of memory building methodDataOop");
  1.3832 +    }
  1.3833 +    ciProfileData* data = md->bci_to_data(bci);
  1.3834 +    assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
  1.3835 +    Value mdo = append(new Constant(new ObjectConstant(md)));
  1.3836 +    append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
  1.3837 +  }
  1.3838 +}