src/share/vm/c1/c1_GraphBuilder.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2658:c7f3d0b4570f
child:       2784:92add02409c9
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
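
In outline, the barrier works like the sketch below (conceptual only: the names
load_reference_referent, raw_field_load, satb_marking_active, and satb_enqueue
are illustrative, not HotSpot's actual API):

    // Sketch of the referent read barrier. When concurrent (SATB) marking is
    // active, a referent read via Reference.get(), JNI, reflection, or Unsafe
    // is also logged so that marking treats it as live.
    oop load_reference_referent(oop reference) {
      oop referent = raw_field_load(reference, referent_offset);  // plain load
      if (satb_marking_active() && referent != NULL) {
        satb_enqueue(referent);  // record in the current thread's SATB buffer
      }
      return referent;
    }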

     1 /*
     2  * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "c1/c1_CFGPrinter.hpp"
    27 #include "c1/c1_Canonicalizer.hpp"
    28 #include "c1/c1_Compilation.hpp"
    29 #include "c1/c1_GraphBuilder.hpp"
    30 #include "c1/c1_InstructionPrinter.hpp"
    31 #include "ci/ciField.hpp"
    32 #include "ci/ciKlass.hpp"
    33 #include "interpreter/bytecode.hpp"
    34 #include "runtime/sharedRuntime.hpp"
    35 #include "utilities/bitMap.inline.hpp"
    37 class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
    38  private:
    39   Compilation* _compilation;
    40   IRScope*     _scope;
    42   BlockList    _blocks;                // internal list of all blocks
    43   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
    45   // fields used by mark_loops
    46   BitMap       _active;                // for iteration of control flow graph
    47   BitMap       _visited;               // for iteration of control flow graph
    48   intArray     _loop_map;              // caches the information if a block is contained in a loop
    49   int          _next_loop_index;       // next free loop number
    50   int          _next_block_number;     // for reverse postorder numbering of blocks
    52   // accessors
    53   Compilation*  compilation() const              { return _compilation; }
    54   IRScope*      scope() const                    { return _scope; }
    55   ciMethod*     method() const                   { return scope()->method(); }
    56   XHandlers*    xhandlers() const                { return scope()->xhandlers(); }
    58   // unified bailout support
    59   void          bailout(const char* msg) const   { compilation()->bailout(msg); }
    60   bool          bailed_out() const               { return compilation()->bailed_out(); }
    62   // helper functions
    63   BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
    64   void handle_exceptions(BlockBegin* current, int cur_bci);
    65   void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
    66   void store_one(BlockBegin* current, int local);
    67   void store_two(BlockBegin* current, int local);
    68   void set_entries(int osr_bci);
    69   void set_leaders();
    71   void make_loop_header(BlockBegin* block);
    72   void mark_loops();
    73   int  mark_loops(BlockBegin* b, bool in_subroutine);
    75   // debugging
    76 #ifndef PRODUCT
    77   void print();
    78 #endif
    80  public:
    81   // creation
    82   BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);
    84   // accessors for GraphBuilder
    85   BlockList*    bci2block() const                { return _bci2block; }
    86 };
    89 // Implementation of BlockListBuilder
    91 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
    92  : _compilation(compilation)
    93  , _scope(scope)
    94  , _blocks(16)
    95  , _bci2block(new BlockList(scope->method()->code_size(), NULL))
    96  , _next_block_number(0)
    97  , _active()         // size not known yet
    98  , _visited()        // size not known yet
    99  , _next_loop_index(0)
   100  , _loop_map() // size not known yet
   101 {
   102   set_entries(osr_bci);
   103   set_leaders();
   104   CHECK_BAILOUT();
   106   mark_loops();
   107   NOT_PRODUCT(if (PrintInitialBlockList) print());
   109 #ifndef PRODUCT
   110   if (PrintCFGToFile) {
   111     stringStream title;
   112     title.print("BlockListBuilder ");
   113     scope->method()->print_name(&title);
   114     CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
   115   }
   116 #endif
   117 }
   120 void BlockListBuilder::set_entries(int osr_bci) {
   121   // generate start blocks
   122   BlockBegin* std_entry = make_block_at(0, NULL);
   123   if (scope()->caller() == NULL) {
   124     std_entry->set(BlockBegin::std_entry_flag);
   125   }
   126   if (osr_bci != -1) {
   127     BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
   128     osr_entry->set(BlockBegin::osr_entry_flag);
   129   }
   131   // generate exception entry blocks
   132   XHandlers* list = xhandlers();
   133   const int n = list->length();
   134   for (int i = 0; i < n; i++) {
   135     XHandler* h = list->handler_at(i);
   136     BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
   137     entry->set(BlockBegin::exception_entry_flag);
   138     h->set_entry_block(entry);
   139   }
   140 }
   143 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
   144   assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");
   146   BlockBegin* block = _bci2block->at(cur_bci);
   147   if (block == NULL) {
   148     block = new BlockBegin(cur_bci);
   149     block->init_stores_to_locals(method()->max_locals());
   150     _bci2block->at_put(cur_bci, block);
   151     _blocks.append(block);
   153     assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
   154   }
   156   if (predecessor != NULL) {
   157     if (block->is_set(BlockBegin::exception_entry_flag)) {
   158       BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
   159     }
   161     predecessor->add_successor(block);
   162     block->increment_total_preds();
   163   }
   165   return block;
   166 }
   169 inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
   170   current->stores_to_locals().set_bit(local);
   171 }
   172 inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
   173   store_one(current, local);
   174   store_one(current, local + 1);
   175 }
   178 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
   179   // Draws edges from a block to its exception handlers
   180   XHandlers* list = xhandlers();
   181   const int n = list->length();
   183   for (int i = 0; i < n; i++) {
   184     XHandler* h = list->handler_at(i);
   186     if (h->covers(cur_bci)) {
   187       BlockBegin* entry = h->entry_block();
   188       assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
   189       assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");
   191       // add each exception handler only once
   192       if (!current->is_successor(entry)) {
   193         current->add_successor(entry);
   194         entry->increment_total_preds();
   195       }
   197       // stop when reaching catchall
   198       if (h->catch_type() == 0) break;
   199     }
   200   }
   201 }
   203 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
    204   // start a new block after the jsr bytecode and link it into the cfg
   205   make_block_at(next_bci, current);
    207   // start a new block at the subroutine entry and mark it with a special flag
   208   BlockBegin* sr_block = make_block_at(sr_bci, current);
   209   if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
   210     sr_block->set(BlockBegin::subroutine_entry_flag);
   211   }
   212 }
   215 void BlockListBuilder::set_leaders() {
   216   bool has_xhandlers = xhandlers()->has_handlers();
   217   BlockBegin* current = NULL;
    219   // The information about which bcis start a new block simplifies the analysis
   220   // Without it, backward branches could jump to a bci where no block was created
   221   // during bytecode iteration. This would require the creation of a new block at the
   222   // branch target and a modification of the successor lists.
   223   BitMap bci_block_start = method()->bci_block_start();
   225   ciBytecodeStream s(method());
   226   while (s.next() != ciBytecodeStream::EOBC()) {
   227     int cur_bci = s.cur_bci();
   229     if (bci_block_start.at(cur_bci)) {
   230       current = make_block_at(cur_bci, current);
   231     }
   232     assert(current != NULL, "must have current block");
   234     if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
   235       handle_exceptions(current, cur_bci);
   236     }
   238     switch (s.cur_bc()) {
   239       // track stores to local variables for selective creation of phi functions
   240       case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
   241       case Bytecodes::_istore:   store_one(current, s.get_index()); break;
   242       case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
   243       case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
   244       case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
   245       case Bytecodes::_astore:   store_one(current, s.get_index()); break;
   246       case Bytecodes::_istore_0: store_one(current, 0); break;
   247       case Bytecodes::_istore_1: store_one(current, 1); break;
   248       case Bytecodes::_istore_2: store_one(current, 2); break;
   249       case Bytecodes::_istore_3: store_one(current, 3); break;
   250       case Bytecodes::_lstore_0: store_two(current, 0); break;
   251       case Bytecodes::_lstore_1: store_two(current, 1); break;
   252       case Bytecodes::_lstore_2: store_two(current, 2); break;
   253       case Bytecodes::_lstore_3: store_two(current, 3); break;
   254       case Bytecodes::_fstore_0: store_one(current, 0); break;
   255       case Bytecodes::_fstore_1: store_one(current, 1); break;
   256       case Bytecodes::_fstore_2: store_one(current, 2); break;
   257       case Bytecodes::_fstore_3: store_one(current, 3); break;
   258       case Bytecodes::_dstore_0: store_two(current, 0); break;
   259       case Bytecodes::_dstore_1: store_two(current, 1); break;
   260       case Bytecodes::_dstore_2: store_two(current, 2); break;
   261       case Bytecodes::_dstore_3: store_two(current, 3); break;
   262       case Bytecodes::_astore_0: store_one(current, 0); break;
   263       case Bytecodes::_astore_1: store_one(current, 1); break;
   264       case Bytecodes::_astore_2: store_one(current, 2); break;
   265       case Bytecodes::_astore_3: store_one(current, 3); break;
   267       // track bytecodes that affect the control flow
   268       case Bytecodes::_athrow:  // fall through
   269       case Bytecodes::_ret:     // fall through
   270       case Bytecodes::_ireturn: // fall through
   271       case Bytecodes::_lreturn: // fall through
   272       case Bytecodes::_freturn: // fall through
   273       case Bytecodes::_dreturn: // fall through
   274       case Bytecodes::_areturn: // fall through
   275       case Bytecodes::_return:
   276         current = NULL;
   277         break;
   279       case Bytecodes::_ifeq:      // fall through
   280       case Bytecodes::_ifne:      // fall through
   281       case Bytecodes::_iflt:      // fall through
   282       case Bytecodes::_ifge:      // fall through
   283       case Bytecodes::_ifgt:      // fall through
   284       case Bytecodes::_ifle:      // fall through
   285       case Bytecodes::_if_icmpeq: // fall through
   286       case Bytecodes::_if_icmpne: // fall through
   287       case Bytecodes::_if_icmplt: // fall through
   288       case Bytecodes::_if_icmpge: // fall through
   289       case Bytecodes::_if_icmpgt: // fall through
   290       case Bytecodes::_if_icmple: // fall through
   291       case Bytecodes::_if_acmpeq: // fall through
   292       case Bytecodes::_if_acmpne: // fall through
   293       case Bytecodes::_ifnull:    // fall through
   294       case Bytecodes::_ifnonnull:
   295         make_block_at(s.next_bci(), current);
   296         make_block_at(s.get_dest(), current);
   297         current = NULL;
   298         break;
   300       case Bytecodes::_goto:
   301         make_block_at(s.get_dest(), current);
   302         current = NULL;
   303         break;
   305       case Bytecodes::_goto_w:
   306         make_block_at(s.get_far_dest(), current);
   307         current = NULL;
   308         break;
   310       case Bytecodes::_jsr:
   311         handle_jsr(current, s.get_dest(), s.next_bci());
   312         current = NULL;
   313         break;
   315       case Bytecodes::_jsr_w:
   316         handle_jsr(current, s.get_far_dest(), s.next_bci());
   317         current = NULL;
   318         break;
   320       case Bytecodes::_tableswitch: {
   321         // set block for each case
   322         Bytecode_tableswitch sw(&s);
   323         int l = sw.length();
   324         for (int i = 0; i < l; i++) {
   325           make_block_at(cur_bci + sw.dest_offset_at(i), current);
   326         }
   327         make_block_at(cur_bci + sw.default_offset(), current);
   328         current = NULL;
   329         break;
   330       }
   332       case Bytecodes::_lookupswitch: {
   333         // set block for each case
   334         Bytecode_lookupswitch sw(&s);
   335         int l = sw.number_of_pairs();
   336         for (int i = 0; i < l; i++) {
   337           make_block_at(cur_bci + sw.pair_at(i).offset(), current);
   338         }
   339         make_block_at(cur_bci + sw.default_offset(), current);
   340         current = NULL;
   341         break;
   342       }
   343     }
   344   }
   345 }
   348 void BlockListBuilder::mark_loops() {
   349   ResourceMark rm;
   351   _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
   352   _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
   353   _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
   354   _next_loop_index = 0;
   355   _next_block_number = _blocks.length();
   357   // recursively iterate the control flow graph
   358   mark_loops(_bci2block->at(0), false);
   359   assert(_next_block_number >= 0, "invalid block numbers");
   360 }
   362 void BlockListBuilder::make_loop_header(BlockBegin* block) {
   363   if (block->is_set(BlockBegin::exception_entry_flag)) {
   364     // exception edges may look like loops but don't mark them as such
   365     // since it screws up block ordering.
   366     return;
   367   }
   368   if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
   369     block->set(BlockBegin::parser_loop_header_flag);
   371     assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
   372     assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
   373     _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
   374     if (_next_loop_index < 31) _next_loop_index++;
   375   } else {
   376     // block already marked as loop header
   377     assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
   378   }
   379 }
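        // Note on the encoding: each loop header owns one bit of the 32-bit loop
        // map. mark_loops() below ORs together the bits of all loops a block is
        // part of, and a loop's bit is cleared again when the recursion returns
        // through its header. The last bit is shared by all loops past the 32nd,
        // which is why it is never cleared (see the comment in mark_loops()).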
   381 int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
   382   int block_id = block->block_id();
   384   if (_visited.at(block_id)) {
   385     if (_active.at(block_id)) {
   386       // reached block via backward branch
   387       make_loop_header(block);
   388     }
   389     // return cached loop information for this block
   390     return _loop_map.at(block_id);
   391   }
   393   if (block->is_set(BlockBegin::subroutine_entry_flag)) {
   394     in_subroutine = true;
   395   }
   397   // set active and visited bits before successors are processed
   398   _visited.set_bit(block_id);
   399   _active.set_bit(block_id);
   401   intptr_t loop_state = 0;
   402   for (int i = block->number_of_sux() - 1; i >= 0; i--) {
   403     // recursively process all successors
   404     loop_state |= mark_loops(block->sux_at(i), in_subroutine);
   405   }
   407   // clear active-bit after all successors are processed
   408   _active.clear_bit(block_id);
   410   // reverse-post-order numbering of all blocks
   411   block->set_depth_first_number(_next_block_number);
   412   _next_block_number--;
   414   if (loop_state != 0 || in_subroutine ) {
   415     // block is contained at least in one loop, so phi functions are necessary
   416     // phi functions are also necessary for all locals stored in a subroutine
   417     scope()->requires_phi_function().set_union(block->stores_to_locals());
   418   }
   420   if (block->is_set(BlockBegin::parser_loop_header_flag)) {
   421     int header_loop_state = _loop_map.at(block_id);
   422     assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");
   424     // If the highest bit is set (i.e. when integer value is negative), the method
   425     // has 32 or more loops. This bit is never cleared because it is used for multiple loops
   426     if (header_loop_state >= 0) {
   427       clear_bits(loop_state, header_loop_state);
   428     }
   429   }
   431   // cache and return loop information for this block
   432   _loop_map.at_put(block_id, loop_state);
   433   return loop_state;
   434 }
   437 #ifndef PRODUCT
   439 int compare_depth_first(BlockBegin** a, BlockBegin** b) {
   440   return (*a)->depth_first_number() - (*b)->depth_first_number();
   441 }
   443 void BlockListBuilder::print() {
   444   tty->print("----- initial block list of BlockListBuilder for method ");
   445   method()->print_short_name();
   446   tty->cr();
   448   // better readability if blocks are sorted in processing order
   449   _blocks.sort(compare_depth_first);
   451   for (int i = 0; i < _blocks.length(); i++) {
   452     BlockBegin* cur = _blocks.at(i);
   453     tty->print("%4d: B%-4d bci: %-4d  preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
   455     tty->print(cur->is_set(BlockBegin::std_entry_flag)               ? " std" : "    ");
   456     tty->print(cur->is_set(BlockBegin::osr_entry_flag)               ? " osr" : "    ");
   457     tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
   458     tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)        ? " sr" : "   ");
   459     tty->print(cur->is_set(BlockBegin::parser_loop_header_flag)      ? " lh" : "   ");
   461     if (cur->number_of_sux() > 0) {
   462       tty->print("    sux: ");
   463       for (int j = 0; j < cur->number_of_sux(); j++) {
   464         BlockBegin* sux = cur->sux_at(j);
   465         tty->print("B%d ", sux->block_id());
   466       }
   467     }
   468     tty->cr();
   469   }
   470 }
   472 #endif
   475 // A simple growable array of Values indexed by ciFields
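        // (The field's byte offset within its holder serves as the array index,
        //  so lookups are O(1) at the cost of a sparse array.)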
   476 class FieldBuffer: public CompilationResourceObj {
   477  private:
   478   GrowableArray<Value> _values;
   480  public:
   481   FieldBuffer() {}
   483   void kill() {
   484     _values.trunc_to(0);
   485   }
   487   Value at(ciField* field) {
   488     assert(field->holder()->is_loaded(), "must be a loaded field");
   489     int offset = field->offset();
   490     if (offset < _values.length()) {
   491       return _values.at(offset);
   492     } else {
   493       return NULL;
   494     }
   495   }
   497   void at_put(ciField* field, Value value) {
   498     assert(field->holder()->is_loaded(), "must be a loaded field");
   499     int offset = field->offset();
   500     _values.at_put_grow(offset, value, NULL);
   501   }
   503 };
    506 // MemoryBuffer is a fairly simple model of the current state of memory.
   507 // It partitions memory into several pieces.  The first piece is
   508 // generic memory where little is known about the owner of the memory.
   509 // This is conceptually represented by the tuple <O, F, V> which says
   510 // that the field F of object O has value V.  This is flattened so
   511 // that F is represented by the offset of the field and the parallel
   512 // arrays _objects and _values are used for O and V.  Loads of O.F can
   513 // simply use V.  Newly allocated objects are kept in a separate list
   514 // along with a parallel array for each object which represents the
   515 // current value of its fields.  Stores of the default value to fields
   516 // which have never been stored to before are eliminated since they
   517 // are redundant.  Once newly allocated objects are stored into
   518 // another object or they are passed out of the current compile they
   519 // are treated like generic memory.
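        // For example: after a store "p.f = v" the buffer records <p, offset(f), v>,
        // so a later load of p.f can be folded to v without emitting a LoadField.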
   521 class MemoryBuffer: public CompilationResourceObj {
   522  private:
   523   FieldBuffer                 _values;
   524   GrowableArray<Value>        _objects;
   525   GrowableArray<Value>        _newobjects;
   526   GrowableArray<FieldBuffer*> _fields;
   528  public:
   529   MemoryBuffer() {}
   531   StoreField* store(StoreField* st) {
   532     if (!EliminateFieldAccess) {
   533       return st;
   534     }
   536     Value object = st->obj();
   537     Value value = st->value();
   538     ciField* field = st->field();
   539     if (field->holder()->is_loaded()) {
   540       int offset = field->offset();
   541       int index = _newobjects.find(object);
   542       if (index != -1) {
   543         // newly allocated object with no other stores performed on this field
   544         FieldBuffer* buf = _fields.at(index);
   545         if (buf->at(field) == NULL && is_default_value(value)) {
   546 #ifndef PRODUCT
   547           if (PrintIRDuringConstruction && Verbose) {
   548             tty->print_cr("Eliminated store for object %d:", index);
   549             st->print_line();
   550           }
   551 #endif
   552           return NULL;
   553         } else {
   554           buf->at_put(field, value);
   555         }
   556       } else {
   557         _objects.at_put_grow(offset, object, NULL);
   558         _values.at_put(field, value);
   559       }
   561       store_value(value);
   562     } else {
   563       // if we held onto field names we could alias based on names but
   564       // we don't know what's being stored to so kill it all.
   565       kill();
   566     }
   567     return st;
   568   }
    571   // return true if this value corresponds to the default value of a field.
   572   bool is_default_value(Value value) {
   573     Constant* con = value->as_Constant();
   574     if (con) {
   575       switch (con->type()->tag()) {
   576         case intTag:    return con->type()->as_IntConstant()->value() == 0;
   577         case longTag:   return con->type()->as_LongConstant()->value() == 0;
   578         case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
   579         case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
   580         case objectTag: return con->type() == objectNull;
   581         default:  ShouldNotReachHere();
   582       }
   583     }
   584     return false;
   585   }
   588   // return either the actual value of a load or the load itself
   589   Value load(LoadField* load) {
   590     if (!EliminateFieldAccess) {
   591       return load;
   592     }
   594     if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
   595       // can't skip load since value might get rounded as a side effect
   596       return load;
   597     }
   599     ciField* field = load->field();
   600     Value object   = load->obj();
   601     if (field->holder()->is_loaded() && !field->is_volatile()) {
   602       int offset = field->offset();
   603       Value result = NULL;
   604       int index = _newobjects.find(object);
   605       if (index != -1) {
   606         result = _fields.at(index)->at(field);
   607       } else if (_objects.at_grow(offset, NULL) == object) {
   608         result = _values.at(field);
   609       }
   610       if (result != NULL) {
   611 #ifndef PRODUCT
   612         if (PrintIRDuringConstruction && Verbose) {
   613           tty->print_cr("Eliminated load: ");
   614           load->print_line();
   615         }
   616 #endif
   617         assert(result->type()->tag() == load->type()->tag(), "wrong types");
   618         return result;
   619       }
   620     }
   621     return load;
   622   }
   624   // Record this newly allocated object
   625   void new_instance(NewInstance* object) {
   626     int index = _newobjects.length();
   627     _newobjects.append(object);
   628     if (_fields.at_grow(index, NULL) == NULL) {
   629       _fields.at_put(index, new FieldBuffer());
   630     } else {
   631       _fields.at(index)->kill();
   632     }
   633   }
   635   void store_value(Value value) {
   636     int index = _newobjects.find(value);
   637     if (index != -1) {
   638       // stored a newly allocated object into another object.
    639       // Assume we've lost track of it as a separate slice of memory.
   640       // We could do better by keeping track of whether individual
   641       // fields could alias each other.
   642       _newobjects.remove_at(index);
    643       // pull out the field info and store it at the end of the field
    644       // info list so it can be reused later.
   645       _fields.append(_fields.at(index));
   646       _fields.remove_at(index);
   647     }
   648   }
   650   void kill() {
   651     _newobjects.trunc_to(0);
   652     _objects.trunc_to(0);
   653     _values.kill();
   654   }
   655 };
   658 // Implementation of GraphBuilder's ScopeData
   660 GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
   661   : _parent(parent)
   662   , _bci2block(NULL)
   663   , _scope(NULL)
   664   , _has_handler(false)
   665   , _stream(NULL)
   666   , _work_list(NULL)
   667   , _parsing_jsr(false)
   668   , _jsr_xhandlers(NULL)
   669   , _caller_stack_size(-1)
   670   , _continuation(NULL)
   671   , _num_returns(0)
   672   , _cleanup_block(NULL)
   673   , _cleanup_return_prev(NULL)
   674   , _cleanup_state(NULL)
   675 {
   676   if (parent != NULL) {
   677     _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
   678   } else {
   679     _max_inline_size = MaxInlineSize;
   680   }
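          // e.g. assuming the common default flag values MaxInlineSize=35 and
          // NestedInliningSizeRatio=90, a directly nested scope gets a budget
          // of 35 * 90 / 100 = 31 bytecodes, before the MaxTrivialSize floor
          // applied below.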
   681   if (_max_inline_size < MaxTrivialSize) {
   682     _max_inline_size = MaxTrivialSize;
   683   }
   684 }
   687 void GraphBuilder::kill_all() {
   688   if (UseLocalValueNumbering) {
   689     vmap()->kill_all();
   690   }
   691   _memory->kill();
   692 }
   695 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
   696   if (parsing_jsr()) {
   697     // It is necessary to clone all blocks associated with a
   698     // subroutine, including those for exception handlers in the scope
   699     // of the method containing the jsr (because those exception
   700     // handlers may contain ret instructions in some cases).
   701     BlockBegin* block = bci2block()->at(bci);
   702     if (block != NULL && block == parent()->bci2block()->at(bci)) {
   703       BlockBegin* new_block = new BlockBegin(block->bci());
   704 #ifndef PRODUCT
   705       if (PrintInitialBlockList) {
   706         tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
   707                       block->block_id(), block->bci(), new_block->block_id());
   708       }
   709 #endif
    710       // copy data from the original block to the cloned block
   711       new_block->set_depth_first_number(block->depth_first_number());
   712       if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
   713       // Preserve certain flags for assertion checking
   714       if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
   715       if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);
    717       // copy was_visited_flag to allow early detection of bailouts:
    718       // if a block that is used in a jsr has already been visited before,
    719       // it is shared between the normal control flow and a subroutine.
    720       // BlockBegin::try_merge returns false when the flag is set; this
    721       // leads to a compilation bailout.
   722       if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);
   724       bci2block()->at_put(bci, new_block);
   725       block = new_block;
   726     }
   727     return block;
   728   } else {
   729     return bci2block()->at(bci);
   730   }
   731 }
   734 XHandlers* GraphBuilder::ScopeData::xhandlers() const {
   735   if (_jsr_xhandlers == NULL) {
   736     assert(!parsing_jsr(), "");
   737     return scope()->xhandlers();
   738   }
   739   assert(parsing_jsr(), "");
   740   return _jsr_xhandlers;
   741 }
   744 void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
   745   _scope = scope;
   746   bool parent_has_handler = false;
   747   if (parent() != NULL) {
   748     parent_has_handler = parent()->has_handler();
   749   }
   750   _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
   751 }
   754 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
   755                                                       Instruction* return_prev,
   756                                                       ValueStack* return_state) {
   757   _cleanup_block       = block;
   758   _cleanup_return_prev = return_prev;
   759   _cleanup_state       = return_state;
   760 }
   763 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
   764   if (_work_list == NULL) {
   765     _work_list = new BlockList();
   766   }
   768   if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
   769     // Do not start parsing the continuation block while in a
   770     // sub-scope
   771     if (parsing_jsr()) {
   772       if (block == jsr_continuation()) {
   773         return;
   774       }
   775     } else {
   776       if (block == continuation()) {
   777         return;
   778       }
   779     }
   780     block->set(BlockBegin::is_on_work_list_flag);
   781     _work_list->push(block);
   783     sort_top_into_worklist(_work_list, block);
   784   }
   785 }
   788 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
   789   assert(worklist->top() == top, "");
   790   // sort block descending into work list
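          // (so that remove_from_work_list() pops the block with the smallest
          //  depth-first number first)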
   791   const int dfn = top->depth_first_number();
   792   assert(dfn != -1, "unknown depth first number");
   793   int i = worklist->length()-2;
   794   while (i >= 0) {
   795     BlockBegin* b = worklist->at(i);
   796     if (b->depth_first_number() < dfn) {
   797       worklist->at_put(i+1, b);
   798     } else {
   799       break;
   800     }
   801     i --;
   802   }
   803   if (i >= -1) worklist->at_put(i + 1, top);
   804 }
   807 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
   808   if (is_work_list_empty()) {
   809     return NULL;
   810   }
   811   return _work_list->pop();
   812 }
   815 bool GraphBuilder::ScopeData::is_work_list_empty() const {
   816   return (_work_list == NULL || _work_list->length() == 0);
   817 }
   820 void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
   821   assert(parsing_jsr(), "");
   822   // clone all the exception handlers from the scope
   823   XHandlers* handlers = new XHandlers(scope()->xhandlers());
   824   const int n = handlers->length();
   825   for (int i = 0; i < n; i++) {
   826     // The XHandlers need to be adjusted to dispatch to the cloned
   827     // handler block instead of the default one but the synthetic
   828     // unlocker needs to be handled specially.  The synthetic unlocker
   829     // should be left alone since there can be only one and all code
   830     // should dispatch to the same one.
   831     XHandler* h = handlers->handler_at(i);
   832     assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
   833     h->set_entry_block(block_at(h->handler_bci()));
   834   }
   835   _jsr_xhandlers = handlers;
   836 }
   839 int GraphBuilder::ScopeData::num_returns() {
   840   if (parsing_jsr()) {
   841     return parent()->num_returns();
   842   }
   843   return _num_returns;
   844 }
   847 void GraphBuilder::ScopeData::incr_num_returns() {
   848   if (parsing_jsr()) {
   849     parent()->incr_num_returns();
   850   } else {
   851     ++_num_returns;
   852   }
   853 }
   856 // Implementation of GraphBuilder
   858 #define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }
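        // Like BAILOUT, but for the bool-returning inlining helpers: it records
        // the bailout reason and makes the helper report failure to its caller.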
   861 void GraphBuilder::load_constant() {
   862   ciConstant con = stream()->get_constant();
   863   if (con.basic_type() == T_ILLEGAL) {
   864     BAILOUT("could not resolve a constant");
   865   } else {
   866     ValueType* t = illegalType;
   867     ValueStack* patch_state = NULL;
   868     switch (con.basic_type()) {
   869       case T_BOOLEAN: t = new IntConstant     (con.as_boolean()); break;
   870       case T_BYTE   : t = new IntConstant     (con.as_byte   ()); break;
   871       case T_CHAR   : t = new IntConstant     (con.as_char   ()); break;
   872       case T_SHORT  : t = new IntConstant     (con.as_short  ()); break;
   873       case T_INT    : t = new IntConstant     (con.as_int    ()); break;
   874       case T_LONG   : t = new LongConstant    (con.as_long   ()); break;
   875       case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
   876       case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
   877       case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
   878       case T_OBJECT :
   879        {
   880         ciObject* obj = con.as_object();
   881         if (!obj->is_loaded()
   882             || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
   883           patch_state = copy_state_before();
   884           t = new ObjectConstant(obj);
   885         } else {
   886           assert(!obj->is_klass(), "must be java_mirror of klass");
   887           t = new InstanceConstant(obj->as_instance());
   888         }
   889         break;
   890        }
   891       default       : ShouldNotReachHere();
   892     }
   893     Value x;
   894     if (patch_state != NULL) {
   895       x = new Constant(t, patch_state);
   896     } else {
   897       x = new Constant(t);
   898     }
   899     push(t, append(x));
   900   }
   901 }
   904 void GraphBuilder::load_local(ValueType* type, int index) {
   905   Value x = state()->local_at(index);
   906   assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
   907   push(type, x);
   908 }
   911 void GraphBuilder::store_local(ValueType* type, int index) {
   912   Value x = pop(type);
   913   store_local(state(), x, type, index);
   914 }
   917 void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
   918   if (parsing_jsr()) {
   919     // We need to do additional tracking of the location of the return
   920     // address for jsrs since we don't handle arbitrary jsr/ret
   921     // constructs. Here we are figuring out in which circumstances we
   922     // need to bail out.
   923     if (x->type()->is_address()) {
   924       scope_data()->set_jsr_return_address_local(index);
   926       // Also check parent jsrs (if any) at this time to see whether
   927       // they are using this local. We don't handle skipping over a
   928       // ret.
   929       for (ScopeData* cur_scope_data = scope_data()->parent();
   930            cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
   931            cur_scope_data = cur_scope_data->parent()) {
   932         if (cur_scope_data->jsr_return_address_local() == index) {
   933           BAILOUT("subroutine overwrites return address from previous subroutine");
   934         }
   935       }
   936     } else if (index == scope_data()->jsr_return_address_local()) {
   937       scope_data()->set_jsr_return_address_local(-1);
   938     }
   939   }
   941   state->store_local(index, round_fp(x));
   942 }
   945 void GraphBuilder::load_indexed(BasicType type) {
   946   ValueStack* state_before = copy_state_for_exception();
   947   Value index = ipop();
   948   Value array = apop();
   949   Value length = NULL;
   950   if (CSEArrayLength ||
   951       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   952       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   953     length = append(new ArrayLength(array, state_before));
   954   }
   955   push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
   956 }
   959 void GraphBuilder::store_indexed(BasicType type) {
   960   ValueStack* state_before = copy_state_for_exception();
   961   Value value = pop(as_ValueType(type));
   962   Value index = ipop();
   963   Value array = apop();
   964   Value length = NULL;
   965   if (CSEArrayLength ||
   966       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   967       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   968     length = append(new ArrayLength(array, state_before));
   969   }
   970   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before);
   971   append(result);
   972   _memory->store_value(value);
   974   if (type == T_OBJECT && is_profiling()) {
   975     // Note that we'd collect profile data in this method if we wanted it.
   976     compilation()->set_would_profile(true);
   978     if (profile_checkcasts()) {
   979       result->set_profiled_method(method());
   980       result->set_profiled_bci(bci());
   981       result->set_should_profile(true);
   982     }
   983   }
   984 }
   987 void GraphBuilder::stack_op(Bytecodes::Code code) {
   988   switch (code) {
   989     case Bytecodes::_pop:
   990       { state()->raw_pop();
   991       }
   992       break;
   993     case Bytecodes::_pop2:
   994       { state()->raw_pop();
   995         state()->raw_pop();
   996       }
   997       break;
   998     case Bytecodes::_dup:
   999       { Value w = state()->raw_pop();
  1000         state()->raw_push(w);
  1001         state()->raw_push(w);
   1002       }
   1003       break;
  1004     case Bytecodes::_dup_x1:
  1005       { Value w1 = state()->raw_pop();
  1006         Value w2 = state()->raw_pop();
  1007         state()->raw_push(w1);
  1008         state()->raw_push(w2);
  1009         state()->raw_push(w1);
   1010       }
   1011       break;
  1012     case Bytecodes::_dup_x2:
  1013       { Value w1 = state()->raw_pop();
  1014         Value w2 = state()->raw_pop();
  1015         Value w3 = state()->raw_pop();
  1016         state()->raw_push(w1);
  1017         state()->raw_push(w3);
  1018         state()->raw_push(w2);
  1019         state()->raw_push(w1);
   1020       }
   1021       break;
  1022     case Bytecodes::_dup2:
  1023       { Value w1 = state()->raw_pop();
  1024         Value w2 = state()->raw_pop();
  1025         state()->raw_push(w2);
  1026         state()->raw_push(w1);
  1027         state()->raw_push(w2);
  1028         state()->raw_push(w1);
   1029       }
   1030       break;
  1031     case Bytecodes::_dup2_x1:
  1032       { Value w1 = state()->raw_pop();
  1033         Value w2 = state()->raw_pop();
  1034         Value w3 = state()->raw_pop();
  1035         state()->raw_push(w2);
  1036         state()->raw_push(w1);
  1037         state()->raw_push(w3);
  1038         state()->raw_push(w2);
  1039         state()->raw_push(w1);
   1040       }
   1041       break;
  1042     case Bytecodes::_dup2_x2:
  1043       { Value w1 = state()->raw_pop();
  1044         Value w2 = state()->raw_pop();
  1045         Value w3 = state()->raw_pop();
  1046         Value w4 = state()->raw_pop();
  1047         state()->raw_push(w2);
  1048         state()->raw_push(w1);
  1049         state()->raw_push(w4);
  1050         state()->raw_push(w3);
  1051         state()->raw_push(w2);
  1052         state()->raw_push(w1);
   1053       }
   1054       break;
  1055     case Bytecodes::_swap:
  1056       { Value w1 = state()->raw_pop();
  1057         Value w2 = state()->raw_pop();
  1058         state()->raw_push(w1);
  1059         state()->raw_push(w2);
   1060       }
   1061       break;
  1062     default:
  1063       ShouldNotReachHere();
   1064       break;
   1065   }
   1066 }
  1069 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  1070   Value y = pop(type);
  1071   Value x = pop(type);
  1072   // NOTE: strictfp can be queried from current method since we don't
  1073   // inline methods with differing strictfp bits
  1074   Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
  1075   // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  1076   res = append(res);
  1077   if (method()->is_strict()) {
  1078     res = round_fp(res);
   1079   }
   1080   push(type, res);
   1081 }
  1084 void GraphBuilder::negate_op(ValueType* type) {
   1085   push(type, append(new NegateOp(pop(type))));
   1086 }
  1089 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  1090   Value s = ipop();
  1091   Value x = pop(type);
  1092   // try to simplify
   1093   // Note: This code should go into the canonicalizer as soon as it
   1094   //       can handle canonicalized forms that contain more than one node.
  1095   if (CanonicalizeNodes && code == Bytecodes::_iushr) {
  1096     // pattern: x >>> s
  1097     IntConstant* s1 = s->type()->as_IntConstant();
  1098     if (s1 != NULL) {
  1099       // pattern: x >>> s1, with s1 constant
  1100       ShiftOp* l = x->as_ShiftOp();
  1101       if (l != NULL && l->op() == Bytecodes::_ishl) {
  1102         // pattern: (a << b) >>> s1
  1103         IntConstant* s0 = l->y()->type()->as_IntConstant();
  1104         if (s0 != NULL) {
  1105           // pattern: (a << s0) >>> s1
  1106           const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
  1107           const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
  1108           if (s0c == s1c) {
  1109             if (s0c == 0) {
  1110               // pattern: (a << 0) >>> 0 => simplify to: a
  1111               ipush(l->x());
  1112             } else {
  1113               // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
  1114               assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
  1115               const int m = (1 << (BitsPerInt - s0c)) - 1;
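                    // e.g. s0c == 24: m == (1 << 8) - 1 == 0xFF, because
                    // (a << 24) >>> 24 keeps exactly the low 8 bits of a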
  1116               Value s = append(new Constant(new IntConstant(m)));
  1117               ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
   1118             }
   1119             return;
   1120           }
   1121         }
   1122       }
   1123     }
   1124   }
   1125   // could not simplify
   1126   push(type, append(new ShiftOp(code, x, s)));
   1127 }
  1130 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  1131   Value y = pop(type);
  1132   Value x = pop(type);
   1133   push(type, append(new LogicOp(code, x, y)));
   1134 }
  1137 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  1138   ValueStack* state_before = copy_state_before();
  1139   Value y = pop(type);
  1140   Value x = pop(type);
   1141   ipush(append(new CompareOp(code, x, y, state_before)));
   1142 }
  1145 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
   1146   push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
   1147 }
  1150 void GraphBuilder::increment() {
  1151   int index = stream()->get_index();
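          // iinc is encoded as [opcode, index, const]; the wide form is
          // [wide, opcode, index16, const16]. Hence the constant is read at
          // offset 4 from the current bcp in the wide case, offset 2 otherwise.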
  1152   int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  1153   load_local(intType, index);
  1154   ipush(append(new Constant(new IntConstant(delta))));
  1155   arithmetic_op(intType, Bytecodes::_iadd);
   1156   store_local(intType, index);
   1157 }
  1160 void GraphBuilder::_goto(int from_bci, int to_bci) {
  1161   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  1162   if (is_profiling()) {
   1163     compilation()->set_would_profile(true);
   1164   }
  1165   if (profile_branches()) {
  1166     x->set_profiled_method(method());
  1167     x->set_profiled_bci(bci());
  1168     x->set_should_profile(true);
   1169   }
   1170   append(x);
   1171 }
  1174 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  1175   BlockBegin* tsux = block_at(stream()->get_dest());
  1176   BlockBegin* fsux = block_at(stream()->next_bci());
  1177   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  1178   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
  1180   if (is_profiling()) {
  1181     If* if_node = i->as_If();
  1182     if (if_node != NULL) {
  1183       // Note that we'd collect profile data in this method if we wanted it.
  1184       compilation()->set_would_profile(true);
  1185       // At level 2 we need the proper bci to count backedges
  1186       if_node->set_profiled_bci(bci());
  1187       if (profile_branches()) {
  1188         // Successors can be rotated by the canonicalizer, check for this case.
  1189         if_node->set_profiled_method(method());
  1190         if_node->set_should_profile(true);
  1191         if (if_node->tsux() == fsux) {
  1192           if_node->set_swapped(true);
   1193         }
   1194       }
   1195       return;
   1196     }
  1198     // Check if this If was reduced to Goto.
  1199     Goto *goto_node = i->as_Goto();
  1200     if (goto_node != NULL) {
  1201       compilation()->set_would_profile(true);
  1202       if (profile_branches()) {
  1203         goto_node->set_profiled_method(method());
  1204         goto_node->set_profiled_bci(bci());
  1205         goto_node->set_should_profile(true);
  1206         // Find out which successor is used.
  1207         if (goto_node->default_sux() == tsux) {
  1208           goto_node->set_direction(Goto::taken);
  1209         } else if (goto_node->default_sux() == fsux) {
  1210           goto_node->set_direction(Goto::not_taken);
  1211         } else {
  1212           ShouldNotReachHere();
   1213         }
   1214       }
   1215       return;
   1216     }
   1217   }
   1218 }
  1221 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  1222   Value y = append(new Constant(intZero));
  1223   ValueStack* state_before = copy_state_before();
  1224   Value x = ipop();
   1225   if_node(x, cond, y, state_before);
   1226 }
  1229 void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  1230   Value y = append(new Constant(objectNull));
  1231   ValueStack* state_before = copy_state_before();
  1232   Value x = apop();
   1233   if_node(x, cond, y, state_before);
   1234 }
  1237 void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  1238   ValueStack* state_before = copy_state_before();
  1239   Value y = pop(type);
  1240   Value x = pop(type);
   1241   if_node(x, cond, y, state_before);
   1242 }
  1245 void GraphBuilder::jsr(int dest) {
  1246   // We only handle well-formed jsrs (those which are "block-structured").
  1247   // If the bytecodes are strange (jumping out of a jsr block) then we
  1248   // might end up trying to re-parse a block containing a jsr which
  1249   // has already been activated. Watch for this case and bail out.
  1250   for (ScopeData* cur_scope_data = scope_data();
  1251        cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
  1252        cur_scope_data = cur_scope_data->parent()) {
  1253     if (cur_scope_data->jsr_entry_bci() == dest) {
   1254       BAILOUT("too-complicated jsr/ret structure");
   1255     }
   1256   }
  1258   push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  1259   if (!try_inline_jsr(dest)) {
   1260     return; // bailed out while parsing and inlining subroutine
   1261   }
   1262 }
  1265 void GraphBuilder::ret(int local_index) {
  1266   if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
  1268   if (local_index != scope_data()->jsr_return_address_local()) {
   1269     BAILOUT("can not handle complicated jsr/ret constructs");
   1270   }
  1272   // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
   1273   append(new Goto(scope_data()->jsr_continuation(), false));
   1274 }
  1277 void GraphBuilder::table_switch() {
  1278   Bytecode_tableswitch sw(stream());
  1279   const int l = sw.length();
  1280   if (CanonicalizeNodes && l == 1) {
  1281     // total of 2 successors => use If instead of switch
   1282     // Note: This code should go into the canonicalizer as soon as it
   1283     //       can handle canonicalized forms that contain more than one node.
  1284     Value key = append(new Constant(new IntConstant(sw.low_key())));
  1285     BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
  1286     BlockBegin* fsux = block_at(bci() + sw.default_offset());
  1287     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1288     ValueStack* state_before = is_bb ? copy_state_before() : NULL;
  1289     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1290   } else {
  1291     // collect successors
  1292     BlockList* sux = new BlockList(l + 1, NULL);
  1293     int i;
  1294     bool has_bb = false;
  1295     for (i = 0; i < l; i++) {
  1296       sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
   1297       if (sw.dest_offset_at(i) < 0) has_bb = true;
   1298     }
  1299     // add default successor
  1300     sux->at_put(i, block_at(bci() + sw.default_offset()));
  1301     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
   1302     append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
   1303   }
   1304 }
  1307 void GraphBuilder::lookup_switch() {
  1308   Bytecode_lookupswitch sw(stream());
  1309   const int l = sw.number_of_pairs();
  1310   if (CanonicalizeNodes && l == 1) {
  1311     // total of 2 successors => use If instead of switch
   1312     // Note: This code should go into the canonicalizer as soon as it
   1313     //       can handle canonicalized forms that contain more than one node.
  1314     // simplify to If
  1315     LookupswitchPair pair = sw.pair_at(0);
  1316     Value key = append(new Constant(new IntConstant(pair.match())));
  1317     BlockBegin* tsux = block_at(bci() + pair.offset());
  1318     BlockBegin* fsux = block_at(bci() + sw.default_offset());
  1319     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1320     ValueStack* state_before = is_bb ? copy_state_before() : NULL;
  1321     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1322   } else {
  1323     // collect successors & keys
  1324     BlockList* sux = new BlockList(l + 1, NULL);
  1325     intArray* keys = new intArray(l, 0);
  1326     int i;
  1327     bool has_bb = false;
  1328     for (i = 0; i < l; i++) {
  1329       LookupswitchPair pair = sw.pair_at(i);
  1330       if (pair.offset() < 0) has_bb = true;
  1331       sux->at_put(i, block_at(bci() + pair.offset()));
   1332       keys->at_put(i, pair.match());
   1333     }
  1334     // add default successor
  1335     sux->at_put(i, block_at(bci() + sw.default_offset()));
  1336     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
   1337     append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
   1338   }
   1339 }
  1341 void GraphBuilder::call_register_finalizer() {
  1342   // If the receiver requires finalization then emit code to perform
  1343   // the registration on return.
  1345   // Gather some type information about the receiver
  1346   Value receiver = state()->local_at(0);
  1347   assert(receiver != NULL, "must have a receiver");
  1348   ciType* declared_type = receiver->declared_type();
  1349   ciType* exact_type = receiver->exact_type();
  1350   if (exact_type == NULL &&
  1351       receiver->as_Local() &&
  1352       receiver->as_Local()->java_index() == 0) {
  1353     ciInstanceKlass* ik = compilation()->method()->holder();
  1354     if (ik->is_final()) {
  1355       exact_type = ik;
  1356     } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  1357       // test class is leaf class
  1358       compilation()->dependency_recorder()->assert_leaf_type(ik);
  1359       exact_type = ik;
  1360     } else {
   1361       declared_type = ik;
   1362     }
   1363   }
  1365   // see if we know statically that registration isn't required
  1366   bool needs_check = true;
  1367   if (exact_type != NULL) {
  1368     needs_check = exact_type->as_instance_klass()->has_finalizer();
  1369   } else if (declared_type != NULL) {
  1370     ciInstanceKlass* ik = declared_type->as_instance_klass();
  1371     if (!Dependencies::has_finalizable_subclass(ik)) {
  1372       compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
   1373       needs_check = false;
   1374     }
   1375   }
  1377   if (needs_check) {
  1378     // Perform the registration of finalizable objects.
  1379     ValueStack* state_before = copy_state_for_exception();
  1380     load_local(objectType, 0);
  1381     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
  1382                                state()->pop_arguments(1),
   1383                                true, state_before, true));
   1384   }
   1385 }
  1388 void GraphBuilder::method_return(Value x) {
  1389   if (RegisterFinalizersAtInit &&
  1390       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
   1391     call_register_finalizer();
   1392   }
  1394   // Check to see whether we are inlining. If so, Return
  1395   // instructions become Gotos to the continuation point.
  1396   if (continuation() != NULL) {
  1397     assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
  1399     if (compilation()->env()->dtrace_method_probes()) {
  1400       // Report exit from inline methods
  1401       Values* args = new Values(1);
  1402       args->push(append(new Constant(new ObjectConstant(method()))));
   1403       append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
   1404     }
  1406     // If the inlined method is synchronized, the monitor must be
  1407     // released before we jump to the continuation block.
  1408     if (method()->is_synchronized()) {
  1409       assert(state()->locks_size() == 1, "receiver must be locked here");
   1410       monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
   1411     }
  1413     // State at end of inlined method is the state of the caller
  1414     // without the method parameters on stack, including the
  1415     // return value, if any, of the inlined method on operand stack.
  1416     set_state(state()->caller_state()->copy_for_parsing());
  1417     if (x != NULL) {
   1418       state()->push(x->type(), x);
   1419     }
  1420     Goto* goto_callee = new Goto(continuation(), false);
  1422     // See whether this is the first return; if so, store off some
  1423     // of the state for later examination
  1424     if (num_returns() == 0) {
  1425       set_inline_cleanup_info(_block, _last, state());
  1426     }
  1428     // The current bci() is in the wrong scope, so use the bci() of
  1429     // the continuation point.
  1430     append_with_bci(goto_callee, scope_data()->continuation()->bci());
  1431     incr_num_returns();
  1433     return;
  1434   }
  1436   state()->truncate_stack(0);
  1437   if (method()->is_synchronized()) {
  1438     // perform the unlocking before exiting the method
  1439     Value receiver;
  1440     if (!method()->is_static()) {
  1441       receiver = _initial_state->local_at(0);
  1442     } else {
  1443       receiver = append(new Constant(new ClassConstant(method()->holder())));
  1444     }
  1445     append_split(new MonitorExit(receiver, state()->unlock()));
  1446   }
  1448   append(new Return(x));
  1449 }
  1452 void GraphBuilder::access_field(Bytecodes::Code code) {
  1453   bool will_link;
  1454   ciField* field = stream()->get_field(will_link);
  1455   ciInstanceKlass* holder = field->holder();
  1456   BasicType field_type = field->type()->basic_type();
  1457   ValueType* type = as_ValueType(field_type);
  1458   // call will_link again to determine if the field is valid.
  1459   const bool needs_patching = !holder->is_loaded() ||
  1460                               !field->will_link(method()->holder(), code) ||
  1461                               PatchALot;
  1463   ValueStack* state_before = NULL;
  1464   if (!holder->is_initialized() || needs_patching) {
  1465     // save state before instruction for debug info when
  1466     // deoptimization happens during patching
  1467     state_before = copy_state_before();
  1468   }
  1470   Value obj = NULL;
  1471   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
  1472     if (state_before != NULL) {
  1473       // build a patching constant
  1474       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
  1475     } else {
  1476       obj = new Constant(new InstanceConstant(holder->java_mirror()));
  1477     }
  1478   }
  1481   const int offset = !needs_patching ? field->offset() : -1;
  1482   switch (code) {
  1483     case Bytecodes::_getstatic: {
  1484       // check for compile-time constants, i.e., initialized static final fields
  1485       Instruction* constant = NULL;
  1486       if (field->is_constant() && !PatchALot) {
  1487         ciConstant field_val = field->constant_value();
  1488         BasicType field_type = field_val.basic_type();
  1489         switch (field_type) {
  1490         case T_ARRAY:
  1491         case T_OBJECT:
  1492           if (field_val.as_object()->should_be_constant()) {
  1493             constant = new Constant(as_ValueType(field_val));
  1494           }
  1495           break;
  1497         default:
  1498           constant = new Constant(as_ValueType(field_val));
  1499         }
  1500       }
  1501       if (constant != NULL) {
  1502         push(type, append(constant));
  1503       } else {
  1504         if (state_before == NULL) {
  1505           state_before = copy_state_for_exception();
  1506         }
  1507         push(type, append(new LoadField(append(obj), offset, field, true,
  1508                                         state_before, needs_patching)));
  1509       }
  1510       break;
  1511     }
  1512     case Bytecodes::_putstatic:
  1513       { Value val = pop(type);
  1514         if (state_before == NULL) {
  1515           state_before = copy_state_for_exception();
  1516         }
  1517         append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
  1518       }
  1519       break;
  1520     case Bytecodes::_getfield :
  1521       {
  1522         if (state_before == NULL) {
  1523           state_before = copy_state_for_exception();
  1524         }
  1525         LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching);
  1526         Value replacement = !needs_patching ? _memory->load(load) : load;
  1527         if (replacement != load) {
  1528           assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
  1529           push(type, replacement);
  1530         } else {
  1531           push(type, append(load));
  1532         }
  1533         break;
  1534       }
  1536     case Bytecodes::_putfield :
  1537       { Value val = pop(type);
  1538         if (state_before == NULL) {
  1539           state_before = copy_state_for_exception();
  1540         }
  1541         StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching);
  1542         if (!needs_patching) store = _memory->store(store);
  1543         if (store != NULL) {
  1544           append(store);
  1545         }
  1546       }
  1547       break;
  1548     default                   :
  1549       ShouldNotReachHere();
  1550       break;
  1551   }
  1552 }
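// [Editorial note, illustrative only] Worked example for the _getstatic
// constant folding above: once the holder class of
//
//   class C { static final int N = 42; }
//
// is initialized, field->is_constant() holds for C.N, so 'getstatic C.N'
// is parsed not as a LoadField but roughly as
//
//   push(type, append(new Constant(new IntConstant(42))));
//
// Object- and array-typed finals are only folded when their value passes
// should_be_constant().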
  1555 Dependencies* GraphBuilder::dependency_recorder() const {
  1556   assert(DeoptC1, "need debug information");
  1557   return compilation()->dependency_recorder();
  1558 }
  1561 void GraphBuilder::invoke(Bytecodes::Code code) {
  1562   bool will_link;
  1563   ciMethod* target = stream()->get_method(will_link);
  1564   // we have to make sure the argument size (incl. the receiver)
  1565   // is correct for compilation (the call would fail later during
  1566   // linkage anyway) - was bug (gri 7/28/99)
  1567   if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
  1568   ciInstanceKlass* klass = target->holder();
  1570   // check if CHA possible: if so, change the code to invoke_special
  1571   ciInstanceKlass* calling_klass = method()->holder();
  1572   ciKlass* holder = stream()->get_declared_method_holder();
  1573   ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  1574   ciInstanceKlass* actual_recv = callee_holder;
  1576   // some methods are obviously bindable without any type checks so
  1577   // convert them directly to an invokespecial.
  1578   if (target->is_loaded() && !target->is_abstract() &&
  1579       target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
  1580     code = Bytecodes::_invokespecial;
  1581   }
  1583   // NEEDS_CLEANUP
  1584   // I've added the target->is_loaded() test below but I don't really understand
  1585   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
  1586   // this happened while running the JCK invokevirtual tests under doit.  TKR
  1587   ciMethod* cha_monomorphic_target = NULL;
  1588   ciMethod* exact_target = NULL;
  1589   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
  1590       !target->is_method_handle_invoke()) {
  1591     Value receiver = NULL;
  1592     ciInstanceKlass* receiver_klass = NULL;
  1593     bool type_is_exact = false;
  1594     // try to find a precise receiver type
  1595     if (will_link && !target->is_static()) {
  1596       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
  1597       receiver = state()->stack_at(index);
  1598       ciType* type = receiver->exact_type();
  1599       if (type != NULL && type->is_loaded() &&
  1600           type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1601         receiver_klass = (ciInstanceKlass*) type;
  1602         type_is_exact = true;
  1603       }
  1604       if (type == NULL) {
  1605         type = receiver->declared_type();
  1606         if (type != NULL && type->is_loaded() &&
  1607             type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1608           receiver_klass = (ciInstanceKlass*) type;
  1609           if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
  1610             // Insert a dependency on this type since
  1611             // find_monomorphic_target may assume it's already done.
  1612             dependency_recorder()->assert_leaf_type(receiver_klass);
  1613             type_is_exact = true;
  1614           }
  1615         }
  1616       }
  1617     }
  1618     if (receiver_klass != NULL && type_is_exact &&
  1619         receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
  1620       // If we have the exact receiver type we can bind directly to
  1621       // the method to call.
  1622       exact_target = target->resolve_invoke(calling_klass, receiver_klass);
  1623       if (exact_target != NULL) {
  1624         target = exact_target;
  1625         code = Bytecodes::_invokespecial;
  1626       }
  1627     }
  1628     if (receiver_klass != NULL &&
  1629         receiver_klass->is_subtype_of(actual_recv) &&
  1630         actual_recv->is_initialized()) {
  1631       actual_recv = receiver_klass;
  1632     }
  1634     if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
  1635         (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
  1636       // Use CHA on the receiver to select a more precise method.
  1637       cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
  1638     } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
  1639       // if there is only one implementor of this interface then we
  1640       // may be able to bind this invoke directly to the implementing
  1641       // klass but we need both a dependence on the single interface
  1642       // and on the method we bind to.  Additionally since all we know
  1643       // about the receiver type is that it's supposed to implement the
  1644       // interface we have to insert a check that it's the class we
  1645       // expect.  Interface types are not checked by the verifier so
  1646       // they are roughly equivalent to Object.
  1647       ciInstanceKlass* singleton = NULL;
  1648       if (target->holder()->nof_implementors() == 1) {
  1649         singleton = target->holder()->implementor(0);
  1650       }
  1651       if (singleton) {
  1652         cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
  1653         if (cha_monomorphic_target != NULL) {
  1654           // If CHA is able to bind this invoke then update the class
  1655           // to match that class, otherwise klass will refer to the
  1656           // interface.
  1657           klass = cha_monomorphic_target->holder();
  1658           actual_recv = target->holder();
  1660           // insert a check that it's really the expected class.
  1661           CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
  1662           c->set_incompatible_class_change_check();
  1663           c->set_direct_compare(klass->is_final());
  1664           append_split(c);
  1665         }
  1666       }
  1667     }
  1668   }
  1670   if (cha_monomorphic_target != NULL) {
  1671     if (cha_monomorphic_target->is_abstract()) {
  1672       // Do not optimize for abstract methods
  1673       cha_monomorphic_target = NULL;
  1674     }
  1675   }
  1677   if (cha_monomorphic_target != NULL) {
  1678     if (!(target->is_final_method())) {
  1679       // If we inlined because CHA revealed only a single target method,
  1680       // then we are dependent on that target method not getting overridden
  1681       // by dynamic class loading.  Be sure to test the "static" receiver
  1682       // dest_method here, as opposed to the actual receiver, which may
  1683       // falsely lead us to believe that the receiver is final or private.
  1684       dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
  1685     }
  1686     code = Bytecodes::_invokespecial;
  1687   }
  1688   // check if we could do inlining
  1689   if (!PatchALot && Inline && klass->is_loaded() &&
  1690       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
  1691       && target->will_link(klass, callee_holder, code)) {
  1692     // callee is known => check if we have static binding
  1693     assert(target->is_loaded(), "callee must be known");
  1694     if (code == Bytecodes::_invokestatic
  1695      || code == Bytecodes::_invokespecial
  1696      || code == Bytecodes::_invokevirtual && target->is_final_method()
  1697     ) {
  1698       // static binding => check if callee is ok
  1699       ciMethod* inline_target = (cha_monomorphic_target != NULL)
  1700                                   ? cha_monomorphic_target
  1701                                   : target;
  1702       bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
  1703       CHECK_BAILOUT();
  1705 #ifndef PRODUCT
  1706       // printing
  1707       if (PrintInlining && !res) {
  1708         // if it was successfully inlined, then it was already printed.
  1709         print_inline_result(inline_target, res);
  1710       }
  1711 #endif
  1712       clear_inline_bailout();
  1713       if (res) {
  1714         // Register dependence if JVMTI has either breakpoint
  1715         // setting or hotswapping of methods capabilities since they may
  1716         // cause deoptimization.
  1717         if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
  1718           dependency_recorder()->assert_evol_method(inline_target);
  1719         }
  1720         return;
  1721       }
  1722     }
  1723   }
  1724   // If we attempted an inline which did not succeed because of a
  1725   // bailout during construction of the callee graph, the entire
  1726   // compilation has to be aborted. This is fairly rare and currently
  1727   // seems to only occur for jasm-generated classes which contain
  1728   // jsr/ret pairs which are not associated with finally clauses and
  1729   // do not have exception handlers in the containing method, and are
  1730   // therefore not caught early enough to abort the inlining without
  1731   // corrupting the graph. (We currently bail out with a non-empty
  1732   // stack at a ret in these situations.)
  1733   CHECK_BAILOUT();
  1735   // inlining not successful => standard invoke
  1736   bool is_loaded = target->is_loaded();
  1737   bool has_receiver =
  1738     code == Bytecodes::_invokespecial   ||
  1739     code == Bytecodes::_invokevirtual   ||
  1740     code == Bytecodes::_invokeinterface;
  1741   bool is_invokedynamic = code == Bytecodes::_invokedynamic;
  1742   ValueType* result_type = as_ValueType(target->return_type());
  1744   // We require the debug info to be the "state before" because
  1745   // invokedynamics may deoptimize.
  1746   ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
  1748   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
  1749   Value recv = has_receiver ? apop() : NULL;
  1750   int vtable_index = methodOopDesc::invalid_vtable_index;
  1752 #ifdef SPARC
  1753   // Currently only supported on Sparc.
  1754   // The UseInlineCaches only controls dispatch to invokevirtuals for
  1755   // loaded classes which we weren't able to statically bind.
  1756   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
  1757       && !target->can_be_statically_bound()) {
  1758     // Find a vtable index if one is available
  1759     vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
  1760   }
  1761 #endif
  1763   if (recv != NULL &&
  1764       (code == Bytecodes::_invokespecial ||
  1765        !is_loaded || target->is_final())) {
  1766     // invokespecial always needs a NULL check.  invokevirtual where
  1767     // the target is final or where it's not known whether the
  1768     // target is final requires a NULL check.  Otherwise normal
  1769     // invokevirtual will perform the null check during the lookup
  1770     // logic or the unverified entry point.  Profiling of calls
  1771     // requires that the null check is performed in all cases.
  1772     null_check(recv);
  1773   }
  1775   if (is_profiling()) {
  1776     if (recv != NULL && profile_calls()) {
  1777       null_check(recv);
  1778     }
  1779     // Note that we'd collect profile data in this method if we wanted it.
  1780     compilation()->set_would_profile(true);
  1782     if (profile_calls()) {
  1783       assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
  1784       ciKlass* target_klass = NULL;
  1785       if (cha_monomorphic_target != NULL) {
  1786         target_klass = cha_monomorphic_target->holder();
  1787       } else if (exact_target != NULL) {
  1788         target_klass = exact_target->holder();
  1789       }
  1790       profile_call(recv, target_klass);
  1791     }
  1792   }
  1794   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
  1795   // push result
  1796   append_split(result);
  1798   if (result_type != voidType) {
  1799     if (method()->is_strict()) {
  1800       push(result_type, round_fp(result));
  1801     } else {
  1802       push(result_type, result);
  1803     }
  1804   }
  1805 }
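// [Editorial note, illustrative only] Worked example for the CHA-based
// devirtualization in invoke(): given
//
//   class A { void m() {} }
//   class B extends A { void m() {} }  // only loaded class overriding m()
//
// an invokevirtual of A.m() can be rebound as Bytecodes::_invokespecial to
// the single target found by find_monomorphic_target(). The
// assert_unique_concrete_method() dependency recorded above guards this:
// loading another subclass that overrides m() deoptimizes the method.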
  1808 void GraphBuilder::new_instance(int klass_index) {
  1809   ValueStack* state_before = copy_state_exhandling();
  1810   bool will_link;
  1811   ciKlass* klass = stream()->get_klass(will_link);
  1812   assert(klass->is_instance_klass(), "must be an instance klass");
  1813   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
  1814   _memory->new_instance(new_instance);
  1815   apush(append_split(new_instance));
  1816 }
  1819 void GraphBuilder::new_type_array() {
  1820   ValueStack* state_before = copy_state_exhandling();
  1821   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
  1822 }
  1825 void GraphBuilder::new_object_array() {
  1826   bool will_link;
  1827   ciKlass* klass = stream()->get_klass(will_link);
  1828   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  1829   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
  1830   apush(append_split(n));
  1831 }
  1834 bool GraphBuilder::direct_compare(ciKlass* k) {
  1835   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
  1836     ciInstanceKlass* ik = k->as_instance_klass();
  1837     if (ik->is_final()) {
  1838       return true;
  1839     } else {
  1840       if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  1841         // test class is leaf class
  1842         dependency_recorder()->assert_leaf_type(ik);
  1843         return true;
  1844       }
  1845     }
  1846   }
  1847   return false;
  1848 }
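// [Editorial note, illustrative only] Example for direct_compare(): for
//
//   x instanceof String  // java.lang.String is final
//
// the type test can compile to a single klass-pointer comparison instead
// of a supertype walk. The leaf-type case gets the same code shape, but
// must record an assert_leaf_type dependency, since class loading could
// later introduce a subclass and invalidate the shortcut.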
  1851 void GraphBuilder::check_cast(int klass_index) {
  1852   bool will_link;
  1853   ciKlass* klass = stream()->get_klass(will_link);
  1854   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
  1855   CheckCast* c = new CheckCast(klass, apop(), state_before);
  1856   apush(append_split(c));
  1857   c->set_direct_compare(direct_compare(klass));
  1859   if (is_profiling()) {
  1860     // Note that we'd collect profile data in this method if we wanted it.
  1861     compilation()->set_would_profile(true);
  1863     if (profile_checkcasts()) {
  1864       c->set_profiled_method(method());
  1865       c->set_profiled_bci(bci());
  1866       c->set_should_profile(true);
  1867     }
  1868   }
  1869 }
  1872 void GraphBuilder::instance_of(int klass_index) {
  1873   bool will_link;
  1874   ciKlass* klass = stream()->get_klass(will_link);
  1875   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  1876   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
  1877   ipush(append_split(i));
  1878   i->set_direct_compare(direct_compare(klass));
  1880   if (is_profiling()) {
  1881     // Note that we'd collect profile data in this method if we wanted it.
  1882     compilation()->set_would_profile(true);
  1884     if (profile_checkcasts()) {
  1885       i->set_profiled_method(method());
  1886       i->set_profiled_bci(bci());
  1887       i->set_should_profile(true);
  1888     }
  1889   }
  1890 }
  1893 void GraphBuilder::monitorenter(Value x, int bci) {
  1894   // save state before locking in case of deoptimization after a NullPointerException
  1895   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
  1896   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
  1897   kill_all();
  1898 }
  1901 void GraphBuilder::monitorexit(Value x, int bci) {
  1902   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
  1903   kill_all();
  1904 }
  1907 void GraphBuilder::new_multi_array(int dimensions) {
  1908   bool will_link;
  1909   ciKlass* klass = stream()->get_klass(will_link);
  1910   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  1912   Values* dims = new Values(dimensions, NULL);
  1913   // fill in all dimensions
  1914   int i = dimensions;
  1915   while (i-- > 0) dims->at_put(i, ipop());
  1916   // create array
  1917   NewArray* n = new NewMultiArray(klass, dims, state_before);
  1918   apush(append_split(n));
  1919 }
  1922 void GraphBuilder::throw_op(int bci) {
  1923   // We require that the debug info for a Throw be the "state before"
  1924   // the Throw (i.e., exception oop is still on TOS)
  1925   ValueStack* state_before = copy_state_before_with_bci(bci);
  1926   Throw* t = new Throw(apop(), state_before);
  1927   // operand stack not needed after a throw
  1928   state()->truncate_stack(0);
  1929   append_with_bci(t, bci);
  1930 }
  1933 Value GraphBuilder::round_fp(Value fp_value) {
  1934   // no rounding needed if SSE2 is used
  1935   if (RoundFPResults && UseSSE < 2) {
  1936     // Must currently insert rounding node for doubleword values that
  1937     // are results of expressions (i.e., not loads from memory or
  1938     // constants)
  1939     if (fp_value->type()->tag() == doubleTag &&
  1940         fp_value->as_Constant() == NULL &&
  1941         fp_value->as_Local() == NULL &&       // method parameters need no rounding
  1942         fp_value->as_RoundFP() == NULL) {
  1943       return append(new RoundFP(fp_value));
  1944     }
  1945   }
  1946   return fp_value;
  1947 }
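// [Editorial note, illustrative only] Example for round_fp(): on x87
// (UseSSE < 2) intermediate results live in 80-bit registers, so in a
// strict (strictfp) method a computed value such as
//
//   double d = a * b;  // expression result: not a load, constant or local
//
// receives a RoundFP node to narrow it back to 64-bit precision, while
// constants, parameters and values already rounded are skipped by the
// tests above.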
  1950 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  1951   Canonicalizer canon(compilation(), instr, bci);
  1952   Instruction* i1 = canon.canonical();
  1953   if (i1->is_linked() || !i1->can_be_linked()) {
  1954     // Canonicalizer returned an instruction which was already
  1955     // appended so simply return it.
  1956     return i1;
  1957   }
  1959   if (UseLocalValueNumbering) {
  1960     // Lookup the instruction in the ValueMap and add it to the map if
  1961     // it's not found.
  1962     Instruction* i2 = vmap()->find_insert(i1);
  1963     if (i2 != i1) {
  1964       // found an entry in the value map, so just return it.
  1965       assert(i2->is_linked(), "should already be linked");
  1966       return i2;
  1967     }
  1968     ValueNumberingEffects vne(vmap());
  1969     i1->visit(&vne);
  1970   }
  1972   // i1 was not eliminated => append it
  1973   assert(i1->next() == NULL, "shouldn't already be linked");
  1974   _last = _last->set_next(i1, canon.bci());
  1976   if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
  1977     // set the bailout state but complete normal processing.  We
  1978     // might do a little more work before noticing the bailout so we
  1979     // want processing to continue normally until it's noticed.
  1980     bailout("Method and/or inlining is too large");
  1981   }
  1983 #ifndef PRODUCT
  1984   if (PrintIRDuringConstruction) {
  1985     InstructionPrinter ip;
  1986     ip.print_line(i1);
  1987     if (Verbose) {
  1988       state()->print();
  1989     }
  1990   }
  1991 #endif
  1993   // save state after modification of operand stack for StateSplit instructions
  1994   StateSplit* s = i1->as_StateSplit();
  1995   if (s != NULL) {
  1996     if (EliminateFieldAccess) {
  1997       Intrinsic* intrinsic = s->as_Intrinsic();
  1998       if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
  1999         _memory->kill();
  2000       }
  2001     }
  2002     s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
  2003   }
  2005   // set up exception handlers for this instruction if necessary
  2006   if (i1->can_trap()) {
  2007     i1->set_exception_handlers(handle_exception(i1));
  2008     assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
  2009   }
  2010   return i1;
  2011 }
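// [Editorial note, illustrative only] Example for the local value
// numbering step in append_with_bci(): within one block,
//
//   int x = a + b;
//   int y = a + b;  // same operation, operands unchanged in between
//
// the second Add is found by vmap()->find_insert() and the already linked
// instruction is returned, so both locals end up sharing a single HIR
// value instead of duplicating the computation.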
  2014 Instruction* GraphBuilder::append(Instruction* instr) {
  2015   assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
  2016   return append_with_bci(instr, bci());
  2017 }
  2020 Instruction* GraphBuilder::append_split(StateSplit* instr) {
  2021   return append_with_bci(instr, bci());
  2022 }
  2025 void GraphBuilder::null_check(Value value) {
  2026   if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
  2027     return;
  2028   } else {
  2029     Constant* con = value->as_Constant();
  2030     if (con) {
  2031       ObjectType* c = con->type()->as_ObjectType();
  2032       if (c && c->is_loaded()) {
  2033         ObjectConstant* oc = c->as_ObjectConstant();
  2034         if (!oc || !oc->value()->is_null_object()) {
  2035           return;
  2036         }
  2037       }
  2038     }
  2039   }
  2040   append(new NullCheck(value, copy_state_for_exception()));
  2041 }
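// [Editorial note, illustrative only] Example for null_check(): in
//
//   Object o = new Object();  // NewInstance can never yield null
//   o.hashCode();
//
// no NullCheck is appended; the same applies to a loaded, non-null
// ObjectConstant. Any other receiver gets an explicit NullCheck carrying
// a copy_state_for_exception() debug state for deoptimization.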
  2045 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
  2046   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
  2047     assert(instruction->exception_state() == NULL
  2048            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
  2049            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
  2050            "exception_state should be of exception kind");
  2051     return new XHandlers();
  2052   }
  2054   XHandlers*  exception_handlers = new XHandlers();
  2055   ScopeData*  cur_scope_data = scope_data();
  2056   ValueStack* cur_state = instruction->state_before();
  2057   ValueStack* prev_state = NULL;
  2058   int scope_count = 0;
  2060   assert(cur_state != NULL, "state_before must be set");
  2061   do {
  2062     int cur_bci = cur_state->bci();
  2063     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
  2064     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
  2066     // join with all potential exception handlers
  2067     XHandlers* list = cur_scope_data->xhandlers();
  2068     const int n = list->length();
  2069     for (int i = 0; i < n; i++) {
  2070       XHandler* h = list->handler_at(i);
  2071       if (h->covers(cur_bci)) {
  2072         // h is a potential exception handler => join it
  2073         compilation()->set_has_exception_handlers(true);
  2075         BlockBegin* entry = h->entry_block();
  2076         if (entry == block()) {
  2077           // It's acceptable for an exception handler to cover itself
  2078           // but we don't handle that in the parser currently.  It's
  2079           // very rare so we bailout instead of trying to handle it.
  2080           BAILOUT_("exception handler covers itself", exception_handlers);
  2081         }
  2082         assert(entry->bci() == h->handler_bci(), "must match");
  2083         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
  2085         // previously this was a BAILOUT, but this is not necessary
  2086         // now because asynchronous exceptions are not handled this way.
  2087         assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
  2089         // xhandlers start with an empty expression stack
  2090         if (cur_state->stack_size() != 0) {
  2091           cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
  2092         }
  2093         if (instruction->exception_state() == NULL) {
  2094           instruction->set_exception_state(cur_state);
  2095         }
  2097         // Note: Usually this join must work. However, very
  2098         // complicated jsr-ret structures where we don't ret from
  2099         // the subroutine can cause the objects on the monitor
  2100         // stacks to not match because blocks can be parsed twice.
  2101         // The only test case we've seen so far which exhibits this
  2102         // problem is caught by the infinite recursion test in
  2103         // GraphBuilder::jsr() if the join doesn't work.
  2104         if (!entry->try_merge(cur_state)) {
  2105           BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
  2106         }
  2108         // add current state for correct handling of phi functions at begin of xhandler
  2109         int phi_operand = entry->add_exception_state(cur_state);
  2111         // add entry to the list of xhandlers of this block
  2112         _block->add_exception_handler(entry);
  2114         // add back-edge from xhandler entry to this block
  2115         if (!entry->is_predecessor(_block)) {
  2116           entry->add_predecessor(_block);
  2117         }
  2119         // clone XHandler because phi_operand and scope_count can not be shared
  2120         XHandler* new_xhandler = new XHandler(h);
  2121         new_xhandler->set_phi_operand(phi_operand);
  2122         new_xhandler->set_scope_count(scope_count);
  2123         exception_handlers->append(new_xhandler);
  2125         // fill in exception handler subgraph lazily
  2126         assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
  2127         cur_scope_data->add_to_work_list(entry);
  2129         // stop when reaching catchall
  2130         if (h->catch_type() == 0) {
  2131           return exception_handlers;
  2132         }
  2133       }
  2134     }
  2136     if (exception_handlers->length() == 0) {
  2137       // This scope and all callees do not handle exceptions, so the local
  2138       // variables of this scope are not needed. However, the scope itself is
  2139       // required for a correct exception stack trace -> clear out the locals.
  2140       if (_compilation->env()->jvmti_can_access_local_variables()) {
  2141         cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
  2142       } else {
  2143         cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
  2144       }
  2145       if (prev_state != NULL) {
  2146         prev_state->set_caller_state(cur_state);
  2147       }
  2148       if (instruction->exception_state() == NULL) {
  2149         instruction->set_exception_state(cur_state);
  2150       }
  2151     }
  2153     // Set up iteration for next time.
  2154     // If parsing a jsr, do not grab exception handlers from the
  2155     // parent scopes for this method (already got them, and they
  2156     // needed to be cloned)
  2158     while (cur_scope_data->parsing_jsr()) {
  2159       cur_scope_data = cur_scope_data->parent();
  2160     }
  2162     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
  2163     assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
  2165     prev_state = cur_state;
  2166     cur_state = cur_state->caller_state();
  2167     cur_scope_data = cur_scope_data->parent();
  2168     scope_count++;
  2169   } while (cur_scope_data != NULL);
  2171   return exception_handlers;
  2172 }
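// [Editorial sketch -- not part of the original file] The traversal above,
// reduced to its skeleton (jsr skipping and exception-state threading
// omitted for brevity):
//
//   for (ScopeData* sd = scope_data(); sd != NULL; sd = sd->parent()) {
//     // clone each XHandler of sd covering the current bci; a catch-all
//     // (catch_type() == 0) ends the walk, since nothing thrown here can
//     // escape past it into an outer scope.
//   }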
  2175 // Helper class for simplifying Phis.
  2176 class PhiSimplifier : public BlockClosure {
  2177  private:
  2178   bool _has_substitutions;
  2179   Value simplify(Value v);
  2181  public:
  2182   PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
  2183     start->iterate_preorder(this);
  2184     if (_has_substitutions) {
  2185       SubstitutionResolver sr(start);
  2186     }
  2187   }
  2188   void block_do(BlockBegin* b);
  2189   bool has_substitutions() const { return _has_substitutions; }
  2190 };
  2193 Value PhiSimplifier::simplify(Value v) {
  2194   Phi* phi = v->as_Phi();
  2196   if (phi == NULL) {
  2197     // no phi function
  2198     return v;
  2199   } else if (v->has_subst()) {
  2200     // already substituted; subst can be phi itself -> simplify
  2201     return simplify(v->subst());
  2202   } else if (phi->is_set(Phi::cannot_simplify)) {
  2203     // already tried to simplify phi before
  2204     return phi;
  2205   } else if (phi->is_set(Phi::visited)) {
  2206     // break cycles in phi functions
  2207     return phi;
  2208   } else if (phi->type()->is_illegal()) {
  2209     // illegal phi functions are ignored anyway
  2210     return phi;
  2212   } else {
  2213     // mark phi function as processed to break cycles in phi functions
  2214     phi->set(Phi::visited);
  2216     // simplify x = [y, x] and x = [y, y] to y
  2217     Value subst = NULL;
  2218     int opd_count = phi->operand_count();
  2219     for (int i = 0; i < opd_count; i++) {
  2220       Value opd = phi->operand_at(i);
  2221       assert(opd != NULL, "Operand must exist!");
  2223       if (opd->type()->is_illegal()) {
  2224         // if one operand is illegal, the entire phi function is illegal
  2225         phi->make_illegal();
  2226         phi->clear(Phi::visited);
  2227         return phi;
  2228       }
  2230       Value new_opd = simplify(opd);
  2231       assert(new_opd != NULL, "Simplified operand must exist!");
  2233       if (new_opd != phi && new_opd != subst) {
  2234         if (subst == NULL) {
  2235           subst = new_opd;
  2236         } else {
  2237           // no simplification possible
  2238           phi->set(Phi::cannot_simplify);
  2239           phi->clear(Phi::visited);
  2240           return phi;
  2241         }
  2242       }
  2243     }
  2245     // successfully simplified phi function
  2246     assert(subst != NULL, "illegal phi function");
  2247     _has_substitutions = true;
  2248     phi->clear(Phi::visited);
  2249     phi->set_subst(subst);
  2251 #ifndef PRODUCT
  2252     if (PrintPhiFunctions) {
  2253       tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
  2254     }
  2255 #endif
  2257     return subst;
  2258   }
  2259 }
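// [Editorial note, illustrative only] Worked example for simplify(): in
//
//   int i = 0;
//   while (cond) { use(i); }  // i is never reassigned in the loop
//
// the loop header receives i_phi = [i_0, i_phi]. Every operand is either
// the phi itself or i_0, so subst becomes i_0 and the phi is replaced:
// the x = [y, x] case named below; x = [y, y] folds the same way.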
  2262 void PhiSimplifier::block_do(BlockBegin* b) {
  2263   for_each_phi_fun(b, phi,
  2264     simplify(phi);
  2265   );
  2267 #ifdef ASSERT
  2268   for_each_phi_fun(b, phi,
  2269                    assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
  2270   );
  2272   ValueStack* state = b->state()->caller_state();
  2273   for_each_state_value(state, value,
  2274     Phi* phi = value->as_Phi();
  2275     assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
  2276   );
  2277 #endif
  2278 }
  2280 // This method is called after all blocks are filled with HIR instructions
  2281 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x]
  2282 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
  2283   PhiSimplifier simplifier(start);
  2284 }
  2287 void GraphBuilder::connect_to_end(BlockBegin* beg) {
  2288   // setup iteration
  2289   kill_all();
  2290   _block = beg;
  2291   _state = beg->state()->copy_for_parsing();
  2292   _last  = beg;
  2293   iterate_bytecodes_for_block(beg->bci());
  2294 }
  2297 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
  2298 #ifndef PRODUCT
  2299   if (PrintIRDuringConstruction) {
  2300     tty->cr();
  2301     InstructionPrinter ip;
  2302     ip.print_instr(_block); tty->cr();
  2303     ip.print_stack(_block->state()); tty->cr();
  2304     ip.print_inline_level(_block);
  2305     ip.print_head();
  2306     tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
  2307   }
  2308 #endif
  2309   _skip_block = false;
  2310   assert(state() != NULL, "ValueStack missing!");
  2311   ciBytecodeStream s(method());
  2312   s.reset_to_bci(bci);
  2313   int prev_bci = bci;
  2314   scope_data()->set_stream(&s);
  2315   // iterate
  2316   Bytecodes::Code code = Bytecodes::_illegal;
  2317   bool push_exception = false;
  2319   if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
  2320     // first thing in the exception entry block should be the exception object.
  2321     push_exception = true;
  2322   }
  2324   while (!bailed_out() && last()->as_BlockEnd() == NULL &&
  2325          (code = stream()->next()) != ciBytecodeStream::EOBC() &&
  2326          (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
  2327     assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
  2329     // Check for active jsr during OSR compilation
  2330     if (compilation()->is_osr_compile()
  2331         && scope()->is_top_scope()
  2332         && parsing_jsr()
  2333         && s.cur_bci() == compilation()->osr_bci()) {
  2334       bailout("OSR not supported while a jsr is active");
  2335     }
  2337     if (push_exception) {
  2338       apush(append(new ExceptionObject()));
  2339       push_exception = false;
  2340     }
  2342     // handle bytecode
  2343     switch (code) {
  2344       case Bytecodes::_nop            : /* nothing to do */ break;
  2345       case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
  2346       case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
  2347       case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
  2348       case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
  2349       case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
  2350       case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
  2351       case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
  2352       case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
  2353       case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
  2354       case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
  2355       case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
  2356       case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
  2357       case Bytecodes::_fconst_2       : fpush(append(new Constant(new FloatConstant ( 2)))); break;
  2358       case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
  2359       case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
  2360       case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
  2361       case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
  2362       case Bytecodes::_ldc            : // fall through
  2363       case Bytecodes::_ldc_w          : // fall through
  2364       case Bytecodes::_ldc2_w         : load_constant(); break;
  2365       case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
  2366       case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
  2367       case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
  2368       case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
  2369       case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
  2370       case Bytecodes::_iload_0        : load_local(intType   , 0); break;
  2371       case Bytecodes::_iload_1        : load_local(intType   , 1); break;
  2372       case Bytecodes::_iload_2        : load_local(intType   , 2); break;
  2373       case Bytecodes::_iload_3        : load_local(intType   , 3); break;
  2374       case Bytecodes::_lload_0        : load_local(longType  , 0); break;
  2375       case Bytecodes::_lload_1        : load_local(longType  , 1); break;
  2376       case Bytecodes::_lload_2        : load_local(longType  , 2); break;
  2377       case Bytecodes::_lload_3        : load_local(longType  , 3); break;
  2378       case Bytecodes::_fload_0        : load_local(floatType , 0); break;
  2379       case Bytecodes::_fload_1        : load_local(floatType , 1); break;
  2380       case Bytecodes::_fload_2        : load_local(floatType , 2); break;
  2381       case Bytecodes::_fload_3        : load_local(floatType , 3); break;
  2382       case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
  2383       case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
  2384       case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
  2385       case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
  2386       case Bytecodes::_aload_0        : load_local(objectType, 0); break;
  2387       case Bytecodes::_aload_1        : load_local(objectType, 1); break;
  2388       case Bytecodes::_aload_2        : load_local(objectType, 2); break;
  2389       case Bytecodes::_aload_3        : load_local(objectType, 3); break;
  2390       case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
  2391       case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
  2392       case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
  2393       case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
  2394       case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
  2395       case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
  2396       case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
  2397       case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
  2398       case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
  2399       case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
  2400       case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
  2401       case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
  2402       case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
  2403       case Bytecodes::_istore_0       : store_local(intType   , 0); break;
  2404       case Bytecodes::_istore_1       : store_local(intType   , 1); break;
  2405       case Bytecodes::_istore_2       : store_local(intType   , 2); break;
  2406       case Bytecodes::_istore_3       : store_local(intType   , 3); break;
  2407       case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
  2408       case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
  2409       case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
  2410       case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
  2411       case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
  2412       case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
  2413       case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
  2414       case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
  2415       case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
  2416       case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
  2417       case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
  2418       case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
  2419       case Bytecodes::_astore_0       : store_local(objectType, 0); break;
  2420       case Bytecodes::_astore_1       : store_local(objectType, 1); break;
  2421       case Bytecodes::_astore_2       : store_local(objectType, 2); break;
  2422       case Bytecodes::_astore_3       : store_local(objectType, 3); break;
  2423       case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
  2424       case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
  2425       case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
  2426       case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
  2427       case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
  2428       case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
  2429       case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
  2430       case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
  2431       case Bytecodes::_pop            : // fall through
  2432       case Bytecodes::_pop2           : // fall through
  2433       case Bytecodes::_dup            : // fall through
  2434       case Bytecodes::_dup_x1         : // fall through
  2435       case Bytecodes::_dup_x2         : // fall through
  2436       case Bytecodes::_dup2           : // fall through
  2437       case Bytecodes::_dup2_x1        : // fall through
  2438       case Bytecodes::_dup2_x2        : // fall through
  2439       case Bytecodes::_swap           : stack_op(code); break;
  2440       case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
  2441       case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
  2442       case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
  2443       case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
  2444       case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
  2445       case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
  2446       case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
  2447       case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
  2448       case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
  2449       case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
  2450       case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
  2451       case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
  2452       case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
  2453       case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
  2454       case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
  2455       case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
  2456       case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
  2457       case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
  2458       case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
  2459       case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
  2460       case Bytecodes::_ineg           : negate_op(intType   ); break;
  2461       case Bytecodes::_lneg           : negate_op(longType  ); break;
  2462       case Bytecodes::_fneg           : negate_op(floatType ); break;
  2463       case Bytecodes::_dneg           : negate_op(doubleType); break;
  2464       case Bytecodes::_ishl           : shift_op(intType , code); break;
  2465       case Bytecodes::_lshl           : shift_op(longType, code); break;
  2466       case Bytecodes::_ishr           : shift_op(intType , code); break;
  2467       case Bytecodes::_lshr           : shift_op(longType, code); break;
  2468       case Bytecodes::_iushr          : shift_op(intType , code); break;
  2469       case Bytecodes::_lushr          : shift_op(longType, code); break;
  2470       case Bytecodes::_iand           : logic_op(intType , code); break;
  2471       case Bytecodes::_land           : logic_op(longType, code); break;
  2472       case Bytecodes::_ior            : logic_op(intType , code); break;
  2473       case Bytecodes::_lor            : logic_op(longType, code); break;
  2474       case Bytecodes::_ixor           : logic_op(intType , code); break;
  2475       case Bytecodes::_lxor           : logic_op(longType, code); break;
  2476       case Bytecodes::_iinc           : increment(); break;
  2477       case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
  2478       case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
  2479       case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
  2480       case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
  2481       case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
  2482       case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
  2483       case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
  2484       case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
  2485       case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
  2486       case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
  2487       case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
  2488       case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
  2489       case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
  2490       case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
  2491       case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
  2492       case Bytecodes::_lcmp           : compare_op(longType  , code); break;
  2493       case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
  2494       case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
  2495       case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
  2496       case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
  2497       case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
  2498       case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
  2499       case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
  2500       case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
  2501       case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
  2502       case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
  2503       case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
  2504       case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
  2505       case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
  2506       case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
  2507       case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
  2508       case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
  2509       case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
  2510       case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
  2511       case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
  2512       case Bytecodes::_jsr            : jsr(s.get_dest()); break;
  2513       case Bytecodes::_ret            : ret(s.get_index()); break;
  2514       case Bytecodes::_tableswitch    : table_switch(); break;
  2515       case Bytecodes::_lookupswitch   : lookup_switch(); break;
  2516       case Bytecodes::_ireturn        : method_return(ipop()); break;
  2517       case Bytecodes::_lreturn        : method_return(lpop()); break;
  2518       case Bytecodes::_freturn        : method_return(fpop()); break;
  2519       case Bytecodes::_dreturn        : method_return(dpop()); break;
  2520       case Bytecodes::_areturn        : method_return(apop()); break;
  2521       case Bytecodes::_return         : method_return(NULL  ); break;
  2522       case Bytecodes::_getstatic      : // fall through
  2523       case Bytecodes::_putstatic      : // fall through
  2524       case Bytecodes::_getfield       : // fall through
  2525       case Bytecodes::_putfield       : access_field(code); break;
  2526       case Bytecodes::_invokevirtual  : // fall through
  2527       case Bytecodes::_invokespecial  : // fall through
  2528       case Bytecodes::_invokestatic   : // fall through
  2529       case Bytecodes::_invokedynamic  : // fall through
  2530       case Bytecodes::_invokeinterface: invoke(code); break;
  2531       case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
  2532       case Bytecodes::_newarray       : new_type_array(); break;
  2533       case Bytecodes::_anewarray      : new_object_array(); break;
  2534       case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
  2535       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
  2536       case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
  2537       case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
  2538       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
  2539       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
  2540       case Bytecodes::_wide           : ShouldNotReachHere(); break;
  2541       case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
  2542       case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
  2543       case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
  2544       case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
  2545       case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
  2546       case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", NULL);
  2547       default                         : ShouldNotReachHere(); break;
  2548     }
  2549     // save current bci to setup Goto at the end
  2550     prev_bci = s.cur_bci();
  2551   }
  2552   CHECK_BAILOUT_(NULL);
  2553   // stop processing of this block (see try_inline_full)
  2554   if (_skip_block) {
  2555     _skip_block = false;
  2556     assert(_last && _last->as_BlockEnd(), "");
  2557     return _last->as_BlockEnd();
  2558   }
  2559   // check whether the last instruction is a BlockEnd instruction
  2560   BlockEnd* end = last()->as_BlockEnd();
  2561   if (end == NULL) {
  2562     // all blocks must end with a BlockEnd instruction => add a Goto
  2563     end = new Goto(block_at(s.cur_bci()), false);
  2564     append(end);
  2565   }
  2566   assert(end == last()->as_BlockEnd(), "inconsistency");
  2568   assert(end->state() != NULL, "state must already be present");
  2569   assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw");
  2571   // connect to begin & set state
  2572   // NOTE that inlining may have changed the block we are parsing
  2573   block()->set_end(end);
  2574   // propagate state
  2575   for (int i = end->number_of_sux() - 1; i >= 0; i--) {
  2576     BlockBegin* sux = end->sux_at(i);
  2577     assert(sux->is_predecessor(block()), "predecessor missing");
  2578     // be careful, bailout if bytecodes are strange
  2579     if (!sux->try_merge(end->state())) BAILOUT_("block join failed", NULL);
  2580     scope_data()->add_to_work_list(end->sux_at(i));
  2581   }
  2583   scope_data()->set_stream(NULL);
  2585   // done
  2586   return end;
  2587 }
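// [Editorial note, illustrative only] The dispatch loop above acts as an
// abstract interpreter over the operand stack. For the bytecode sequence
//
//   iload_1; iload_2; iadd; istore_3
//
// it performs load_local(intType, 1); load_local(intType, 2);
// arithmetic_op(intType, _iadd); store_local(intType, 3); appending HIR
// instructions while tracking the ValueStack in parallel.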
  2590 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
  2591   do {
  2592     if (start_in_current_block_for_inlining && !bailed_out()) {
  2593       iterate_bytecodes_for_block(0);
  2594       start_in_current_block_for_inlining = false;
  2595     } else {
  2596       BlockBegin* b;
  2597       while ((b = scope_data()->remove_from_work_list()) != NULL) {
  2598         if (!b->is_set(BlockBegin::was_visited_flag)) {
  2599           if (b->is_set(BlockBegin::osr_entry_flag)) {
  2600             // we're about to parse the osr entry block, so make sure
  2601             // we setup the OSR edge leading into this block so that
  2602             // Phis get setup correctly.
  2603             setup_osr_entry_block();
  2604             // this is no longer the osr entry block, so clear it.
  2605             b->clear(BlockBegin::osr_entry_flag);
  2606           }
  2607           b->set(BlockBegin::was_visited_flag);
  2608           connect_to_end(b);
  2609         }
  2610       }
  2611     }
  2612   } while (!bailed_out() && !scope_data()->is_work_list_empty());
  2613 }
  2616 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
  2618 void GraphBuilder::initialize() {
  2619   // the following bytecodes are assumed to potentially
  2620   // throw exceptions in compiled code - note that e.g.
  2621   // monitorexit & the return bytecodes do not throw
  2622   // exceptions since monitor pairing proved that they
  2623   // succeed (if monitor pairing succeeded)
  2624   Bytecodes::Code can_trap_list[] =
  2625     { Bytecodes::_ldc
  2626     , Bytecodes::_ldc_w
  2627     , Bytecodes::_ldc2_w
  2628     , Bytecodes::_iaload
  2629     , Bytecodes::_laload
  2630     , Bytecodes::_faload
  2631     , Bytecodes::_daload
  2632     , Bytecodes::_aaload
  2633     , Bytecodes::_baload
  2634     , Bytecodes::_caload
  2635     , Bytecodes::_saload
  2636     , Bytecodes::_iastore
  2637     , Bytecodes::_lastore
  2638     , Bytecodes::_fastore
  2639     , Bytecodes::_dastore
  2640     , Bytecodes::_aastore
  2641     , Bytecodes::_bastore
  2642     , Bytecodes::_castore
  2643     , Bytecodes::_sastore
  2644     , Bytecodes::_idiv
  2645     , Bytecodes::_ldiv
  2646     , Bytecodes::_irem
  2647     , Bytecodes::_lrem
  2648     , Bytecodes::_getstatic
  2649     , Bytecodes::_putstatic
  2650     , Bytecodes::_getfield
  2651     , Bytecodes::_putfield
  2652     , Bytecodes::_invokevirtual
  2653     , Bytecodes::_invokespecial
  2654     , Bytecodes::_invokestatic
  2655     , Bytecodes::_invokedynamic
  2656     , Bytecodes::_invokeinterface
  2657     , Bytecodes::_new
  2658     , Bytecodes::_newarray
  2659     , Bytecodes::_anewarray
  2660     , Bytecodes::_arraylength
  2661     , Bytecodes::_athrow
  2662     , Bytecodes::_checkcast
  2663     , Bytecodes::_instanceof
  2664     , Bytecodes::_monitorenter
  2665     , Bytecodes::_multianewarray
  2666     };
  2668   // initialize trap tables
  2669   for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
  2670     _can_trap[i] = false;
  2671   }
  2672   // set standard trap info
  2673   for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
  2674     _can_trap[can_trap_list[j]] = true;
  2675   }
  2676 }
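// Editorial sketch (hypothetical accessor name, not from the original
// source): the table built above is meant to be consulted per bytecode when
// deciding whether an instruction needs an exception-handling state:
//
//   static bool can_trap(Bytecodes::Code code) {
//     assert(0 <= (int)code && (int)code < Bytecodes::number_of_java_codes,
//            "bytecode out of range");
//     return _can_trap[code];
//   }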
  2679 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  2680   assert(entry->is_set(f), "entry/flag mismatch");
  2681   // create header block
  2682   BlockBegin* h = new BlockBegin(entry->bci());
  2683   h->set_depth_first_number(0);
  2685   Value l = h;
  2686   BlockEnd* g = new Goto(entry, false);
  2687   l->set_next(g, entry->bci());
  2688   h->set_end(g);
  2689   h->set(f);
  2690   // setup header block end state
  2691   ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
  2692   assert(s->stack_is_empty(), "must have empty stack at entry point");
  2693   g->set_state(s);
  2694   return h;
  2695 }
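// Editorial illustration (not from the original source): header_block()
// interposes a one-Goto block in front of `entry`:
//
//   [ h : flag f, empty-stack state ] --Goto--> [ entry ]
//
// so a backward-branch target (or a profiled entry) gets a dedicated
// predecessor in which phi functions and invocation counters can live.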
  2699 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  2700   BlockBegin* start = new BlockBegin(0);
  2702   // This code eliminates the empty start block at the beginning of
  2703   // each method.  Previously, each method started with the
  2704   // start-block created below, and this block was followed by the
  2705   // header block that was always empty.  This header block is only
  2706   // necessary if std_entry is also a backward branch target because
  2707   // then phi functions may be necessary in the header block.  It's
  2708   // also necessary when profiling so that there's a single block that
  2709   // can increment the interpreter_invocation_count.
  2710   BlockBegin* new_header_block;
  2711   if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
  2712     new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  2713   } else {
  2714     new_header_block = std_entry;
  2715   }
  2717   // setup start block (root for the IR graph)
  2718   Base* base =
  2719     new Base(
  2720       new_header_block,
  2721       osr_entry
  2722     );
  2723   start->set_next(base, 0);
  2724   start->set_end(base);
  2725   // create & setup state for start block
  2726   start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
  2727   base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
  2729   if (base->std_entry()->state() == NULL) {
  2730     // setup states for header blocks
  2731     base->std_entry()->merge(state);
  2732   }
  2734   assert(base->std_entry()->state() != NULL, "");
  2735   return start;
  2736 }
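// Editorial illustration (not from the original source): the graph rooted
// at the start block after setup_start_block():
//
//   [ start ] --Base--> [ new_header_block ] --> ...   (std entry path)
//                   \--> [ osr_entry ]                  (OSR compiles only)
//
// i.e. the Base instruction is the single root from which both entries hang.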
  2739 void GraphBuilder::setup_osr_entry_block() {
  2740   assert(compilation()->is_osr_compile(), "only for osrs");
  2742   int osr_bci = compilation()->osr_bci();
  2743   ciBytecodeStream s(method());
  2744   s.reset_to_bci(osr_bci);
  2745   s.next();
  2746   scope_data()->set_stream(&s);
  2748   // create a new block to be the osr setup code
  2749   _osr_entry = new BlockBegin(osr_bci);
  2750   _osr_entry->set(BlockBegin::osr_entry_flag);
  2751   _osr_entry->set_depth_first_number(0);
  2752   BlockBegin* target = bci2block()->at(osr_bci);
  2753   assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  2754   // the osr entry has no values for locals
  2755   ValueStack* state = target->state()->copy();
  2756   _osr_entry->set_state(state);
  2758   kill_all();
  2759   _block = _osr_entry;
  2760   _state = _osr_entry->state()->copy();
  2761   assert(_state->bci() == osr_bci, "mismatch");
  2762   _last  = _osr_entry;
  2763   Value e = append(new OsrEntry());
  2764   e->set_needs_null_check(false);
  2766   // OSR buffer is
  2767   //
  2768   // locals[nlocals-1..0]
  2769   // monitors[number_of_locks-1..0]
  2770   //
  2771   // locals is a direct copy of the interpreter frame, so in the osr buffer
  2772   // the first slot in the local array is the last local from the interpreter
  2773   // and the last slot is local[0] (the receiver) from the interpreter
  2774   //
  2775   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  2776   // from the interpreter frame, and the nth lock slot in the osr buffer is the
  2777   // 0th lock in the interpreter frame (the method lock if a synchronized method)
  2779   // Initialize monitors in the compiled activation.
  2781   int index;
  2782   Value local;
  2784   // find all the locals that the interpreter thinks contain live oops
  2785   const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
  2787   // compute the offset into the locals so that we can treat the buffer
  2788   // as if the locals were still in the interpreter frame
  2789   int locals_offset = BytesPerWord * (method()->max_locals() - 1);
  2790   for_each_local_value(state, index, local) {
  2791     int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
  2792     Value get;
  2793     if (local->type()->is_object_kind() && !live_oops.at(index)) {
  2794       // The interpreter thinks this local is dead but the compiler
  2795       // doesn't so pretend that the interpreter passed in null.
  2796       get = append(new Constant(objectNull));
  2797     } else {
  2798       get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
  2799                                     append(new Constant(new IntConstant(offset))),
  2800                                     0,
  2801                                     true /*unaligned*/, true /*wide*/));
  2802     }
  2803     _state->store_local(index, get);
  2804   }
  2806   // the storage for the OSR buffer is freed manually in the LIRGenerator.
  2808   assert(state->caller_state() == NULL, "should be top scope");
  2809   state->clear_locals();
  2810   Goto* g = new Goto(target, false);
  2811   append(g);
  2812   _osr_entry->set_end(g);
  2813   target->merge(_osr_entry->end()->state());
  2815   scope_data()->set_stream(NULL);
  2816 }
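// Editorial worked example (not from the original source; BytesPerWord == 8
// and max_locals == 3 are assumed values): locals_offset = 8 * (3 - 1) = 16,
// so the loop above computes
//
//   local 2 (an int)  : offset = 16 - (2 + 1 - 1) * 8 = 0   // first buffer slot
//   local 0 (a long)  : offset = 16 - (0 + 2 - 1) * 8 = 8
//
// offsets shrink as the local index grows, matching the reversed
// interpreter-frame layout described in the comment block above.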
  2819 ValueStack* GraphBuilder::state_at_entry() {
  2820   ValueStack* state = new ValueStack(scope(), NULL);
  2822   // Set up locals for receiver
  2823   int idx = 0;
  2824   if (!method()->is_static()) {
  2825     // we should always see the receiver
  2826     state->store_local(idx, new Local(objectType, idx));
  2827     idx = 1;
  2828   }
  2830   // Set up locals for incoming arguments
  2831   ciSignature* sig = method()->signature();
  2832   for (int i = 0; i < sig->count(); i++) {
  2833     ciType* type = sig->type_at(i);
  2834     BasicType basic_type = type->basic_type();
  2835     // don't allow T_ARRAY to propagate into locals types
  2836     if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  2837     ValueType* vt = as_ValueType(basic_type);
  2838     state->store_local(idx, new Local(vt, idx));
  2839     idx += type->size();
  2840   }
  2842   // lock synchronized method
  2843   if (method()->is_synchronized()) {
  2844     state->lock(NULL);
  2845   }
  2847   return state;
  2848 }
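// Editorial example (not from the original source): for a virtual method
// with descriptor (ILjava/lang/String;D)V the code above produces
//
//   local 0 : receiver (objectType)
//   local 1 : int argument
//   local 2 : String argument (arrays would likewise be stored as T_OBJECT)
//   local 3 : double argument (size 2, so idx advances past slot 4)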
  2851 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  2852   : _scope_data(NULL)
  2853   , _instruction_count(0)
  2854   , _osr_entry(NULL)
  2855   , _memory(new MemoryBuffer())
  2856   , _compilation(compilation)
  2857   , _inline_bailout_msg(NULL)
  2858 {
  2859   int osr_bci = compilation->osr_bci();
  2861   // determine entry points and bci2block mapping
  2862   BlockListBuilder blm(compilation, scope, osr_bci);
  2863   CHECK_BAILOUT();
  2865   BlockList* bci2block = blm.bci2block();
  2866   BlockBegin* start_block = bci2block->at(0);
  2868   push_root_scope(scope, bci2block, start_block);
  2870   // setup state for std entry
  2871   _initial_state = state_at_entry();
  2872   start_block->merge(_initial_state);
  2874   // complete graph
  2875   _vmap        = new ValueMap();
  2876   switch (scope->method()->intrinsic_id()) {
  2877   case vmIntrinsics::_dabs          : // fall through
  2878   case vmIntrinsics::_dsqrt         : // fall through
  2879   case vmIntrinsics::_dsin          : // fall through
  2880   case vmIntrinsics::_dcos          : // fall through
  2881   case vmIntrinsics::_dtan          : // fall through
  2882   case vmIntrinsics::_dlog          : // fall through
  2883   case vmIntrinsics::_dlog10        : // fall through
  2884     {
  2885       // Compiles where the root method is an intrinsic need a special
  2886       // compilation environment because the bytecodes for the method
  2887       // shouldn't be parsed during the compilation, only the special
  2888       // Intrinsic node should be emitted.  If this isn't done, the
  2889       // code for the inlined version will be different from the root
  2890       // compiled version, which could lead to monotonicity problems on
  2891       // Intel.
  2893       // Set up a stream so that appending instructions works properly.
  2894       ciBytecodeStream s(scope->method());
  2895       s.reset_to_bci(0);
  2896       scope_data()->set_stream(&s);
  2897       s.next();
  2899       // setup the initial block state
  2900       _block = start_block;
  2901       _state = start_block->state()->copy_for_parsing();
  2902       _last  = start_block;
  2903       load_local(doubleType, 0);
  2905       // Emit the intrinsic node.
  2906       bool result = try_inline_intrinsics(scope->method());
  2907       if (!result) BAILOUT("failed to inline intrinsic");
  2908       method_return(dpop());
  2910       // connect the begin and end blocks and we're all done.
  2911       BlockEnd* end = last()->as_BlockEnd();
  2912       block()->set_end(end);
  2913       break;
  2914     }
  2916   case vmIntrinsics::_Reference_get:
  2917     {
  2918       if (UseG1GC) {
  2919         // With java.lang.ref.Reference.get() we must go through the
  2920         // intrinsic - when G1 is enabled - even when get() is the root
  2921         // method of the compile so that, if necessary, the value in
  2922         // the referent field of the reference object gets recorded by
  2923         // the pre-barrier code.
  2924         // Specifically, if G1 is enabled, the value in the referent
  2925         // field is recorded by the G1 SATB pre barrier. This will
  2926         // result in the referent being marked live and the reference
  2927         // object removed from the list of discovered references during
  2928         // reference processing.
  2930         // Set up a stream so that appending instructions works properly.
  2931         ciBytecodeStream s(scope->method());
  2932         s.reset_to_bci(0);
  2933         scope_data()->set_stream(&s);
  2934         s.next();
  2936         // setup the initial block state
  2937         _block = start_block;
  2938         _state = start_block->state()->copy_for_parsing();
  2939         _last  = start_block;
  2940         load_local(objectType, 0);
  2942         // Emit the intrinsic node.
  2943         bool result = try_inline_intrinsics(scope->method());
  2944         if (!result) BAILOUT("failed to inline intrinsic");
  2945         method_return(apop());
  2947         // connect the begin and end blocks and we're all done.
  2948         BlockEnd* end = last()->as_BlockEnd();
  2949         block()->set_end(end);
  2950         break;
  2951       }
  2952       // Otherwise, fall through
  2953     }
  2955   default:
  2956     scope_data()->add_to_work_list(start_block);
  2957     iterate_all_blocks();
  2958     break;
  2959   }
  2960   CHECK_BAILOUT();
  2962   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
  2964   eliminate_redundant_phis(_start);
  2966   NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  2967   // for osr compile, bailout if some requirements are not fulfilled
  2968   if (osr_bci != -1) {
  2969     BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
  2970     assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
  2972     // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
  2973     if (!osr_block->state()->stack_is_empty()) {
  2974       BAILOUT("stack not empty at OSR entry point");
  2975     }
  2976   }
  2977 #ifndef PRODUCT
  2978   if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
  2979 #endif
  2980 }
  2983 ValueStack* GraphBuilder::copy_state_before() {
  2984   return copy_state_before_with_bci(bci());
  2985 }
  2987 ValueStack* GraphBuilder::copy_state_exhandling() {
  2988   return copy_state_exhandling_with_bci(bci());
  2989 }
  2991 ValueStack* GraphBuilder::copy_state_for_exception() {
  2992   return copy_state_for_exception_with_bci(bci());
  2993 }
  2995 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
  2996   return state()->copy(ValueStack::StateBefore, bci);
  2997 }
  2999 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
  3000   if (!has_handler()) return NULL;
  3001   return state()->copy(ValueStack::StateBefore, bci);
  3002 }
  3004 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
  3005   ValueStack* s = copy_state_exhandling_with_bci(bci);
  3006   if (s == NULL) {
  3007     if (_compilation->env()->jvmti_can_access_local_variables()) {
  3008       s = state()->copy(ValueStack::ExceptionState, bci);
  3009     } else {
  3010       s = state()->copy(ValueStack::EmptyExceptionState, bci);
  3011     }
  3012   }
  3013   return s;
  3014 }
  3016 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
  3017   int recur_level = 0;
  3018   for (IRScope* s = scope(); s != NULL; s = s->caller()) {
  3019     if (s->method() == cur_callee) {
  3020       ++recur_level;
  3021     }
  3022   }
  3023   return recur_level;
  3024 }
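// Editorial example (not from the original source): if the current scope
// chain is  root a() -> inlined b() -> inlined a()  and cur_callee is a,
// the walk above sees methods a, b, a and returns 2; try_inline_full()
// compares this result against MaxRecursiveInlineLevel.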
  3027 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
  3028   // Clear out any existing inline bailout condition
  3029   clear_inline_bailout();
  3031   if (callee->should_exclude()) {
  3032     // callee is excluded
  3033     INLINE_BAILOUT("excluded by CompilerOracle")
  3034   } else if (!callee->can_be_compiled()) {
  3035     // callee is not compilable (prob. has breakpoints)
  3036     INLINE_BAILOUT("not compilable")
  3037   } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
  3038     // intrinsics can be native or not
  3039     return true;
  3040   } else if (callee->is_native()) {
  3041     // non-intrinsic natives cannot be inlined
  3042     INLINE_BAILOUT("non-intrinsic native")
  3043   } else if (callee->is_abstract()) {
  3044     INLINE_BAILOUT("abstract")
  3045   } else {
  3046     return try_inline_full(callee, holder_known);
  3047   }
  3048 }
  3051 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  3052   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
  3053   if (callee->is_synchronized()) {
  3054     // We don't currently support any synchronized intrinsics
  3055     return false;
  3056   }
  3058   // callee seems like a good candidate
  3059   // determine id
  3060   bool preserves_state = false;
  3061   bool cantrap = true;
  3062   vmIntrinsics::ID id = callee->intrinsic_id();
  3063   switch (id) {
  3064     case vmIntrinsics::_arraycopy     :
  3065       if (!InlineArrayCopy) return false;
  3066       break;
  3068     case vmIntrinsics::_currentTimeMillis:
  3069     case vmIntrinsics::_nanoTime:
  3070       preserves_state = true;
  3071       cantrap = false;
  3072       break;
  3074     case vmIntrinsics::_floatToRawIntBits   :
  3075     case vmIntrinsics::_intBitsToFloat      :
  3076     case vmIntrinsics::_doubleToRawLongBits :
  3077     case vmIntrinsics::_longBitsToDouble    :
  3078       if (!InlineMathNatives) return false;
  3079       preserves_state = true;
  3080       cantrap = false;
  3081       break;
  3083     case vmIntrinsics::_getClass      :
  3084       if (!InlineClassNatives) return false;
  3085       preserves_state = true;
  3086       break;
  3088     case vmIntrinsics::_currentThread :
  3089       if (!InlineThreadNatives) return false;
  3090       preserves_state = true;
  3091       cantrap = false;
  3092       break;
  3094     case vmIntrinsics::_dabs          : // fall through
  3095     case vmIntrinsics::_dsqrt         : // fall through
  3096     case vmIntrinsics::_dsin          : // fall through
  3097     case vmIntrinsics::_dcos          : // fall through
  3098     case vmIntrinsics::_dtan          : // fall through
  3099     case vmIntrinsics::_dlog          : // fall through
  3100     case vmIntrinsics::_dlog10        : // fall through
  3101       if (!InlineMathNatives) return false;
  3102       cantrap = false;
  3103       preserves_state = true;
  3104       break;
  3106     // sun/misc/AtomicLong.attemptUpdate
  3107     case vmIntrinsics::_attemptUpdate :
  3108       if (!VM_Version::supports_cx8()) return false;
  3109       if (!InlineAtomicLong) return false;
  3110       preserves_state = true;
  3111       break;
  3113     // Use special nodes for Unsafe instructions so we can more easily
  3114     // perform an address-mode optimization on the raw variants
  3115     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
  3116     case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
  3117     case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
  3118     case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
  3119     case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
  3120     case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
  3121     case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
  3122     case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
  3123     case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
  3125     case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
  3126     case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
  3127     case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
  3128     case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
  3129     case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
  3130     case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
  3131     case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
  3132     case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
  3133     case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
  3135     case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
  3136     case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
  3137     case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
  3138     case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
  3139     case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
  3140     case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
  3141     case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
  3142     case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
  3143     case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
  3145     case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  3146     case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
  3147     case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
  3148     case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
  3149     case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
  3150     case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
  3151     case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
  3152     case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
  3153     case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
  3155     case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
  3156     case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
  3157     case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
  3158     case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
  3159     case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
  3160     case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
  3161     case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);
  3163     case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
  3164     case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
  3165     case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
  3166     case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
  3167     case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
  3168     case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
  3169     case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);
  3171     case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
  3172     case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
  3173     case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true,  false);
  3174     case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true,  true);
  3176     case vmIntrinsics::_checkIndex    :
  3177       if (!InlineNIOCheckIndex) return false;
  3178       preserves_state = true;
  3179       break;
  3180     case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  3181     case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
  3182     case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
  3184     case vmIntrinsics::_compareAndSwapLong:
  3185       if (!VM_Version::supports_cx8()) return false;
  3186       // fall through
  3187     case vmIntrinsics::_compareAndSwapInt:
  3188     case vmIntrinsics::_compareAndSwapObject:
  3189       append_unsafe_CAS(callee);
  3190       return true;
  3192     case vmIntrinsics::_Reference_get:
  3193       // It is only when G1 is enabled that we absolutely
  3194       // need to use the intrinsic version of Reference.get()
  3195       // so that the value in the referent field, if necessary,
  3196       // can be registered by the pre-barrier code.
  3197       if (!UseG1GC) return false;
  3198       preserves_state = true;
  3199       break;
  3201     default                       : return false; // do not inline
  3202   }
  3203   // create intrinsic node
  3204   const bool has_receiver = !callee->is_static();
  3205   ValueType* result_type = as_ValueType(callee->return_type());
  3206   ValueStack* state_before = copy_state_for_exception();
  3208   Values* args = state()->pop_arguments(callee->arg_size());
  3210   if (is_profiling()) {
  3211     // Don't profile in the special case where the root method
  3212     // is the intrinsic
  3213     if (callee != method()) {
  3214       // Note that we'd collect profile data in this method if we wanted it.
  3215       compilation()->set_would_profile(true);
  3216       if (profile_calls()) {
  3217         Value recv = NULL;
  3218         if (has_receiver) {
  3219           recv = args->at(0);
  3220           null_check(recv);
  3221         }
  3222         profile_call(recv, NULL);
  3223       }
  3224     }
  3225   }
  3227   Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, state_before,
  3228                                     preserves_state, cantrap);
  3229   // append instruction & push result
  3230   Value value = append_split(result);
  3231   if (result_type != voidType) push(result_type, value);
  3233 #ifndef PRODUCT
  3234   // printing
  3235   if (PrintInlining) {
  3236     print_inline_result(callee, true);
  3237   }
  3238 #endif
  3240   // done
  3241   return true;
  3242 }
  3245 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  3246   // Introduce a new callee continuation point - all Ret instructions
  3247   // will be replaced with Gotos to this point.
  3248   BlockBegin* cont = block_at(next_bci());
  3249   assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");
  3251   // Note: can not assign state to continuation yet, as we have to
  3252   // pick up the state from the Ret instructions.
  3254   // Push callee scope
  3255   push_scope_for_jsr(cont, jsr_dest_bci);
  3257   // Temporarily set up bytecode stream so we can append instructions
  3258   // (only using the bci of this stream)
  3259   scope_data()->set_stream(scope_data()->parent()->stream());
  3261   BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  3262   assert(jsr_start_block != NULL, "jsr start block must exist");
  3263   assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  3264   Goto* goto_sub = new Goto(jsr_start_block, false);
  3265   // Must copy state to avoid wrong sharing when parsing bytecodes
  3266   assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
  3267   jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
  3268   append(goto_sub);
  3269   _block->set_end(goto_sub);
  3270   _last = _block = jsr_start_block;
  3272   // Clear out bytecode stream
  3273   scope_data()->set_stream(NULL);
  3275   scope_data()->add_to_work_list(jsr_start_block);
  3277   // Ready to resume parsing in subroutine
  3278   iterate_all_blocks();
  3280   // If we bailed out during parsing, return immediately (this is bad news)
  3281   CHECK_BAILOUT_(false);
  3283   // Detect whether the continuation can actually be reached. If not,
  3284   // it has not had state set by the join() operations in
  3285   // iterate_bytecodes_for_block()/ret() and we should not touch the
  3286   // iteration state. The calling activation of
  3287   // iterate_bytecodes_for_block will then complete normally.
  3288   if (cont->state() != NULL) {
  3289     if (!cont->is_set(BlockBegin::was_visited_flag)) {
  3290       // add continuation to work list instead of parsing it immediately
  3291       scope_data()->parent()->add_to_work_list(cont);
  3292     }
  3293   }
  3295   assert(jsr_continuation() == cont, "continuation must not have changed");
  3296   assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
  3297          jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
  3298          "continuation can only be visited in case of backward branches");
  3299   assert(_last && _last->as_BlockEnd(), "block must have end");
  3301   // continuation is in work list, so end iteration of current block
  3302   _skip_block = true;
  3303   pop_scope_for_jsr();
  3305   return true;
  3306 }
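// Editorial illustration (not from the original source): for bytecode like
//
//   10: jsr 30        // subroutine call
//   13: ...           // continuation block (block_at(next_bci()) above)
//   30: astore_1      // subroutine saves the return address
//   40: ret 1         // rewritten during parsing into a Goto to bci 13
//
// each Ret becomes a Goto to the continuation, which is why the
// continuation's state is only known after the subroutine has been parsed.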
  3309 // Inline the entry of a synchronized method as a monitor enter and
  3310 // register the exception handler which releases the monitor if an
  3311 // exception is thrown within the callee. Note that the monitor enter
  3312 // cannot throw an exception itself, because the receiver is
  3313 // guaranteed to be non-null by the explicit null check at the
  3314 // beginning of inlining.
  3315 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
  3316   assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
  3318   monitorenter(lock, SynchronizationEntryBCI);
  3319   assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
  3320   _last->set_needs_null_check(false);
  3322   sync_handler->set(BlockBegin::exception_entry_flag);
  3323   sync_handler->set(BlockBegin::is_on_work_list_flag);
  3325   ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  3326   XHandler* h = new XHandler(desc);
  3327   h->set_entry_block(sync_handler);
  3328   scope_data()->xhandlers()->append(h);
  3329   scope_data()->set_has_handler();
  3330 }
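// Editorial illustration (not from the original source): after
// inline_sync_entry() the inlined callee is bracketed roughly as
//
//   MonitorEnter(lock)      // cannot trap: receiver was null-checked already
//   ... inlined body ...    // covered by the catch-all XHandler above
//
// fill_sync_handler() later fills sync_handler with the MonitorExit and
// the rethrow of the pending exception.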
  3333 // If an exception is thrown and not handled within an inlined
  3334 // synchronized method, the monitor must be released before the
  3335 // exception is rethrown in the outer scope. Generate the appropriate
  3336 // instructions here.
  3337 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
  3338   BlockBegin* orig_block = _block;
  3339   ValueStack* orig_state = _state;
  3340   Instruction* orig_last = _last;
  3341   _last = _block = sync_handler;
  3342   _state = sync_handler->state()->copy();
  3344   assert(sync_handler != NULL, "handler missing");
  3345   assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here");
  3347   assert(lock != NULL || default_handler, "lock or handler missing");
  3349   XHandler* h = scope_data()->xhandlers()->remove_last();
  3350   assert(h->entry_block() == sync_handler, "corrupt list of handlers");
  3352   block()->set(BlockBegin::was_visited_flag);
  3353   Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
  3354   assert(exception->is_pinned(), "must be");
  3356   int bci = SynchronizationEntryBCI;
  3357   if (compilation()->env()->dtrace_method_probes()) {
  3358     // Report exit from inline methods.  We don't have a stream here
  3359     // so pass an explicit bci of SynchronizationEntryBCI.
  3360     Values* args = new Values(1);
  3361     args->push(append_with_bci(new Constant(new ObjectConstant(method())), bci));
  3362     append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
  3363   }
  3365   if (lock) {
  3366     assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
  3367     if (!lock->is_linked()) {
  3368       lock = append_with_bci(lock, bci);
  3371     // exit the monitor in the context of the synchronized method
  3372     monitorexit(lock, bci);
  3374     // exit the context of the synchronized method
  3375     if (!default_handler) {
  3376       pop_scope();
  3377       bci = _state->caller_state()->bci();
  3378       _state = _state->caller_state()->copy_for_parsing();
  3379     }
  3380   }
  3382   // perform the throw as if at the call site
  3383   apush(exception);
  3384   throw_op(bci);
  3386   BlockEnd* end = last()->as_BlockEnd();
  3387   block()->set_end(end);
  3389   _block = orig_block;
  3390   _state = orig_state;
  3391   _last = orig_last;
  3392 }
  3395 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  3396   assert(!callee->is_native(), "callee must not be native");
  3397   if (count_backedges() && callee->has_loops()) {
  3398     INLINE_BAILOUT("too complex for tiered");
  3399   }
  3400   // first perform tests of things it's not possible to inline
  3401   if (callee->has_exception_handlers() &&
  3402       !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
  3403   if (callee->is_synchronized() &&
  3404       !InlineSynchronizedMethods         ) INLINE_BAILOUT("callee is synchronized");
  3405   if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
  3406   if (!callee->has_balanced_monitors())    INLINE_BAILOUT("callee's monitors do not match");
  3408   // Proper inlining of methods with jsrs requires a little more work.
  3409   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
  3411   // now perform tests that are based on flag settings
  3412   if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
  3413   if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
  3414   if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
  3416   // don't inline throwable methods unless the inlining tree is rooted in a throwable class
  3417   if (callee->name() == ciSymbol::object_initializer_name() &&
  3418       callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  3419     // Throwable constructor call
  3420     IRScope* top = scope();
  3421     while (top->caller() != NULL) {
  3422       top = top->caller();
  3423     }
  3424     if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  3425       INLINE_BAILOUT("don't inline Throwable constructors");
  3426     }
  3427   }
  3429   // When SSE2 is used on Intel, no special handling is needed
  3430   // for strictfp because the enum-constant is fixed at compile time;
  3431   // hence the check for UseSSE < 2 here
  3432   if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
  3433     INLINE_BAILOUT("caller and callee have different strict fp requirements");
  3434   }
  3436   if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
  3437     INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
  3438   }
  3440   if (is_profiling() && !callee->ensure_method_data()) {
  3441     INLINE_BAILOUT("mdo allocation failed");
  3442   }
  3443 #ifndef PRODUCT
  3444   // printing
  3445   if (PrintInlining) {
  3446     print_inline_result(callee, true);
  3447   }
  3448 #endif
  3450   // NOTE: Bailouts from this point on, which occur at the
  3451   // GraphBuilder level, do not cause bailout just of the inlining but
  3452   // in fact of the entire compilation.
  3454   BlockBegin* orig_block = block();
  3456   const int args_base = state()->stack_size() - callee->arg_size();
  3457   assert(args_base >= 0, "stack underflow during inlining");
  3459   // Insert null check if necessary
  3460   Value recv = NULL;
  3461   if (code() != Bytecodes::_invokestatic) {
  3462     // note: null check must happen even if first instruction of callee does
  3463     //       an implicit null check since the callee is in a different scope
  3464     //       and we must make sure exception handling does the right thing
  3465     assert(!callee->is_static(), "callee must not be static");
  3466     assert(callee->arg_size() > 0, "must have at least a receiver");
  3467     recv = state()->stack_at(args_base);
  3468     null_check(recv);
  3469   }
  3471   if (is_profiling()) {
  3472     // Note that we'd collect profile data in this method if we wanted it.
  3473     // this may be redundant here...
  3474     compilation()->set_would_profile(true);
  3476     if (profile_calls()) {
  3477       profile_call(recv, holder_known ? callee->holder() : NULL);
  3478     }
  3479     if (profile_inlined_calls()) {
  3480       profile_invocation(callee, copy_state_before());
  3481     }
  3482   }
  3484   // Introduce a new callee continuation point - if the callee has
  3485   // more than one return instruction or the return does not allow
  3486   // fall-through of control flow, all return instructions of the
  3487   // callee will need to be replaced by Goto's pointing to this
  3488   // continuation point.
  3489   BlockBegin* cont = block_at(next_bci());
  3490   bool continuation_existed = true;
  3491   if (cont == NULL) {
  3492     cont = new BlockBegin(next_bci());
  3493     // low number so that continuation gets parsed as early as possible
  3494     cont->set_depth_first_number(0);
  3495 #ifndef PRODUCT
  3496     if (PrintInitialBlockList) {
  3497       tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
  3498                     cont->block_id(), cont->bci(), bci());
  3499     }
  3500 #endif
  3501     continuation_existed = false;
  3502   }
  3503   // Record number of predecessors of continuation block before
  3504   // inlining, to detect if inlined method has edges to its
  3505   // continuation after inlining.
  3506   int continuation_preds = cont->number_of_preds();
  3508   // Push callee scope
  3509   push_scope(callee, cont);
  3511   // the BlockListBuilder for the callee could have bailed out
  3512   CHECK_BAILOUT_(false);
  3514   // Temporarily set up bytecode stream so we can append instructions
  3515   // (only using the bci of this stream)
  3516   scope_data()->set_stream(scope_data()->parent()->stream());
  3518   // Pass parameters into callee state: add assignments
  3519   // note: this will also ensure that all arguments are computed before being passed
  3520   ValueStack* callee_state = state();
  3521   ValueStack* caller_state = state()->caller_state();
  3522   { int i = args_base;
  3523     while (i < caller_state->stack_size()) {
  3524       const int par_no = i - args_base;
  3525       Value  arg = caller_state->stack_at_inc(i);
  3526       // NOTE: take base() of arg->type() to avoid problems storing
  3527       // constants
  3528       store_local(callee_state, arg, arg->type()->base(), par_no);
  3529     }
  3530   }
  3532   // Remove args from stack.
  3533   // Note that we preserve locals state in case we can use it later
  3534   // (see use of pop_scope() below)
  3535   caller_state->truncate_stack(args_base);
  3536   assert(callee_state->stack_size() == 0, "callee stack must be empty");
  3538   Value lock;
  3539   BlockBegin* sync_handler;
  3541   // Inline the locking of the receiver if the callee is synchronized
  3542   if (callee->is_synchronized()) {
  3543     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
  3544                                : state()->local_at(0);
  3545     sync_handler = new BlockBegin(SynchronizationEntryBCI);
  3546     inline_sync_entry(lock, sync_handler);
  3547   }
  3549   if (compilation()->env()->dtrace_method_probes()) {
  3550     Values* args = new Values(1);
  3551     args->push(append(new Constant(new ObjectConstant(method()))));
  3552     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
  3553   }
  3555   BlockBegin* callee_start_block = block_at(0);
  3556   if (callee_start_block != NULL) {
  3557     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
  3558     Goto* goto_callee = new Goto(callee_start_block, false);
  3559     // The state for this goto is in the scope of the callee, so use
  3560     // the entry bci for the callee instead of the call site bci.
  3561     append_with_bci(goto_callee, 0);
  3562     _block->set_end(goto_callee);
  3563     callee_start_block->merge(callee_state);
  3565     _last = _block = callee_start_block;
  3567     scope_data()->add_to_work_list(callee_start_block);
  3568   }
  3570   // Clear out bytecode stream
  3571   scope_data()->set_stream(NULL);
  3573   // Ready to resume parsing in callee (either in the same block we
  3574   // were in before or in the callee's start block)
  3575   iterate_all_blocks(callee_start_block == NULL);
  3577   // If we bailed out during parsing, return immediately (this is bad news)
  3578   if (bailed_out()) return false;
  3580   // iterate_all_blocks theoretically traverses in random order; in
  3581   // practice, we have only traversed the continuation if we are
  3582   // inlining into a subroutine
  3583   assert(continuation_existed ||
  3584          !continuation()->is_set(BlockBegin::was_visited_flag),
  3585          "continuation should not have been parsed yet if we created it");
  3587   // If we bailed out during parsing, return immediately (this is bad news)
  3588   CHECK_BAILOUT_(false);
  3590   // At this point we are almost ready to return and resume parsing of
  3591   // the caller back in the GraphBuilder. The only thing we want to do
  3592   // first is an optimization: during parsing of the callee we
  3593   // generated at least one Goto to the continuation block. If we
  3594   // generated exactly one, and if the inlined method spanned exactly
  3595   // one block (and we didn't have to Goto its entry), then we snip
  3596   // off the Goto to the continuation, allowing control to fall
  3597   // through back into the caller block and effectively performing
  3598   // block merging. This allows load elimination and CSE to take place
  3599   // across multiple callee scopes if they are relatively simple, and
  3600   // is currently essential to making inlining profitable.
  3601   if (   num_returns() == 1
  3602       && block() == orig_block
  3603       && block() == inline_cleanup_block()) {
  3604     _last = inline_cleanup_return_prev();
  3605     _state = inline_cleanup_state();
  3606   } else if (continuation_preds == cont->number_of_preds()) {
  3607     // Inlining has made the instructions after the invoke in the
  3608     // caller unreachable, so skip filling this block
  3609     // with instructions!
  3610     assert (cont == continuation(), "");
  3611     assert(_last && _last->as_BlockEnd(), "");
  3612     _skip_block = true;
  3613   } else {
  3614     // Resume parsing in continuation block unless it was already parsed.
  3615     // Note that if we don't change _last here, iteration in
  3616     // iterate_bytecodes_for_block will stop when we return.
  3617     if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
  3618       // add continuation to work list instead of parsing it immediately
  3619       assert(_last && _last->as_BlockEnd(), "");
  3620       scope_data()->parent()->add_to_work_list(continuation());
  3621       _skip_block = true;
  3622     }
  3623   }
  3625   // Fill the exception handler for synchronized methods with instructions
  3626   if (callee->is_synchronized() && sync_handler->state() != NULL) {
  3627     fill_sync_handler(lock, sync_handler);
  3628   } else {
  3629     pop_scope();
  3630   }
  3632   compilation()->notice_inlined_method(callee);
  3634   return true;
  3635 }
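// Editorial illustration (not from the original source) of the single-return
// "snip" above. Before:
//
//   [ caller block ... invoke ] -> [ one-block callee ] --Goto--> [ cont ]
//
// When the callee was a single block with exactly one return, the trailing
// Goto is dropped and parsing keeps appending into the caller block, so
// load elimination and CSE see one straight-line block.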
  3638 void GraphBuilder::inline_bailout(const char* msg) {
  3639   assert(msg != NULL, "inline bailout msg must exist");
  3640   _inline_bailout_msg = msg;
  3641 }
  3644 void GraphBuilder::clear_inline_bailout() {
  3645   _inline_bailout_msg = NULL;
  3646 }
  3649 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
  3650   ScopeData* data = new ScopeData(NULL);
  3651   data->set_scope(scope);
  3652   data->set_bci2block(bci2block);
  3653   _scope_data = data;
  3654   _block = start;
  3655 }
  3658 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  3659   IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  3660   scope()->add_callee(callee_scope);
  3662   BlockListBuilder blb(compilation(), callee_scope, -1);
  3663   CHECK_BAILOUT();
  3665   if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
  3666     // this scope can be inlined directly into the caller so remove
  3667     // the block at bci 0.
  3668     blb.bci2block()->at_put(0, NULL);
  3669   }
  3671   set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
  3673   ScopeData* data = new ScopeData(scope_data());
  3674   data->set_scope(callee_scope);
  3675   data->set_bci2block(blb.bci2block());
  3676   data->set_continuation(continuation);
  3677   _scope_data = data;
  3678 }
  3681 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  3682   ScopeData* data = new ScopeData(scope_data());
  3683   data->set_parsing_jsr();
  3684   data->set_jsr_entry_bci(jsr_dest_bci);
  3685   data->set_jsr_return_address_local(-1);
  3686   // Must clone bci2block list as we will be mutating it in order to
  3687   // properly clone all blocks in jsr region as well as exception
  3688   // handlers containing rets
  3689   BlockList* new_bci2block = new BlockList(bci2block()->length());
  3690   new_bci2block->push_all(bci2block());
  3691   data->set_bci2block(new_bci2block);
  3692   data->set_scope(scope());
  3693   data->setup_jsr_xhandlers();
  3694   data->set_continuation(continuation());
  3695   data->set_jsr_continuation(jsr_continuation);
  3696   _scope_data = data;
  3697 }
  3700 void GraphBuilder::pop_scope() {
  3701   int number_of_locks = scope()->number_of_locks();
  3702   _scope_data = scope_data()->parent();
  3703   // accumulate minimum number of monitor slots to be reserved
  3704   scope()->set_min_number_of_locks(number_of_locks);
  3705 }
  3708 void GraphBuilder::pop_scope_for_jsr() {
  3709   _scope_data = scope_data()->parent();
  3710 }
  3712 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  3713   if (InlineUnsafeOps) {
  3714     Values* args = state()->pop_arguments(callee->arg_size());
  3715     null_check(args->at(0));
  3716     Instruction* offset = args->at(2);
  3717 #ifndef _LP64
  3718     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  3719 #endif
  3720     Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
  3721     push(op->type(), op);
  3722     compilation()->set_has_unsafe_access(true);
  3723   }
  3724   return InlineUnsafeOps;
  3725 }
  3728 bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  3729   if (InlineUnsafeOps) {
  3730     Values* args = state()->pop_arguments(callee->arg_size());
  3731     null_check(args->at(0));
  3732     Instruction* offset = args->at(2);
  3733 #ifndef _LP64
  3734     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  3735 #endif
  3736     Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
  3737     compilation()->set_has_unsafe_access(true);
  3738     kill_all();
  3739   }
  3740   return InlineUnsafeOps;
  3741 }
  3744 bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
  3745   if (InlineUnsafeOps) {
  3746     Values* args = state()->pop_arguments(callee->arg_size());
  3747     null_check(args->at(0));
  3748     Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
  3749     push(op->type(), op);
  3750     compilation()->set_has_unsafe_access(true);
  3751   }
  3752   return InlineUnsafeOps;
  3753 }
  3756 bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
  3757   if (InlineUnsafeOps) {
  3758     Values* args = state()->pop_arguments(callee->arg_size());
  3759     null_check(args->at(0));
  3760     Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
  3761     compilation()->set_has_unsafe_access(true);
  3762   }
  3763   return InlineUnsafeOps;
  3764 }
  3767 bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
  3768   if (InlineUnsafeOps) {
  3769     Values* args = state()->pop_arguments(callee->arg_size());
  3770     int obj_arg_index = 1; // Assume non-static case
  3771     if (is_static) {
  3772       obj_arg_index = 0;
  3773     } else {
  3774       null_check(args->at(0));
  3775     }
  3776     Instruction* offset = args->at(obj_arg_index + 1);
  3777 #ifndef _LP64
  3778     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  3779 #endif
  3780     Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
  3781                                : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
  3782     compilation()->set_has_unsafe_access(true);
  3783   }
  3784   return InlineUnsafeOps;
  3785 }
  3788 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  3789   ValueStack* state_before = copy_state_for_exception();
  3790   ValueType* result_type = as_ValueType(callee->return_type());
  3791   assert(result_type->is_int(), "int result");
  3792   Values* args = state()->pop_arguments(callee->arg_size());
  3794   // Pop off some args to specially handle, then push them back
  3795   Value newval = args->pop();
  3796   Value cmpval = args->pop();
  3797   Value offset = args->pop();
  3798   Value src = args->pop();
  3799   Value unsafe_obj = args->pop();
  3801   // Separately handle the unsafe arg. It is not needed for code
  3802   // generation, but must be null checked
  3803   null_check(unsafe_obj);
  3805 #ifndef _LP64
  3806   offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  3807 #endif
  3809   args->push(src);
  3810   args->push(offset);
  3811   args->push(cmpval);
  3812   args->push(newval);
  3814   // An unsafe CAS can alias with other field accesses, but we don't
  3815   // know which ones, so mark the state as not preserved.  This will
  3816   // cause CSE to invalidate memory across it.
  3817   bool preserves_state = false;
  3818   Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
  3819   append_split(result);
  3820   push(result_type, result);
  3821   compilation()->set_has_unsafe_access(true);
  3822 }
  3825 #ifndef PRODUCT
  3826 void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
  3827   const char sync_char      = callee->is_synchronized()        ? 's' : ' ';
  3828   const char exception_char = callee->has_exception_handlers() ? '!' : ' ';
  3829   const char monitors_char  = callee->has_monitor_bytecodes()  ? 'm' : ' ';
  3830   tty->print("     %c%c%c ", sync_char, exception_char, monitors_char);
  3831   for (int i = 0; i < scope()->level(); i++) tty->print("  ");
  3832   if (res) {
  3833     tty->print("  ");
  3834   } else {
  3835     tty->print("- ");
  3836   }
  3837   tty->print("@ %d  ", bci());
  3838   callee->print_short_name();
  3839   tty->print(" (%d bytes)", callee->code_size());
  3840   if (_inline_bailout_msg) {
  3841     tty->print("  %s", _inline_bailout_msg);
  3842   }
  3843   tty->cr();
  3845   if (res && CIPrintMethodCodes) {
  3846     callee->print_codes();
  3847   }
  3848 }
  3851 void GraphBuilder::print_stats() {
  3852   vmap()->print();
  3853 }
  3854 #endif // PRODUCT
  3856 void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  3857   append(new ProfileCall(method(), bci(), recv, known_holder));
  3858 }
  3860 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
  3861   append(new ProfileInvoke(callee, state));
  3862 }
