src/share/vm/c1/c1_GraphBuilder.cpp

author:      roland
date:        Wed, 02 Jul 2014 22:54:18 +0200
changeset:   6746:dda2ae6f9557
parent:      6668:45e59fae8f2b
child:       6747:ee1c924763d2
permissions: -rw-r--r--

8046542: [I.finalize() calls from methods compiled by C1 do not cause IllegalAccessError on Sparc
Summary: call to Object.finalize() sometimes allowed by compilers on array type
Reviewed-by: iveresov, vlivanov

     1 /*
     2  * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "c1/c1_CFGPrinter.hpp"
    27 #include "c1/c1_Canonicalizer.hpp"
    28 #include "c1/c1_Compilation.hpp"
    29 #include "c1/c1_GraphBuilder.hpp"
    30 #include "c1/c1_InstructionPrinter.hpp"
    31 #include "ci/ciCallSite.hpp"
    32 #include "ci/ciField.hpp"
    33 #include "ci/ciKlass.hpp"
    34 #include "ci/ciMemberName.hpp"
    35 #include "compiler/compileBroker.hpp"
    36 #include "interpreter/bytecode.hpp"
    37 #include "runtime/sharedRuntime.hpp"
    38 #include "runtime/compilationPolicy.hpp"
    39 #include "utilities/bitMap.inline.hpp"
    41 class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
    42  private:
    43   Compilation* _compilation;
    44   IRScope*     _scope;
    46   BlockList    _blocks;                // internal list of all blocks
    47   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
    49   // fields used by mark_loops
    50   BitMap       _active;                // for iteration of control flow graph
    51   BitMap       _visited;               // for iteration of control flow graph
    52   intArray     _loop_map;              // caches the information if a block is contained in a loop
    53   int          _next_loop_index;       // next free loop number
    54   int          _next_block_number;     // for reverse postorder numbering of blocks
    56   // accessors
    57   Compilation*  compilation() const              { return _compilation; }
    58   IRScope*      scope() const                    { return _scope; }
    59   ciMethod*     method() const                   { return scope()->method(); }
    60   XHandlers*    xhandlers() const                { return scope()->xhandlers(); }
    62   // unified bailout support
    63   void          bailout(const char* msg) const   { compilation()->bailout(msg); }
    64   bool          bailed_out() const               { return compilation()->bailed_out(); }
    66   // helper functions
    67   BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
    68   void handle_exceptions(BlockBegin* current, int cur_bci);
    69   void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
    70   void store_one(BlockBegin* current, int local);
    71   void store_two(BlockBegin* current, int local);
    72   void set_entries(int osr_bci);
    73   void set_leaders();
    75   void make_loop_header(BlockBegin* block);
    76   void mark_loops();
    77   int  mark_loops(BlockBegin* b, bool in_subroutine);
    79   // debugging
    80 #ifndef PRODUCT
    81   void print();
    82 #endif
    84  public:
    85   // creation
    86   BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);
    88   // accessors for GraphBuilder
    89   BlockList*    bci2block() const                { return _bci2block; }
    90 };
    93 // Implementation of BlockListBuilder
    95 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
    96  : _compilation(compilation)
    97  , _scope(scope)
    98  , _blocks(16)
    99  , _bci2block(new BlockList(scope->method()->code_size(), NULL))
   100  , _next_block_number(0)
   101  , _active()         // size not known yet
   102  , _visited()        // size not known yet
   103  , _next_loop_index(0)
   104  , _loop_map() // size not known yet
   105 {
   106   set_entries(osr_bci);
   107   set_leaders();
   108   CHECK_BAILOUT();
   110   mark_loops();
   111   NOT_PRODUCT(if (PrintInitialBlockList) print());
   113 #ifndef PRODUCT
   114   if (PrintCFGToFile) {
   115     stringStream title;
   116     title.print("BlockListBuilder ");
   117     scope->method()->print_name(&title);
   118     CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
   119   }
   120 #endif
   121 }
   124 void BlockListBuilder::set_entries(int osr_bci) {
   125   // generate start blocks
   126   BlockBegin* std_entry = make_block_at(0, NULL);
   127   if (scope()->caller() == NULL) {
   128     std_entry->set(BlockBegin::std_entry_flag);
   129   }
   130   if (osr_bci != -1) {
   131     BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
   132     osr_entry->set(BlockBegin::osr_entry_flag);
   133   }
   135   // generate exception entry blocks
   136   XHandlers* list = xhandlers();
   137   const int n = list->length();
   138   for (int i = 0; i < n; i++) {
   139     XHandler* h = list->handler_at(i);
   140     BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
   141     entry->set(BlockBegin::exception_entry_flag);
   142     h->set_entry_block(entry);
   143   }
   144 }
   147 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
   148   assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");
   150   BlockBegin* block = _bci2block->at(cur_bci);
   151   if (block == NULL) {
   152     block = new BlockBegin(cur_bci);
   153     block->init_stores_to_locals(method()->max_locals());
   154     _bci2block->at_put(cur_bci, block);
   155     _blocks.append(block);
   157     assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
   158   }
   160   if (predecessor != NULL) {
   161     if (block->is_set(BlockBegin::exception_entry_flag)) {
   162       BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
   163     }
   165     predecessor->add_successor(block);
   166     block->increment_total_preds();
   167   }
   169   return block;
   170 }
   173 inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
   174   current->stores_to_locals().set_bit(local);
   175 }
   176 inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
   177   store_one(current, local);
   178   store_one(current, local + 1);
   179 }
   182 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
   183   // Draws edges from a block to its exception handlers
   184   XHandlers* list = xhandlers();
   185   const int n = list->length();
   187   for (int i = 0; i < n; i++) {
   188     XHandler* h = list->handler_at(i);
   190     if (h->covers(cur_bci)) {
   191       BlockBegin* entry = h->entry_block();
   192       assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
   193       assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");
   195       // add each exception handler only once
   196       if (!current->is_successor(entry)) {
   197         current->add_successor(entry);
   198         entry->increment_total_preds();
   199       }
   201       // stop when reaching catchall
   202       if (h->catch_type() == 0) break;
   203     }
   204   }
   205 }
   207 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
   208   // start a new block after jsr-bytecode and link this block into cfg
   209   make_block_at(next_bci, current);
   211   // start a new block at the subroutine entry and mark it with a special flag
   212   BlockBegin* sr_block = make_block_at(sr_bci, current);
   213   if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
   214     sr_block->set(BlockBegin::subroutine_entry_flag);
   215   }
   216 }
   219 void BlockListBuilder::set_leaders() {
   220   bool has_xhandlers = xhandlers()->has_handlers();
   221   BlockBegin* current = NULL;
   223   // The information about which bcis start a new block simplifies the analysis.
   224   // Without it, backward branches could jump to a bci where no block was created
   225   // during bytecode iteration. This would require the creation of a new block at the
   226   // branch target and a modification of the successor lists.
   227   BitMap bci_block_start = method()->bci_block_start();
   229   ciBytecodeStream s(method());
   230   while (s.next() != ciBytecodeStream::EOBC()) {
   231     int cur_bci = s.cur_bci();
   233     if (bci_block_start.at(cur_bci)) {
   234       current = make_block_at(cur_bci, current);
   235     }
   236     assert(current != NULL, "must have current block");
   238     if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
   239       handle_exceptions(current, cur_bci);
   240     }
   242     switch (s.cur_bc()) {
   243       // track stores to local variables for selective creation of phi functions
   244       case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
   245       case Bytecodes::_istore:   store_one(current, s.get_index()); break;
   246       case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
   247       case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
   248       case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
   249       case Bytecodes::_astore:   store_one(current, s.get_index()); break;
   250       case Bytecodes::_istore_0: store_one(current, 0); break;
   251       case Bytecodes::_istore_1: store_one(current, 1); break;
   252       case Bytecodes::_istore_2: store_one(current, 2); break;
   253       case Bytecodes::_istore_3: store_one(current, 3); break;
   254       case Bytecodes::_lstore_0: store_two(current, 0); break;
   255       case Bytecodes::_lstore_1: store_two(current, 1); break;
   256       case Bytecodes::_lstore_2: store_two(current, 2); break;
   257       case Bytecodes::_lstore_3: store_two(current, 3); break;
   258       case Bytecodes::_fstore_0: store_one(current, 0); break;
   259       case Bytecodes::_fstore_1: store_one(current, 1); break;
   260       case Bytecodes::_fstore_2: store_one(current, 2); break;
   261       case Bytecodes::_fstore_3: store_one(current, 3); break;
   262       case Bytecodes::_dstore_0: store_two(current, 0); break;
   263       case Bytecodes::_dstore_1: store_two(current, 1); break;
   264       case Bytecodes::_dstore_2: store_two(current, 2); break;
   265       case Bytecodes::_dstore_3: store_two(current, 3); break;
   266       case Bytecodes::_astore_0: store_one(current, 0); break;
   267       case Bytecodes::_astore_1: store_one(current, 1); break;
   268       case Bytecodes::_astore_2: store_one(current, 2); break;
   269       case Bytecodes::_astore_3: store_one(current, 3); break;
   271       // track bytecodes that affect the control flow
   272       case Bytecodes::_athrow:  // fall through
   273       case Bytecodes::_ret:     // fall through
   274       case Bytecodes::_ireturn: // fall through
   275       case Bytecodes::_lreturn: // fall through
   276       case Bytecodes::_freturn: // fall through
   277       case Bytecodes::_dreturn: // fall through
   278       case Bytecodes::_areturn: // fall through
   279       case Bytecodes::_return:
   280         current = NULL;
   281         break;
   283       case Bytecodes::_ifeq:      // fall through
   284       case Bytecodes::_ifne:      // fall through
   285       case Bytecodes::_iflt:      // fall through
   286       case Bytecodes::_ifge:      // fall through
   287       case Bytecodes::_ifgt:      // fall through
   288       case Bytecodes::_ifle:      // fall through
   289       case Bytecodes::_if_icmpeq: // fall through
   290       case Bytecodes::_if_icmpne: // fall through
   291       case Bytecodes::_if_icmplt: // fall through
   292       case Bytecodes::_if_icmpge: // fall through
   293       case Bytecodes::_if_icmpgt: // fall through
   294       case Bytecodes::_if_icmple: // fall through
   295       case Bytecodes::_if_acmpeq: // fall through
   296       case Bytecodes::_if_acmpne: // fall through
   297       case Bytecodes::_ifnull:    // fall through
   298       case Bytecodes::_ifnonnull:
   299         make_block_at(s.next_bci(), current);
   300         make_block_at(s.get_dest(), current);
   301         current = NULL;
   302         break;
   304       case Bytecodes::_goto:
   305         make_block_at(s.get_dest(), current);
   306         current = NULL;
   307         break;
   309       case Bytecodes::_goto_w:
   310         make_block_at(s.get_far_dest(), current);
   311         current = NULL;
   312         break;
   314       case Bytecodes::_jsr:
   315         handle_jsr(current, s.get_dest(), s.next_bci());
   316         current = NULL;
   317         break;
   319       case Bytecodes::_jsr_w:
   320         handle_jsr(current, s.get_far_dest(), s.next_bci());
   321         current = NULL;
   322         break;
   324       case Bytecodes::_tableswitch: {
   325         // set block for each case
   326         Bytecode_tableswitch sw(&s);
   327         int l = sw.length();
   328         for (int i = 0; i < l; i++) {
   329           make_block_at(cur_bci + sw.dest_offset_at(i), current);
   330         }
   331         make_block_at(cur_bci + sw.default_offset(), current);
   332         current = NULL;
   333         break;
   334       }
   336       case Bytecodes::_lookupswitch: {
   337         // set block for each case
   338         Bytecode_lookupswitch sw(&s);
   339         int l = sw.number_of_pairs();
   340         for (int i = 0; i < l; i++) {
   341           make_block_at(cur_bci + sw.pair_at(i).offset(), current);
   342         }
   343         make_block_at(cur_bci + sw.default_offset(), current);
   344         current = NULL;
   345         break;
   346       }
   347     }
   348   }
   349 }
   352 void BlockListBuilder::mark_loops() {
   353   ResourceMark rm;
   355   _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
   356   _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
   357   _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
   358   _next_loop_index = 0;
   359   _next_block_number = _blocks.length();
   361   // recursively iterate the control flow graph
   362   mark_loops(_bci2block->at(0), false);
   363   assert(_next_block_number >= 0, "invalid block numbers");
   364 }
   366 void BlockListBuilder::make_loop_header(BlockBegin* block) {
   367   if (block->is_set(BlockBegin::exception_entry_flag)) {
   368     // exception edges may look like loops but don't mark them as such
   369     // since it screws up block ordering.
   370     return;
   371   }
   372   if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
   373     block->set(BlockBegin::parser_loop_header_flag);
   375     assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
   376     assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
   377     _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
   378     if (_next_loop_index < 31) _next_loop_index++;
   379   } else {
   380     // block already marked as loop header
   381     assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
   382   }
   383 }
   385 int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
   386   int block_id = block->block_id();
   388   if (_visited.at(block_id)) {
   389     if (_active.at(block_id)) {
   390       // reached block via backward branch
   391       make_loop_header(block);
   392     }
   393     // return cached loop information for this block
   394     return _loop_map.at(block_id);
   395   }
   397   if (block->is_set(BlockBegin::subroutine_entry_flag)) {
   398     in_subroutine = true;
   399   }
   401   // set active and visited bits before successors are processed
   402   _visited.set_bit(block_id);
   403   _active.set_bit(block_id);
   405   intptr_t loop_state = 0;
   406   for (int i = block->number_of_sux() - 1; i >= 0; i--) {
   407     // recursively process all successors
   408     loop_state |= mark_loops(block->sux_at(i), in_subroutine);
   409   }
   411   // clear active-bit after all successors are processed
   412   _active.clear_bit(block_id);
   414   // reverse-post-order numbering of all blocks
   415   block->set_depth_first_number(_next_block_number);
   416   _next_block_number--;
   418   if (loop_state != 0 || in_subroutine ) {
   419     // block is contained at least in one loop, so phi functions are necessary
   420     // phi functions are also necessary for all locals stored in a subroutine
   421     scope()->requires_phi_function().set_union(block->stores_to_locals());
   422   }
   424   if (block->is_set(BlockBegin::parser_loop_header_flag)) {
   425     int header_loop_state = _loop_map.at(block_id);
   426     assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");
   428     // If the highest bit is set (i.e. when integer value is negative), the method
   429     // has 32 or more loops. This bit is never cleared because it is used for multiple loops
   430     if (header_loop_state >= 0) {
   431       clear_bits(loop_state, header_loop_state);
   432     }
   433   }
   435   // cache and return loop information for this block
   436   _loop_map.at_put(block_id, loop_state);
   437   return loop_state;
   438 }
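// (Clarifying note restating the comments in make_loop_header() and mark_loops()
// above: each parser loop header owns one bit of the 32-bit loop_state word, so a
// block's cached value has a bit set for every loop that still encloses it. A header
// clears its own bit from the state it propagates back to its predecessors; only
// bit 31, which is shared once a method has 32 or more loops, is never cleared.)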
   441 #ifndef PRODUCT
   443 int compare_depth_first(BlockBegin** a, BlockBegin** b) {
   444   return (*a)->depth_first_number() - (*b)->depth_first_number();
   445 }
   447 void BlockListBuilder::print() {
   448   tty->print("----- initial block list of BlockListBuilder for method ");
   449   method()->print_short_name();
   450   tty->cr();
   452   // better readability if blocks are sorted in processing order
   453   _blocks.sort(compare_depth_first);
   455   for (int i = 0; i < _blocks.length(); i++) {
   456     BlockBegin* cur = _blocks.at(i);
   457     tty->print("%4d: B%-4d bci: %-4d  preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
   459     tty->print(cur->is_set(BlockBegin::std_entry_flag)               ? " std" : "    ");
   460     tty->print(cur->is_set(BlockBegin::osr_entry_flag)               ? " osr" : "    ");
   461     tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
   462     tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)        ? " sr" : "   ");
   463     tty->print(cur->is_set(BlockBegin::parser_loop_header_flag)      ? " lh" : "   ");
   465     if (cur->number_of_sux() > 0) {
   466       tty->print("    sux: ");
   467       for (int j = 0; j < cur->number_of_sux(); j++) {
   468         BlockBegin* sux = cur->sux_at(j);
   469         tty->print("B%d ", sux->block_id());
   470       }
   471     }
   472     tty->cr();
   473   }
   474 }
   476 #endif
   479 // A simple growable array of Values indexed by ciFields
   480 class FieldBuffer: public CompilationResourceObj {
   481  private:
   482   GrowableArray<Value> _values;
   484  public:
   485   FieldBuffer() {}
   487   void kill() {
   488     _values.trunc_to(0);
   489   }
   491   Value at(ciField* field) {
   492     assert(field->holder()->is_loaded(), "must be a loaded field");
   493     int offset = field->offset();
   494     if (offset < _values.length()) {
   495       return _values.at(offset);
   496     } else {
   497       return NULL;
   498     }
   499   }
   501   void at_put(ciField* field, Value value) {
   502     assert(field->holder()->is_loaded(), "must be a loaded field");
   503     int offset = field->offset();
   504     _values.at_put_grow(offset, value, NULL);
   505   }
   507 };
   510 // MemoryBuffer is fairly simple model of the current state of memory.
   511 // It partitions memory into several pieces.  The first piece is
   512 // generic memory where little is known about the owner of the memory.
   513 // This is conceptually represented by the tuple <O, F, V> which says
   514 // that the field F of object O has value V.  This is flattened so
   515 // that F is represented by the offset of the field and the parallel
   516 // arrays _objects and _values are used for O and V.  Loads of O.F can
   517 // simply use V.  Newly allocated objects are kept in a separate list
   518 // along with a parallel array for each object which represents the
   519 // current value of its fields.  Stores of the default value to fields
   520 // which have never been stored to before are eliminated since they
   521 // are redundant.  Once newly allocated objects are stored into
   522 // another object or they are passed out of the current compile they
   523 // are treated like generic memory.
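//
// Illustrative sketch only (the class Point and the call escape() below are
// hypothetical and not part of these sources); the buffering described above would
// act on Java code such as:
//
//   Point p = new Point();   // p is tracked in _newobjects with its own FieldBuffer
//   p.x = 0;                 // store of a default value to a fresh object: eliminated
//   p.y = 5;                 // remembered in p's FieldBuffer
//   int t = p.y;             // load can be answered from the FieldBuffer
//   escape(p);               // p is passed out of the compile: generic memory again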
   525 class MemoryBuffer: public CompilationResourceObj {
   526  private:
   527   FieldBuffer                 _values;
   528   GrowableArray<Value>        _objects;
   529   GrowableArray<Value>        _newobjects;
   530   GrowableArray<FieldBuffer*> _fields;
   532  public:
   533   MemoryBuffer() {}
   535   StoreField* store(StoreField* st) {
   536     if (!EliminateFieldAccess) {
   537       return st;
   538     }
   540     Value object = st->obj();
   541     Value value = st->value();
   542     ciField* field = st->field();
   543     if (field->holder()->is_loaded()) {
   544       int offset = field->offset();
   545       int index = _newobjects.find(object);
   546       if (index != -1) {
   547         // newly allocated object with no other stores performed on this field
   548         FieldBuffer* buf = _fields.at(index);
   549         if (buf->at(field) == NULL && is_default_value(value)) {
   550 #ifndef PRODUCT
   551           if (PrintIRDuringConstruction && Verbose) {
   552             tty->print_cr("Eliminated store for object %d:", index);
   553             st->print_line();
   554           }
   555 #endif
   556           return NULL;
   557         } else {
   558           buf->at_put(field, value);
   559         }
   560       } else {
   561         _objects.at_put_grow(offset, object, NULL);
   562         _values.at_put(field, value);
   563       }
   565       store_value(value);
   566     } else {
   567       // if we held onto field names we could alias based on names but
   568       // we don't know what's being stored to so kill it all.
   569       kill();
   570     }
   571     return st;
   572   }
   575   // return true if this value corresponds to the default value of a field.
   576   bool is_default_value(Value value) {
   577     Constant* con = value->as_Constant();
   578     if (con) {
   579       switch (con->type()->tag()) {
   580         case intTag:    return con->type()->as_IntConstant()->value() == 0;
   581         case longTag:   return con->type()->as_LongConstant()->value() == 0;
   582         case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
   583         case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
   584         case objectTag: return con->type() == objectNull;
   585         default:  ShouldNotReachHere();
   586       }
   587     }
   588     return false;
   589   }
   592   // return either the actual value of a load or the load itself
   593   Value load(LoadField* load) {
   594     if (!EliminateFieldAccess) {
   595       return load;
   596     }
   598     if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
   599       // can't skip load since value might get rounded as a side effect
   600       return load;
   601     }
   603     ciField* field = load->field();
   604     Value object   = load->obj();
   605     if (field->holder()->is_loaded() && !field->is_volatile()) {
   606       int offset = field->offset();
   607       Value result = NULL;
   608       int index = _newobjects.find(object);
   609       if (index != -1) {
   610         result = _fields.at(index)->at(field);
   611       } else if (_objects.at_grow(offset, NULL) == object) {
   612         result = _values.at(field);
   613       }
   614       if (result != NULL) {
   615 #ifndef PRODUCT
   616         if (PrintIRDuringConstruction && Verbose) {
   617           tty->print_cr("Eliminated load: ");
   618           load->print_line();
   619         }
   620 #endif
   621         assert(result->type()->tag() == load->type()->tag(), "wrong types");
   622         return result;
   623       }
   624     }
   625     return load;
   626   }
   628   // Record this newly allocated object
   629   void new_instance(NewInstance* object) {
   630     int index = _newobjects.length();
   631     _newobjects.append(object);
   632     if (_fields.at_grow(index, NULL) == NULL) {
   633       _fields.at_put(index, new FieldBuffer());
   634     } else {
   635       _fields.at(index)->kill();
   636     }
   637   }
   639   void store_value(Value value) {
   640     int index = _newobjects.find(value);
   641     if (index != -1) {
   642       // stored a newly allocated object into another object.
   643       // Assume we've lost track of it as a separate slice of memory.
   644       // We could do better by keeping track of whether individual
   645       // fields could alias each other.
   646       _newobjects.remove_at(index);
   647       // pull out the field info and store it at the end of the field info
   648       // list so that it can be reused later.
   649       _fields.append(_fields.at(index));
   650       _fields.remove_at(index);
   651     }
   652   }
   654   void kill() {
   655     _newobjects.trunc_to(0);
   656     _objects.trunc_to(0);
   657     _values.kill();
   658   }
   659 };
   662 // Implementation of GraphBuilder's ScopeData
   664 GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
   665   : _parent(parent)
   666   , _bci2block(NULL)
   667   , _scope(NULL)
   668   , _has_handler(false)
   669   , _stream(NULL)
   670   , _work_list(NULL)
   671   , _parsing_jsr(false)
   672   , _jsr_xhandlers(NULL)
   673   , _caller_stack_size(-1)
   674   , _continuation(NULL)
   675   , _num_returns(0)
   676   , _cleanup_block(NULL)
   677   , _cleanup_return_prev(NULL)
   678   , _cleanup_state(NULL)
   679 {
   680   if (parent != NULL) {
   681     _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
   682   } else {
   683     _max_inline_size = MaxInlineSize;
   684   }
   685   if (_max_inline_size < MaxTrivialSize) {
   686     _max_inline_size = MaxTrivialSize;
   687   }
   688 }
   691 void GraphBuilder::kill_all() {
   692   if (UseLocalValueNumbering) {
   693     vmap()->kill_all();
   694   }
   695   _memory->kill();
   696 }
   699 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
   700   if (parsing_jsr()) {
   701     // It is necessary to clone all blocks associated with a
   702     // subroutine, including those for exception handlers in the scope
   703     // of the method containing the jsr (because those exception
   704     // handlers may contain ret instructions in some cases).
   705     BlockBegin* block = bci2block()->at(bci);
   706     if (block != NULL && block == parent()->bci2block()->at(bci)) {
   707       BlockBegin* new_block = new BlockBegin(block->bci());
   708 #ifndef PRODUCT
   709       if (PrintInitialBlockList) {
   710         tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
   711                       block->block_id(), block->bci(), new_block->block_id());
   712       }
   713 #endif
   714       // copy data from the block being cloned
   715       new_block->set_depth_first_number(block->depth_first_number());
   716       if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
   717       // Preserve certain flags for assertion checking
   718       if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
   719       if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);
   721       // copy was_visited_flag to allow early detection of bailouts
   722       // if a block that is used in a jsr has already been visited before,
   723       // it is shared between the normal control flow and a subroutine
   724       // BlockBegin::try_merge returns false when the flag is set, this leads
   725       // to a compilation bailout
   726       if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);
   728       bci2block()->at_put(bci, new_block);
   729       block = new_block;
   730     }
   731     return block;
   732   } else {
   733     return bci2block()->at(bci);
   734   }
   735 }
   738 XHandlers* GraphBuilder::ScopeData::xhandlers() const {
   739   if (_jsr_xhandlers == NULL) {
   740     assert(!parsing_jsr(), "");
   741     return scope()->xhandlers();
   742   }
   743   assert(parsing_jsr(), "");
   744   return _jsr_xhandlers;
   745 }
   748 void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
   749   _scope = scope;
   750   bool parent_has_handler = false;
   751   if (parent() != NULL) {
   752     parent_has_handler = parent()->has_handler();
   753   }
   754   _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
   755 }
   758 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
   759                                                       Instruction* return_prev,
   760                                                       ValueStack* return_state) {
   761   _cleanup_block       = block;
   762   _cleanup_return_prev = return_prev;
   763   _cleanup_state       = return_state;
   764 }
   767 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
   768   if (_work_list == NULL) {
   769     _work_list = new BlockList();
   770   }
   772   if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
   773     // Do not start parsing the continuation block while in a
   774     // sub-scope
   775     if (parsing_jsr()) {
   776       if (block == jsr_continuation()) {
   777         return;
   778       }
   779     } else {
   780       if (block == continuation()) {
   781         return;
   782       }
   783     }
   784     block->set(BlockBegin::is_on_work_list_flag);
   785     _work_list->push(block);
   787     sort_top_into_worklist(_work_list, block);
   788   }
   789 }
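// The helper below keeps the work list ordered: the block that add_to_work_list()
// just pushed is moved down by one insertion-sort step until depth-first numbers
// decrease from the bottom of the list towards the top, so remove_from_work_list()
// pops blocks in increasing depth-first (reverse post-order) order.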
   792 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
   793   assert(worklist->top() == top, "");
   794   // sort block descending into work list
   795   const int dfn = top->depth_first_number();
   796   assert(dfn != -1, "unknown depth first number");
   797   int i = worklist->length()-2;
   798   while (i >= 0) {
   799     BlockBegin* b = worklist->at(i);
   800     if (b->depth_first_number() < dfn) {
   801       worklist->at_put(i+1, b);
   802     } else {
   803       break;
   804     }
   805     i --;
   806   }
   807   if (i >= -1) worklist->at_put(i + 1, top);
   808 }
   811 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
   812   if (is_work_list_empty()) {
   813     return NULL;
   814   }
   815   return _work_list->pop();
   816 }
   819 bool GraphBuilder::ScopeData::is_work_list_empty() const {
   820   return (_work_list == NULL || _work_list->length() == 0);
   821 }
   824 void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
   825   assert(parsing_jsr(), "");
   826   // clone all the exception handlers from the scope
   827   XHandlers* handlers = new XHandlers(scope()->xhandlers());
   828   const int n = handlers->length();
   829   for (int i = 0; i < n; i++) {
   830     // The XHandlers need to be adjusted to dispatch to the cloned
   831     // handler block instead of the default one but the synthetic
   832     // unlocker needs to be handled specially.  The synthetic unlocker
   833     // should be left alone since there can be only one and all code
   834     // should dispatch to the same one.
   835     XHandler* h = handlers->handler_at(i);
   836     assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
   837     h->set_entry_block(block_at(h->handler_bci()));
   838   }
   839   _jsr_xhandlers = handlers;
   840 }
   843 int GraphBuilder::ScopeData::num_returns() {
   844   if (parsing_jsr()) {
   845     return parent()->num_returns();
   846   }
   847   return _num_returns;
   848 }
   851 void GraphBuilder::ScopeData::incr_num_returns() {
   852   if (parsing_jsr()) {
   853     parent()->incr_num_returns();
   854   } else {
   855     ++_num_returns;
   856   }
   857 }
   860 // Implementation of GraphBuilder
   862 #define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }
   865 void GraphBuilder::load_constant() {
   866   ciConstant con = stream()->get_constant();
   867   if (con.basic_type() == T_ILLEGAL) {
   868     BAILOUT("could not resolve a constant");
   869   } else {
   870     ValueType* t = illegalType;
   871     ValueStack* patch_state = NULL;
   872     switch (con.basic_type()) {
   873       case T_BOOLEAN: t = new IntConstant     (con.as_boolean()); break;
   874       case T_BYTE   : t = new IntConstant     (con.as_byte   ()); break;
   875       case T_CHAR   : t = new IntConstant     (con.as_char   ()); break;
   876       case T_SHORT  : t = new IntConstant     (con.as_short  ()); break;
   877       case T_INT    : t = new IntConstant     (con.as_int    ()); break;
   878       case T_LONG   : t = new LongConstant    (con.as_long   ()); break;
   879       case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
   880       case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
   881       case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
   882       case T_OBJECT :
   883        {
   884         ciObject* obj = con.as_object();
   885         if (!obj->is_loaded()
   886             || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
   887           patch_state = copy_state_before();
   888           t = new ObjectConstant(obj);
   889         } else {
   890           assert(obj->is_instance(), "must be java_mirror of klass");
   891           t = new InstanceConstant(obj->as_instance());
   892         }
   893         break;
   894        }
   895       default       : ShouldNotReachHere();
   896     }
   897     Value x;
   898     if (patch_state != NULL) {
   899       x = new Constant(t, patch_state);
   900     } else {
   901       x = new Constant(t);
   902     }
   903     push(t, append(x));
   904   }
   905 }
   908 void GraphBuilder::load_local(ValueType* type, int index) {
   909   Value x = state()->local_at(index);
   910   assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
   911   push(type, x);
   912 }
   915 void GraphBuilder::store_local(ValueType* type, int index) {
   916   Value x = pop(type);
   917   store_local(state(), x, index);
   918 }
   921 void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
   922   if (parsing_jsr()) {
   923     // We need to do additional tracking of the location of the return
   924     // address for jsrs since we don't handle arbitrary jsr/ret
   925     // constructs. Here we are figuring out in which circumstances we
   926     // need to bail out.
   927     if (x->type()->is_address()) {
   928       scope_data()->set_jsr_return_address_local(index);
   930       // Also check parent jsrs (if any) at this time to see whether
   931       // they are using this local. We don't handle skipping over a
   932       // ret.
   933       for (ScopeData* cur_scope_data = scope_data()->parent();
   934            cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
   935            cur_scope_data = cur_scope_data->parent()) {
   936         if (cur_scope_data->jsr_return_address_local() == index) {
   937           BAILOUT("subroutine overwrites return address from previous subroutine");
   938         }
   939       }
   940     } else if (index == scope_data()->jsr_return_address_local()) {
   941       scope_data()->set_jsr_return_address_local(-1);
   942     }
   943   }
   945   state->store_local(index, round_fp(x));
   946 }
   949 void GraphBuilder::load_indexed(BasicType type) {
   950   // The state is needed in case range check elimination moves this access within the block
   951   ValueStack* state_before = copy_state_indexed_access();
   952   compilation()->set_has_access_indexed(true);
   953   Value index = ipop();
   954   Value array = apop();
   955   Value length = NULL;
   956   if (CSEArrayLength ||
   957       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   958       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   959     length = append(new ArrayLength(array, state_before));
   960   }
   961   push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
   962 }
   965 void GraphBuilder::store_indexed(BasicType type) {
   966   // The state is needed in case range check elimination moves this access within the block
   967   ValueStack* state_before = copy_state_indexed_access();
   968   compilation()->set_has_access_indexed(true);
   969   Value value = pop(as_ValueType(type));
   970   Value index = ipop();
   971   Value array = apop();
   972   Value length = NULL;
   973   if (CSEArrayLength ||
   974       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
   975       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
   976     length = append(new ArrayLength(array, state_before));
   977   }
   978   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before);
   979   append(result);
   980   _memory->store_value(value);
   982   if (type == T_OBJECT && is_profiling()) {
   983     // Note that we'd collect profile data in this method if we wanted it.
   984     compilation()->set_would_profile(true);
   986     if (profile_checkcasts()) {
   987       result->set_profiled_method(method());
   988       result->set_profiled_bci(bci());
   989       result->set_should_profile(true);
   990     }
   991   }
   992 }
   995 void GraphBuilder::stack_op(Bytecodes::Code code) {
   996   switch (code) {
   997     case Bytecodes::_pop:
   998       { state()->raw_pop();
   999       }
  1000       break;
  1001     case Bytecodes::_pop2:
  1002       { state()->raw_pop();
  1003         state()->raw_pop();
  1004       }
  1005       break;
  1006     case Bytecodes::_dup:
  1007       { Value w = state()->raw_pop();
  1008         state()->raw_push(w);
  1009         state()->raw_push(w);
  1010       }
  1011       break;
  1012     case Bytecodes::_dup_x1:
  1013       { Value w1 = state()->raw_pop();
  1014         Value w2 = state()->raw_pop();
  1015         state()->raw_push(w1);
  1016         state()->raw_push(w2);
  1017         state()->raw_push(w1);
  1018       }
  1019       break;
  1020     case Bytecodes::_dup_x2:
  1021       { Value w1 = state()->raw_pop();
  1022         Value w2 = state()->raw_pop();
  1023         Value w3 = state()->raw_pop();
  1024         state()->raw_push(w1);
  1025         state()->raw_push(w3);
  1026         state()->raw_push(w2);
  1027         state()->raw_push(w1);
  1028       }
  1029       break;
  1030     case Bytecodes::_dup2:
  1031       { Value w1 = state()->raw_pop();
  1032         Value w2 = state()->raw_pop();
  1033         state()->raw_push(w2);
  1034         state()->raw_push(w1);
  1035         state()->raw_push(w2);
  1036         state()->raw_push(w1);
  1037       }
  1038       break;
  1039     case Bytecodes::_dup2_x1:
  1040       { Value w1 = state()->raw_pop();
  1041         Value w2 = state()->raw_pop();
  1042         Value w3 = state()->raw_pop();
  1043         state()->raw_push(w2);
  1044         state()->raw_push(w1);
  1045         state()->raw_push(w3);
  1046         state()->raw_push(w2);
  1047         state()->raw_push(w1);
  1048       }
  1049       break;
  1050     case Bytecodes::_dup2_x2:
  1051       { Value w1 = state()->raw_pop();
  1052         Value w2 = state()->raw_pop();
  1053         Value w3 = state()->raw_pop();
  1054         Value w4 = state()->raw_pop();
  1055         state()->raw_push(w2);
  1056         state()->raw_push(w1);
  1057         state()->raw_push(w4);
  1058         state()->raw_push(w3);
  1059         state()->raw_push(w2);
  1060         state()->raw_push(w1);
  1061       }
  1062       break;
  1063     case Bytecodes::_swap:
  1064       { Value w1 = state()->raw_pop();
  1065         Value w2 = state()->raw_pop();
  1066         state()->raw_push(w1);
  1067         state()->raw_push(w2);
  1068       }
  1069       break;
  1070     default:
  1071       ShouldNotReachHere();
  1072       break;
  1073   }
  1074 }
  1077 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  1078   Value y = pop(type);
  1079   Value x = pop(type);
  1080   // NOTE: strictfp can be queried from current method since we don't
  1081   // inline methods with differing strictfp bits
  1082   Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
  1083   // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  1084   res = append(res);
  1085   if (method()->is_strict()) {
  1086     res = round_fp(res);
  1087   }
  1088   push(type, res);
  1089 }
  1092 void GraphBuilder::negate_op(ValueType* type) {
  1093   push(type, append(new NegateOp(pop(type))));
  1094 }
  1097 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  1098   Value s = ipop();
  1099   Value x = pop(type);
  1100   // try to simplify
  1101   // Note: This code should go into the canonicalizer as soon as it can
  1102   //       handle canonicalized forms that contain more than one node.
  1103   if (CanonicalizeNodes && code == Bytecodes::_iushr) {
  1104     // pattern: x >>> s
  1105     IntConstant* s1 = s->type()->as_IntConstant();
  1106     if (s1 != NULL) {
  1107       // pattern: x >>> s1, with s1 constant
  1108       ShiftOp* l = x->as_ShiftOp();
  1109       if (l != NULL && l->op() == Bytecodes::_ishl) {
  1110         // pattern: (a << b) >>> s1
  1111         IntConstant* s0 = l->y()->type()->as_IntConstant();
  1112         if (s0 != NULL) {
  1113           // pattern: (a << s0) >>> s1
  1114           const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
  1115           const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
  1116           if (s0c == s1c) {
  1117             if (s0c == 0) {
  1118               // pattern: (a << 0) >>> 0 => simplify to: a
  1119               ipush(l->x());
  1120             } else {
  1121               // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
  1122               assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
  1123               const int m = (1 << (BitsPerInt - s0c)) - 1;
  1124               Value s = append(new Constant(new IntConstant(m)));
  1125               ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
  1126             }
  1127             return;
  1128           }
  1129         }
  1130       }
  1131     }
  1132   }
  1133   // could not simplify
  1134   push(type, append(new ShiftOp(code, x, s)));
  1135 }
  1138 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  1139   Value y = pop(type);
  1140   Value x = pop(type);
  1141   push(type, append(new LogicOp(code, x, y)));
  1142 }
  1145 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  1146   ValueStack* state_before = copy_state_before();
  1147   Value y = pop(type);
  1148   Value x = pop(type);
  1149   ipush(append(new CompareOp(code, x, y, state_before)));
  1150 }
  1153 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  1154   push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
  1155 }
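// A note on the operand decoding in increment() below: a narrow iinc bytecode is
// laid out as <iinc, index, const>, so the signed 8-bit delta sits at offset 2 from
// cur_bcp(); the wide form <wide, iinc, index_hi, index_lo, const_hi, const_lo>
// keeps a signed 16-bit delta starting at offset 4, which is what the
// Bytes::get_Java_u2(cur_bcp() + 4) access reads.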
  1158 void GraphBuilder::increment() {
  1159   int index = stream()->get_index();
  1160   int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  1161   load_local(intType, index);
  1162   ipush(append(new Constant(new IntConstant(delta))));
  1163   arithmetic_op(intType, Bytecodes::_iadd);
  1164   store_local(intType, index);
  1165 }
  1168 void GraphBuilder::_goto(int from_bci, int to_bci) {
  1169   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  1170   if (is_profiling()) {
  1171     compilation()->set_would_profile(true);
  1172     x->set_profiled_bci(bci());
  1173     if (profile_branches()) {
  1174       x->set_profiled_method(method());
  1175       x->set_should_profile(true);
  1176     }
  1177   }
  1178   append(x);
  1179 }
  1182 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  1183   BlockBegin* tsux = block_at(stream()->get_dest());
  1184   BlockBegin* fsux = block_at(stream()->next_bci());
  1185   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  1186   // In case of loop invariant code motion or predicate insertion
  1187   // before the body of a loop the state is needed
  1188   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
  1190   assert(i->as_Goto() == NULL ||
  1191          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
  1192          (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
  1193          "safepoint state of Goto returned by canonicalizer incorrect");
  1195   if (is_profiling()) {
  1196     If* if_node = i->as_If();
  1197     if (if_node != NULL) {
  1198       // Note that we'd collect profile data in this method if we wanted it.
  1199       compilation()->set_would_profile(true);
  1200       // At level 2 we need the proper bci to count backedges
  1201       if_node->set_profiled_bci(bci());
  1202       if (profile_branches()) {
  1203         // Successors can be rotated by the canonicalizer, check for this case.
  1204         if_node->set_profiled_method(method());
  1205         if_node->set_should_profile(true);
  1206         if (if_node->tsux() == fsux) {
  1207           if_node->set_swapped(true);
  1208         }
  1209       }
  1210       return;
  1211     }
  1213     // Check if this If was reduced to Goto.
  1214     Goto *goto_node = i->as_Goto();
  1215     if (goto_node != NULL) {
  1216       compilation()->set_would_profile(true);
  1217       goto_node->set_profiled_bci(bci());
  1218       if (profile_branches()) {
  1219         goto_node->set_profiled_method(method());
  1220         goto_node->set_should_profile(true);
  1221         // Find out which successor is used.
  1222         if (goto_node->default_sux() == tsux) {
  1223           goto_node->set_direction(Goto::taken);
  1224         } else if (goto_node->default_sux() == fsux) {
  1225           goto_node->set_direction(Goto::not_taken);
  1226         } else {
  1227           ShouldNotReachHere();
  1228         }
  1229       }
  1230       return;
  1231     }
  1232   }
  1233 }
  1236 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  1237   Value y = append(new Constant(intZero));
  1238   ValueStack* state_before = copy_state_before();
  1239   Value x = ipop();
  1240   if_node(x, cond, y, state_before);
  1241 }
  1244 void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  1245   Value y = append(new Constant(objectNull));
  1246   ValueStack* state_before = copy_state_before();
  1247   Value x = apop();
  1248   if_node(x, cond, y, state_before);
  1249 }
  1252 void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  1253   ValueStack* state_before = copy_state_before();
  1254   Value y = pop(type);
  1255   Value x = pop(type);
  1256   if_node(x, cond, y, state_before);
  1257 }
  1260 void GraphBuilder::jsr(int dest) {
  1261   // We only handle well-formed jsrs (those which are "block-structured").
  1262   // If the bytecodes are strange (jumping out of a jsr block) then we
  1263   // might end up trying to re-parse a block containing a jsr which
  1264   // has already been activated. Watch for this case and bail out.
  1265   for (ScopeData* cur_scope_data = scope_data();
  1266        cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
  1267        cur_scope_data = cur_scope_data->parent()) {
  1268     if (cur_scope_data->jsr_entry_bci() == dest) {
  1269       BAILOUT("too-complicated jsr/ret structure");
  1270     }
  1271   }
  1273   push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  1274   if (!try_inline_jsr(dest)) {
  1275     return; // bailed out while parsing and inlining subroutine
  1276   }
  1277 }
  1280 void GraphBuilder::ret(int local_index) {
  1281   if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
  1283   if (local_index != scope_data()->jsr_return_address_local()) {
  1284     BAILOUT("can not handle complicated jsr/ret constructs");
  1285   }
  1287   // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  1288   append(new Goto(scope_data()->jsr_continuation(), false));
  1289 }
  1292 void GraphBuilder::table_switch() {
  1293   Bytecode_tableswitch sw(stream());
  1294   const int l = sw.length();
  1295   if (CanonicalizeNodes && l == 1) {
  1296     // total of 2 successors => use If instead of switch
  1297     // Note: This code should go into the canonicalizer as soon as it can
  1298     //       handle canonicalized forms that contain more than one node.
  1299     Value key = append(new Constant(new IntConstant(sw.low_key())));
  1300     BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
  1301     BlockBegin* fsux = block_at(bci() + sw.default_offset());
  1302     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1303     // In case of loop invariant code motion or predicate insertion
  1304     // before the body of a loop the state is needed
  1305     ValueStack* state_before = copy_state_if_bb(is_bb);
  1306     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1307   } else {
  1308     // collect successors
  1309     BlockList* sux = new BlockList(l + 1, NULL);
  1310     int i;
  1311     bool has_bb = false;
  1312     for (i = 0; i < l; i++) {
  1313       sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
  1314       if (sw.dest_offset_at(i) < 0) has_bb = true;
  1315     }
  1316     // add default successor
  1317     if (sw.default_offset() < 0) has_bb = true;
  1318     sux->at_put(i, block_at(bci() + sw.default_offset()));
  1319     // In case of loop invariant code motion or predicate insertion
  1320     // before the body of a loop the state is needed
  1321     ValueStack* state_before = copy_state_if_bb(has_bb);
  1322     Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
  1323 #ifdef ASSERT
  1324     if (res->as_Goto()) {
  1325       for (i = 0; i < l; i++) {
  1326         if (sux->at(i) == res->as_Goto()->sux_at(0)) {
  1327           assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
  1328         }
  1329       }
  1330     }
  1331 #endif
  1332   }
  1333 }
  1336 void GraphBuilder::lookup_switch() {
  1337   Bytecode_lookupswitch sw(stream());
  1338   const int l = sw.number_of_pairs();
  1339   if (CanonicalizeNodes && l == 1) {
  1340     // total of 2 successors => use If instead of switch
  1341     // Note: This code should go into the canonicalizer as soon as it can
  1342     //       handle canonicalized forms that contain more than one node.
  1343     // simplify to If
  1344     LookupswitchPair pair = sw.pair_at(0);
  1345     Value key = append(new Constant(new IntConstant(pair.match())));
  1346     BlockBegin* tsux = block_at(bci() + pair.offset());
  1347     BlockBegin* fsux = block_at(bci() + sw.default_offset());
  1348     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
  1349     // In case of loop invariant code motion or predicate insertion
  1350     // before the body of a loop the state is needed
  1351     ValueStack* state_before = copy_state_if_bb(is_bb);
  1352     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  1353   } else {
  1354     // collect successors & keys
  1355     BlockList* sux = new BlockList(l + 1, NULL);
  1356     intArray* keys = new intArray(l, 0);
  1357     int i;
  1358     bool has_bb = false;
  1359     for (i = 0; i < l; i++) {
  1360       LookupswitchPair pair = sw.pair_at(i);
  1361       if (pair.offset() < 0) has_bb = true;
  1362       sux->at_put(i, block_at(bci() + pair.offset()));
  1363       keys->at_put(i, pair.match());
  1364     }
  1365     // add default successor
  1366     if (sw.default_offset() < 0) has_bb = true;
  1367     sux->at_put(i, block_at(bci() + sw.default_offset()));
  1368     // In case of loop invariant code motion or predicate insertion
  1369     // before the body of a loop the state is needed
  1370     ValueStack* state_before = copy_state_if_bb(has_bb);
  1371     Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
  1372 #ifdef ASSERT
  1373     if (res->as_Goto()) {
  1374       for (i = 0; i < l; i++) {
  1375         if (sux->at(i) == res->as_Goto()->sux_at(0)) {
  1376           assert(res->as_Goto()->is_safepoint() == sw.pair_at(i).offset() < 0, "safepoint state of Goto returned by canonicalizer incorrect");
  1377         }
  1378       }
  1379     }
  1380 #endif
  1381   }
  1382 }
  1384 void GraphBuilder::call_register_finalizer() {
  1385   // If the receiver requires finalization then emit code to perform
  1386   // the registration on return.
  1388   // Gather some type information about the receiver
  1389   Value receiver = state()->local_at(0);
  1390   assert(receiver != NULL, "must have a receiver");
  1391   ciType* declared_type = receiver->declared_type();
  1392   ciType* exact_type = receiver->exact_type();
  1393   if (exact_type == NULL &&
  1394       receiver->as_Local() &&
  1395       receiver->as_Local()->java_index() == 0) {
  1396     ciInstanceKlass* ik = compilation()->method()->holder();
  1397     if (ik->is_final()) {
  1398       exact_type = ik;
  1399     } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  1400       // test class is leaf class
  1401       compilation()->dependency_recorder()->assert_leaf_type(ik);
  1402       exact_type = ik;
  1403     } else {
  1404       declared_type = ik;
  1405     }
  1406   }
  1408   // see if we know statically that registration isn't required
  1409   bool needs_check = true;
  1410   if (exact_type != NULL) {
  1411     needs_check = exact_type->as_instance_klass()->has_finalizer();
  1412   } else if (declared_type != NULL) {
  1413     ciInstanceKlass* ik = declared_type->as_instance_klass();
  1414     if (!Dependencies::has_finalizable_subclass(ik)) {
  1415       compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
  1416       needs_check = false;
  1417     }
  1418   }
  1420   if (needs_check) {
  1421     // Perform the registration of finalizable objects.
  1422     ValueStack* state_before = copy_state_for_exception();
  1423     load_local(objectType, 0);
  1424     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
  1425                                state()->pop_arguments(1),
  1426                                true, state_before, true));
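           // Illustrative sketch (assumed example): for a class such as
           //   class Resource { protected void finalize() { /* cleanup */ } }
           // the receiver's type has a finalizer, so needs_check stays true and the
           // _Object_init intrinsic that registers the object for finalization is
           // emitted on return from the constructor; for a final class with no
           // finalizer and no finalizable subclasses the registration is omitted
           // statically.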
  1431 void GraphBuilder::method_return(Value x) {
  1432   if (RegisterFinalizersAtInit &&
  1433       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
  1434     call_register_finalizer();
  1437   bool need_mem_bar = false;
  1438   if (method()->name() == ciSymbol::object_initializer_name() &&
  1439       scope()->wrote_final()) {
  1440     need_mem_bar = true;
  1443   // Check to see whether we are inlining. If so, Return
  1444   // instructions become Gotos to the continuation point.
  1445   if (continuation() != NULL) {
  1446     assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
  1448     if (compilation()->env()->dtrace_method_probes()) {
  1449       // Report exit from inline methods
  1450       Values* args = new Values(1);
  1451       args->push(append(new Constant(new MethodConstant(method()))));
  1452       append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
  1455     // If the inlined method is synchronized, the monitor must be
  1456     // released before we jump to the continuation block.
  1457     if (method()->is_synchronized()) {
  1458       assert(state()->locks_size() == 1, "receiver must be locked here");
  1459       monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
  1462     if (need_mem_bar) {
  1463       append(new MemBar(lir_membar_storestore));
  1466     // The state at the end of the inlined method is the caller's state
  1467     // without the method parameters on the stack, plus the return value,
  1468     // if any, of the inlined method on the operand stack.
  1469     int invoke_bci = state()->caller_state()->bci();
  1470     set_state(state()->caller_state()->copy_for_parsing());
  1471     if (x != NULL) {
  1472       state()->push(x->type(), x);
  1473       if (profile_return() && x->type()->is_object_kind()) {
  1474         ciMethod* caller = state()->scope()->method();
  1475         ciMethodData* md = caller->method_data_or_null();
  1476         ciProfileData* data = md->bci_to_data(invoke_bci);
  1477         if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
  1478           bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
  1479           // May not be true in case of an inlined call through a method handle intrinsic.
  1480           if (has_return) {
  1481             profile_return_type(x, method(), caller, invoke_bci);
  1486     Goto* goto_callee = new Goto(continuation(), false);
  1488     // See whether this is the first return; if so, store off some
  1489     // of the state for later examination
  1490     if (num_returns() == 0) {
  1491       set_inline_cleanup_info();
  1494     // The current bci() is in the wrong scope, so use the bci() of
  1495     // the continuation point.
  1496     append_with_bci(goto_callee, scope_data()->continuation()->bci());
  1497     incr_num_returns();
  1498     return;
  1501   state()->truncate_stack(0);
  1502   if (method()->is_synchronized()) {
  1503     // perform the unlocking before exiting the method
  1504     Value receiver;
  1505     if (!method()->is_static()) {
  1506       receiver = _initial_state->local_at(0);
  1507     } else {
  1508       receiver = append(new Constant(new ClassConstant(method()->holder())));
  1510     append_split(new MonitorExit(receiver, state()->unlock()));
  1513   if (need_mem_bar) {
  1514       append(new MemBar(lir_membar_storestore));
  1517   append(new Return(x));
  1521 void GraphBuilder::access_field(Bytecodes::Code code) {
  1522   bool will_link;
  1523   ciField* field = stream()->get_field(will_link);
  1524   ciInstanceKlass* holder = field->holder();
  1525   BasicType field_type = field->type()->basic_type();
  1526   ValueType* type = as_ValueType(field_type);
  1527   // call will_link again to determine if the field is valid.
  1528   const bool needs_patching = !holder->is_loaded() ||
  1529                               !field->will_link(method()->holder(), code) ||
  1530                               PatchALot;
  1532   ValueStack* state_before = NULL;
  1533   if (!holder->is_initialized() || needs_patching) {
  1534     // save state before instruction for debug info when
  1535     // deoptimization happens during patching
  1536     state_before = copy_state_before();
  1539   Value obj = NULL;
  1540   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
  1541     if (state_before != NULL) {
  1542       // build a patching constant
  1543       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
  1544     } else {
  1545       obj = new Constant(new InstanceConstant(holder->java_mirror()));
  1549   if (field->is_final() && (code == Bytecodes::_putfield)) {
  1550     scope()->set_wrote_final();
  1553   const int offset = !needs_patching ? field->offset() : -1;
  1554   switch (code) {
  1555     case Bytecodes::_getstatic: {
  1556       // check for compile-time constants, i.e., initialized static final fields
  1557       Instruction* constant = NULL;
  1558       if (field->is_constant() && !PatchALot) {
  1559         ciConstant field_val = field->constant_value();
  1560         BasicType field_type = field_val.basic_type();
  1561         switch (field_type) {
  1562         case T_ARRAY:
  1563         case T_OBJECT:
  1564           if (field_val.as_object()->should_be_constant()) {
  1565             constant = new Constant(as_ValueType(field_val));
  1567           break;
  1569         default:
  1570           constant = new Constant(as_ValueType(field_val));
  1573       if (constant != NULL) {
  1574         push(type, append(constant));
  1575       } else {
  1576         if (state_before == NULL) {
  1577           state_before = copy_state_for_exception();
  1579         push(type, append(new LoadField(append(obj), offset, field, true,
  1580                                         state_before, needs_patching)));
  1582       break;
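             // Illustrative sketch (assumed example): for
             //   class C { static final int N = 10; }
             // a getstatic of C.N typically satisfies field->is_constant() once C is
             // initialized, so the access folds to Constant(10) above; a field that
             // is not a compile-time constant becomes a LoadField with the saved
             // state_before used for patching and exception states.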
  1584     case Bytecodes::_putstatic:
  1585       { Value val = pop(type);
  1586         if (state_before == NULL) {
  1587           state_before = copy_state_for_exception();
  1589         append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
  1591       break;
  1592     case Bytecodes::_getfield: {
  1593       // Check for compile-time constants, i.e., trusted final non-static fields.
  1594       Instruction* constant = NULL;
  1595       obj = apop();
  1596       ObjectType* obj_type = obj->type()->as_ObjectType();
  1597       if (obj_type->is_constant() && !PatchALot) {
  1598         ciObject* const_oop = obj_type->constant_value();
  1599         if (!const_oop->is_null_object() && const_oop->is_loaded()) {
  1600           if (field->is_constant()) {
  1601             ciConstant field_val = field->constant_value_of(const_oop);
  1602             BasicType field_type = field_val.basic_type();
  1603             switch (field_type) {
  1604             case T_ARRAY:
  1605             case T_OBJECT:
  1606               if (field_val.as_object()->should_be_constant()) {
  1607                 constant = new Constant(as_ValueType(field_val));
  1609               break;
  1610             default:
  1611               constant = new Constant(as_ValueType(field_val));
  1613           } else {
  1614             // For CallSite objects treat the target field as a compile time constant.
  1615             if (const_oop->is_call_site()) {
  1616               ciCallSite* call_site = const_oop->as_call_site();
  1617               if (field->is_call_site_target()) {
  1618                 ciMethodHandle* target = call_site->get_target();
  1619                 if (target != NULL) {  // just in case
  1620                   ciConstant field_val(T_OBJECT, target);
  1621                   constant = new Constant(as_ValueType(field_val));
  1622                   // Add a dependence for invalidation of the optimization.
  1623                   if (!call_site->is_constant_call_site()) {
  1624                     dependency_recorder()->assert_call_site_target_value(call_site, target);
  1632       if (constant != NULL) {
  1633         push(type, append(constant));
  1634       } else {
  1635         if (state_before == NULL) {
  1636           state_before = copy_state_for_exception();
  1638         LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
  1639         Value replacement = !needs_patching ? _memory->load(load) : load;
  1640         if (replacement != load) {
  1641           assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
  1642           push(type, replacement);
  1643         } else {
  1644           push(type, append(load));
  1647       break;
  1649     case Bytecodes::_putfield: {
  1650       Value val = pop(type);
  1651       obj = apop();
  1652       if (state_before == NULL) {
  1653         state_before = copy_state_for_exception();
  1655       StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
  1656       if (!needs_patching) store = _memory->store(store);
  1657       if (store != NULL) {
  1658         append(store);
  1660       break;
  1662     default:
  1663       ShouldNotReachHere();
  1664       break;
  1669 Dependencies* GraphBuilder::dependency_recorder() const {
  1670   assert(DeoptC1, "need debug information");
  1671   return compilation()->dependency_recorder();
  1674 // How many arguments do we want to profile?
  1675 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
  1676   int n = 0;
  1677   bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
  1678   start = has_receiver ? 1 : 0;
  1679   if (profile_arguments()) {
  1680     ciProfileData* data = method()->method_data()->bci_to_data(bci());
  1681     if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
  1682       n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
  1685   // If we are inlining then we need to collect arguments to profile parameters for the target
  1686   if (profile_parameters() && target != NULL) {
  1687     if (target->method_data() != NULL && target->method_data()->parameters_type_data() != NULL) {
  1688       // The receiver is profiled on method entry so it's included in
  1689       // the number of parameters but here we're only interested in
  1690       // actual arguments.
  1691       n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start);
  1694   if (n > 0) {
  1695     return new Values(n);
  1697   return NULL;
  1700 void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) {
  1701 #ifdef ASSERT
  1702   bool ignored_will_link;
  1703   ciSignature* declared_signature = NULL;
  1704   ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  1705   assert(expected == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
  1706 #endif
  1709 // Collect arguments that we want to profile in a list
  1710 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
  1711   int start = 0;
  1712   Values* obj_args = args_list_for_profiling(target, start, may_have_receiver);
  1713   if (obj_args == NULL) {
  1714     return NULL;
  1716   int s = obj_args->size();
  1717   // if called through method handle invoke, some arguments may have been popped
  1718   for (int i = start, j = 0; j < s && i < args->length(); i++) {
  1719     if (args->at(i)->type()->is_object_kind()) {
  1720       obj_args->push(args->at(i));
  1721       j++;
  1724   check_args_for_profiling(obj_args, s);
  1725   return obj_args;
  1729 void GraphBuilder::invoke(Bytecodes::Code code) {
  1730   bool will_link;
  1731   ciSignature* declared_signature = NULL;
  1732   ciMethod*             target = stream()->get_method(will_link, &declared_signature);
  1733   ciKlass*              holder = stream()->get_declared_method_holder();
  1734   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
  1735   assert(declared_signature != NULL, "cannot be null");
  1737   if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
  1738     BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
  1741   // we have to make sure the argument size (incl. the receiver)
  1742   // is correct for compilation (the call would fail later during
  1743   // linkage anyway) - was bug (gri 7/28/99)
  1745     // Use raw to get rewritten bytecode.
  1746     const bool is_invokestatic = bc_raw == Bytecodes::_invokestatic;
  1747     const bool allow_static =
  1748           is_invokestatic ||
  1749           bc_raw == Bytecodes::_invokehandle ||
  1750           bc_raw == Bytecodes::_invokedynamic;
  1751     if (target->is_loaded()) {
  1752       if (( target->is_static() && !allow_static) ||
  1753           (!target->is_static() &&  is_invokestatic)) {
  1754         BAILOUT("will cause link error");
  1758   ciInstanceKlass* klass = target->holder();
  1760   // check if CHA possible: if so, change the code to invoke_special
  1761   ciInstanceKlass* calling_klass = method()->holder();
  1762   ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  1763   ciInstanceKlass* actual_recv = callee_holder;
  1765   CompileLog* log = compilation()->log();
  1766   if (log != NULL)
  1767       log->elem("call method='%d' instr='%s'",
  1768                 log->identify(target),
  1769                 Bytecodes::name(code));
  1771   // Some methods are obviously bindable without any type checks so
  1772   // convert them directly to an invokespecial or invokestatic.
  1773   if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
  1774     switch (bc_raw) {
  1775     case Bytecodes::_invokevirtual:
  1776       code = Bytecodes::_invokespecial;
  1777       break;
  1778     case Bytecodes::_invokehandle:
  1779       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
  1780       break;
  1782   } else {
  1783     if (bc_raw == Bytecodes::_invokehandle) {
  1784       assert(!will_link, "should come here only for unlinked call");
  1785       code = Bytecodes::_invokespecial;
  1789   // Push appendix argument (MethodType, CallSite, etc.), if one.
  1790   bool patch_for_appendix = false;
  1791   int patching_appendix_arg = 0;
  1792   if (C1PatchInvokeDynamic &&
  1793       (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
  1794     Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
  1795     apush(arg);
  1796     patch_for_appendix = true;
  1797     patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
  1798   } else if (stream()->has_appendix()) {
  1799     ciObject* appendix = stream()->get_appendix();
  1800     Value arg = append(new Constant(new ObjectConstant(appendix)));
  1801     apush(arg);
  1804   // NEEDS_CLEANUP
  1805   // I've added the target->is_loaded() test below but I don't really understand
  1806   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
  1807 // This happened while running the JCK invokevirtual tests under doit.  TKR
  1808   ciMethod* cha_monomorphic_target = NULL;
  1809   ciMethod* exact_target = NULL;
  1810   Value better_receiver = NULL;
  1811   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
  1812       !(// %%% FIXME: Are both of these relevant?
  1813         target->is_method_handle_intrinsic() ||
  1814         target->is_compiled_lambda_form()) &&
  1815       !patch_for_appendix) {
  1816     Value receiver = NULL;
  1817     ciInstanceKlass* receiver_klass = NULL;
  1818     bool type_is_exact = false;
  1819     // try to find a precise receiver type
  1820     if (will_link && !target->is_static()) {
  1821       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
  1822       receiver = state()->stack_at(index);
  1823       ciType* type = receiver->exact_type();
  1824       if (type != NULL && type->is_loaded() &&
  1825           type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1826         receiver_klass = (ciInstanceKlass*) type;
  1827         type_is_exact = true;
  1829       if (type == NULL) {
  1830         type = receiver->declared_type();
  1831         if (type != NULL && type->is_loaded() &&
  1832             type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
  1833           receiver_klass = (ciInstanceKlass*) type;
  1834           if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
  1835             // Insert a dependency on this type since
  1836             // find_monomorphic_target may assume it's already done.
  1837             dependency_recorder()->assert_leaf_type(receiver_klass);
  1838             type_is_exact = true;
  1843     if (receiver_klass != NULL && type_is_exact &&
  1844         receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
  1845       // If we have the exact receiver type we can bind directly to
  1846       // the method to call.
  1847       exact_target = target->resolve_invoke(calling_klass, receiver_klass);
  1848       if (exact_target != NULL) {
  1849         target = exact_target;
  1850         code = Bytecodes::_invokespecial;
  1853     if (receiver_klass != NULL &&
  1854         receiver_klass->is_subtype_of(actual_recv) &&
  1855         actual_recv->is_initialized()) {
  1856       actual_recv = receiver_klass;
  1859     if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
  1860         (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
  1861       // Use CHA on the receiver to select a more precise method.
  1862       cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
  1863     } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
  1864       // if there is only one implementor of this interface then we
  1865       // may be able to bind this invoke directly to the implementing
  1866       // klass but we need both a dependence on the single interface
  1867       // and on the method we bind to.  Additionally since all we know
  1868       // about the receiver type is that it's supposed to implement the
  1869       // interface we have to insert a check that it's the class we
  1870       // expect.  Interface types are not checked by the verifier so
  1871       // they are roughly equivalent to Object.
  1872       ciInstanceKlass* singleton = NULL;
  1873       if (target->holder()->nof_implementors() == 1) {
  1874         singleton = target->holder()->implementor();
  1875         assert(singleton != NULL && singleton != target->holder(),
  1876                "just checking");
  1878         assert(holder->is_interface(), "invokeinterface to non interface?");
  1879         ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
  1880         // the number of implementors for decl_interface is less than or
  1881         // equal to the number of implementors for target->holder() so
  1882         // if number of implementors of target->holder() == 1 then
  1883         // number of implementors for decl_interface is 0 or 1. If
  1884         // it's 0 then no class implements decl_interface and there's
  1885         // no point in inlining.
  1886         if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) {
  1887           singleton = NULL;
  1890       if (singleton) {
  1891         cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
  1892         if (cha_monomorphic_target != NULL) {
  1893           // If CHA is able to bind this invoke then update the class
  1894           // to match that class, otherwise klass will refer to the
  1895           // interface.
  1896           klass = cha_monomorphic_target->holder();
  1897           actual_recv = target->holder();
  1899           // insert a check that it's really the expected class.
  1900           CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
  1901           c->set_incompatible_class_change_check();
  1902           c->set_direct_compare(klass->is_final());
  1903           // pass the result of the checkcast so that the compiler has
  1904           // more accurate type info in the inlinee
  1905           better_receiver = append_split(c);
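                 // Illustrative sketch (assumed example): given
                 //   interface Shape { double area(); }
                 //   class Circle implements Shape { public double area() { ... } }
                 // an invokeinterface of Shape.area() while Circle is the only loaded
                 // implementor can be bound to Circle.area(); the CheckCast above
                 // narrows the receiver, and the dependencies recorded for this
                 // binding invalidate the code if another implementor is loaded later.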
  1911   if (cha_monomorphic_target != NULL) {
  1912     if (cha_monomorphic_target->is_abstract()) {
  1913       // Do not optimize for abstract methods
  1914       cha_monomorphic_target = NULL;
  1918   if (cha_monomorphic_target != NULL) {
  1919     if (!(target->is_final_method())) {
  1920       // If we inlined because CHA revealed only a single target method,
  1921       // then we are dependent on that target method not getting overridden
  1922       // by dynamic class loading.  Be sure to test the "static" receiver
  1923       // dest_method here, as opposed to the actual receiver, which may
  1924       // falsely lead us to believe that the receiver is final or private.
  1925       dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
  1927     code = Bytecodes::_invokespecial;
  1930   // check if we could do inlining
  1931   if (!PatchALot && Inline && klass->is_loaded() &&
  1932       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
  1933       && target->is_loaded()
  1934       && !patch_for_appendix) {
  1935     // callee is known => check if we have static binding
  1936     assert(target->is_loaded(), "callee must be known");
  1937     if (code == Bytecodes::_invokestatic  ||
  1938         code == Bytecodes::_invokespecial ||
  1939         code == Bytecodes::_invokevirtual && target->is_final_method() ||
  1940         code == Bytecodes::_invokedynamic) {
  1941       ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
  1942       // static binding => check if callee is ok
  1943       bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
  1945       CHECK_BAILOUT();
  1946       clear_inline_bailout();
  1948       if (success) {
  1949         // Register a dependence if JVMTI has breakpoint-setting or
  1950         // method-hotswapping capabilities, since either may cause
  1951         // deoptimization.
  1952         if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
  1953           dependency_recorder()->assert_evol_method(inline_target);
  1955         return;
  1957     } else {
  1958       print_inlining(target, "no static binding", /*success*/ false);
  1960   } else {
  1961     print_inlining(target, "not inlineable", /*success*/ false);
  1964   // If we attempted an inline which did not succeed because of a
  1965   // bailout during construction of the callee graph, the entire
  1966   // compilation has to be aborted. This is fairly rare and currently
  1967   // seems to only occur for jasm-generated classes which contain
  1968   // jsr/ret pairs which are not associated with finally clauses and
  1969   // do not have exception handlers in the containing method, and are
  1970   // therefore not caught early enough to abort the inlining without
  1971   // corrupting the graph. (We currently bail out with a non-empty
  1972   // stack at a ret in these situations.)
  1973   CHECK_BAILOUT();
  1975   // inlining not successful => standard invoke
  1976   bool is_loaded = target->is_loaded();
  1977   ValueType* result_type = as_ValueType(declared_signature->return_type());
  1978   ValueStack* state_before = copy_state_exhandling();
  1980   // The bytecode (code) might change in this method so we are checking this very late.
  1981   const bool has_receiver =
  1982     code == Bytecodes::_invokespecial   ||
  1983     code == Bytecodes::_invokevirtual   ||
  1984     code == Bytecodes::_invokeinterface;
  1985   Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
  1986   Value recv = has_receiver ? apop() : NULL;
  1987   int vtable_index = Method::invalid_vtable_index;
  1989 #ifdef SPARC
  1990   // Currently only supported on Sparc.
  1991   // The UseInlineCaches flag only controls dispatch to invokevirtuals for
  1992   // loaded classes which we weren't able to statically bind.
  1993   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
  1994       && !target->can_be_statically_bound()) {
  1995     // Find a vtable index if one is available
  1996     // For arrays, callee_holder is Object. Resolving the call with
  1997     // Object would allow an illegal call to finalize() on an
  1998     // array. We use holder instead: illegal calls to finalize() won't
  1999     // be compiled as vtable calls (IC call resolution will catch the
  2000     // illegal call) and the few legal calls on array types won't be
  2001     // either.
  2002     vtable_index = target->resolve_vtable_index(calling_klass, holder);
  2004 #endif
  2006   if (recv != NULL &&
  2007       (code == Bytecodes::_invokespecial ||
  2008        !is_loaded || target->is_final())) {
  2009     // invokespecial always needs a NULL check.  invokevirtual where
  2010     // the target is final or where it's not known whether the
  2011     // target is final requires a NULL check.  Otherwise normal
  2012     // invokevirtual will perform the null check during the lookup
  2013     // logic or the unverified entry point.  Profiling of calls
  2014     // requires that the null check is performed in all cases.
  2015     null_check(recv);
  2018   if (is_profiling()) {
  2019     if (recv != NULL && profile_calls()) {
  2020       null_check(recv);
  2022     // Note that we'd collect profile data in this method if we wanted it.
  2023     compilation()->set_would_profile(true);
  2025     if (profile_calls()) {
  2026       assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
  2027       ciKlass* target_klass = NULL;
  2028       if (cha_monomorphic_target != NULL) {
  2029         target_klass = cha_monomorphic_target->holder();
  2030       } else if (exact_target != NULL) {
  2031         target_klass = exact_target->holder();
  2033       profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
  2037   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
  2038   // push result
  2039   append_split(result);
  2041   if (result_type != voidType) {
  2042     if (method()->is_strict()) {
  2043       push(result_type, round_fp(result));
  2044     } else {
  2045       push(result_type, result);
  2048   if (profile_return() && result_type->is_object_kind()) {
  2049     profile_return_type(result, target);
  2054 void GraphBuilder::new_instance(int klass_index) {
  2055   ValueStack* state_before = copy_state_exhandling();
  2056   bool will_link;
  2057   ciKlass* klass = stream()->get_klass(will_link);
  2058   assert(klass->is_instance_klass(), "must be an instance klass");
  2059   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
  2060   _memory->new_instance(new_instance);
  2061   apush(append_split(new_instance));
  2065 void GraphBuilder::new_type_array() {
  2066   ValueStack* state_before = copy_state_exhandling();
  2067   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
  2071 void GraphBuilder::new_object_array() {
  2072   bool will_link;
  2073   ciKlass* klass = stream()->get_klass(will_link);
  2074   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  2075   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
  2076   apush(append_split(n));
  2080 bool GraphBuilder::direct_compare(ciKlass* k) {
  2081   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
  2082     ciInstanceKlass* ik = k->as_instance_klass();
  2083     if (ik->is_final()) {
  2084       return true;
  2085     } else {
  2086       if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
  2087         // test class is leaf class
  2088         dependency_recorder()->assert_leaf_type(ik);
  2089         return true;
  2093   return false;
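         // Illustrative note: a checkcast or instanceof against java.lang.String
         // can use a direct compare because String is final, so only one expected
         // klass needs to be checked; for a non-final class that currently has no
         // subclasses the same fast path applies, but only under DeoptC1 && UseCHA
         // and guarded by a leaf-type dependency that invalidates the code if a
         // subclass is loaded.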
  2097 void GraphBuilder::check_cast(int klass_index) {
  2098   bool will_link;
  2099   ciKlass* klass = stream()->get_klass(will_link);
  2100   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
  2101   CheckCast* c = new CheckCast(klass, apop(), state_before);
  2102   apush(append_split(c));
  2103   c->set_direct_compare(direct_compare(klass));
  2105   if (is_profiling()) {
  2106     // Note that we'd collect profile data in this method if we wanted it.
  2107     compilation()->set_would_profile(true);
  2109     if (profile_checkcasts()) {
  2110       c->set_profiled_method(method());
  2111       c->set_profiled_bci(bci());
  2112       c->set_should_profile(true);
  2118 void GraphBuilder::instance_of(int klass_index) {
  2119   bool will_link;
  2120   ciKlass* klass = stream()->get_klass(will_link);
  2121   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  2122   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
  2123   ipush(append_split(i));
  2124   i->set_direct_compare(direct_compare(klass));
  2126   if (is_profiling()) {
  2127     // Note that we'd collect profile data in this method if we wanted it.
  2128     compilation()->set_would_profile(true);
  2130     if (profile_checkcasts()) {
  2131       i->set_profiled_method(method());
  2132       i->set_profiled_bci(bci());
  2133       i->set_should_profile(true);
  2139 void GraphBuilder::monitorenter(Value x, int bci) {
  2140   // save state before locking in case of deoptimization after a NullPointerException
  2141   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
  2142   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
  2143   kill_all();
  2147 void GraphBuilder::monitorexit(Value x, int bci) {
  2148   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
  2149   kill_all();
  2153 void GraphBuilder::new_multi_array(int dimensions) {
  2154   bool will_link;
  2155   ciKlass* klass = stream()->get_klass(will_link);
  2156   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  2158   Values* dims = new Values(dimensions, NULL);
  2159   // fill in all dimensions
  2160   int i = dimensions;
  2161   while (i-- > 0) dims->at_put(i, ipop());
  2162   // create array
  2163   NewArray* n = new NewMultiArray(klass, dims, state_before);
  2164   apush(append_split(n));
  2168 void GraphBuilder::throw_op(int bci) {
  2169   // We require that the debug info for a Throw be the "state before"
  2170   // the Throw (i.e., exception oop is still on TOS)
  2171   ValueStack* state_before = copy_state_before_with_bci(bci);
  2172   Throw* t = new Throw(apop(), state_before);
  2173   // operand stack not needed after a throw
  2174   state()->truncate_stack(0);
  2175   append_with_bci(t, bci);
  2179 Value GraphBuilder::round_fp(Value fp_value) {
  2180   // no rounding needed if SSE2 is used
  2181   if (RoundFPResults && UseSSE < 2) {
  2182     // Must currently insert rounding node for doubleword values that
  2183     // are results of expressions (i.e., not loads from memory or
  2184     // constants)
  2185     if (fp_value->type()->tag() == doubleTag &&
  2186         fp_value->as_Constant() == NULL &&
  2187         fp_value->as_Local() == NULL &&       // method parameters need no rounding
  2188         fp_value->as_RoundFP() == NULL) {
  2189       return append(new RoundFP(fp_value));
  2192   return fp_value;
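         // Illustrative note: with RoundFPResults and UseSSE < 2 (x87 floating
         // point), a double produced by an expression such as a * b gets a RoundFP
         // node so the extended-precision x87 result is rounded to 64-bit memory
         // precision before further use; loads, constants and incoming parameters
         // are already in memory format and need no rounding.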
  2196 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  2197   Canonicalizer canon(compilation(), instr, bci);
  2198   Instruction* i1 = canon.canonical();
  2199   if (i1->is_linked() || !i1->can_be_linked()) {
  2200     // Canonicalizer returned an instruction which was already
  2201     // appended so simply return it.
  2202     return i1;
  2205   if (UseLocalValueNumbering) {
  2206     // Lookup the instruction in the ValueMap and add it to the map if
  2207     // it's not found.
  2208     Instruction* i2 = vmap()->find_insert(i1);
  2209     if (i2 != i1) {
  2210       // found an entry in the value map, so just return it.
  2211       assert(i2->is_linked(), "should already be linked");
  2212       return i2;
  2214     ValueNumberingEffects vne(vmap());
  2215     i1->visit(&vne);
  2218   // i1 was not eliminated => append it
  2219   assert(i1->next() == NULL, "shouldn't already be linked");
  2220   _last = _last->set_next(i1, canon.bci());
  2222   if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
  2223     // set the bailout state but complete normal processing.  We
  2224     // might do a little more work before noticing the bailout so we
  2225     // want processing to continue normally until it's noticed.
  2226     bailout("Method and/or inlining is too large");
  2229 #ifndef PRODUCT
  2230   if (PrintIRDuringConstruction) {
  2231     InstructionPrinter ip;
  2232     ip.print_line(i1);
  2233     if (Verbose) {
  2234       state()->print();
  2237 #endif
  2239   // save state after modification of operand stack for StateSplit instructions
  2240   StateSplit* s = i1->as_StateSplit();
  2241   if (s != NULL) {
  2242     if (EliminateFieldAccess) {
  2243       Intrinsic* intrinsic = s->as_Intrinsic();
  2244       if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
  2245         _memory->kill();
  2248     s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
  2251   // set up exception handlers for this instruction if necessary
  2252   if (i1->can_trap()) {
  2253     i1->set_exception_handlers(handle_exception(i1));
  2254     assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
  2256   return i1;
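         // Illustrative note: the Canonicalizer can hand back an already linked
         // value (for example the existing x for an ArithmeticOp like x + 0), in
         // which case nothing new is emitted; with UseLocalValueNumbering, a second
         // identical LoadField appended in the same block is found in the value map
         // and reused instead of being linked as a duplicate instruction.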
  2260 Instruction* GraphBuilder::append(Instruction* instr) {
  2261   assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
  2262   return append_with_bci(instr, bci());
  2266 Instruction* GraphBuilder::append_split(StateSplit* instr) {
  2267   return append_with_bci(instr, bci());
  2271 void GraphBuilder::null_check(Value value) {
  2272   if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
  2273     return;
  2274   } else {
  2275     Constant* con = value->as_Constant();
  2276     if (con) {
  2277       ObjectType* c = con->type()->as_ObjectType();
  2278       if (c && c->is_loaded()) {
  2279         ObjectConstant* oc = c->as_ObjectConstant();
  2280         if (!oc || !oc->value()->is_null_object()) {
  2281           return;
  2286   append(new NullCheck(value, copy_state_for_exception()));
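         // Illustrative note: null_check(value) emits nothing when value is the
         // result of a preceding new (NewInstance/NewArray are never null) or is a
         // loaded non-null object constant such as a String literal; only in the
         // remaining cases is an explicit NullCheck with an exception state
         // appended.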
  2291 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
  2292   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
  2293     assert(instruction->exception_state() == NULL
  2294            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
  2295            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
  2296            "exception_state should be of exception kind");
  2297     return new XHandlers();
  2300   XHandlers*  exception_handlers = new XHandlers();
  2301   ScopeData*  cur_scope_data = scope_data();
  2302   ValueStack* cur_state = instruction->state_before();
  2303   ValueStack* prev_state = NULL;
  2304   int scope_count = 0;
  2306   assert(cur_state != NULL, "state_before must be set");
  2307   do {
  2308     int cur_bci = cur_state->bci();
  2309     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
  2310     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
  2312     // join with all potential exception handlers
  2313     XHandlers* list = cur_scope_data->xhandlers();
  2314     const int n = list->length();
  2315     for (int i = 0; i < n; i++) {
  2316       XHandler* h = list->handler_at(i);
  2317       if (h->covers(cur_bci)) {
  2318         // h is a potential exception handler => join it
  2319         compilation()->set_has_exception_handlers(true);
  2321         BlockBegin* entry = h->entry_block();
  2322         if (entry == block()) {
  2323           // It's acceptable for an exception handler to cover itself
  2324           // but we don't handle that in the parser currently.  It's
  2325           // very rare so we bail out instead of trying to handle it.
  2326           BAILOUT_("exception handler covers itself", exception_handlers);
  2328         assert(entry->bci() == h->handler_bci(), "must match");
  2329         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
  2331         // previously this was a BAILOUT, but this is not necessary
  2332         // now because asynchronous exceptions are not handled this way.
  2333         assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
  2335         // xhandlers start with an empty expression stack
  2336         if (cur_state->stack_size() != 0) {
  2337           cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
  2339         if (instruction->exception_state() == NULL) {
  2340           instruction->set_exception_state(cur_state);
  2343         // Note: Usually this join must work. However, very
  2344         // complicated jsr-ret structures where we don't ret from
  2345         // the subroutine can cause the objects on the monitor
  2346         // stacks to not match because blocks can be parsed twice.
  2347         // The only test case we've seen so far which exhibits this
  2348         // problem is caught by the infinite recursion test in
  2349         // GraphBuilder::jsr() if the join doesn't work.
  2350         if (!entry->try_merge(cur_state)) {
  2351           BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
  2354         // add current state for correct handling of phi functions at begin of xhandler
  2355         int phi_operand = entry->add_exception_state(cur_state);
  2357         // add entry to the list of xhandlers of this block
  2358         _block->add_exception_handler(entry);
  2360         // add back-edge from xhandler entry to this block
  2361         if (!entry->is_predecessor(_block)) {
  2362           entry->add_predecessor(_block);
  2365         // clone XHandler because phi_operand and scope_count can not be shared
  2366         XHandler* new_xhandler = new XHandler(h);
  2367         new_xhandler->set_phi_operand(phi_operand);
  2368         new_xhandler->set_scope_count(scope_count);
  2369         exception_handlers->append(new_xhandler);
  2371         // fill in exception handler subgraph lazily
  2372         assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
  2373         cur_scope_data->add_to_work_list(entry);
  2375         // stop when reaching catchall
  2376         if (h->catch_type() == 0) {
  2377           return exception_handlers;
  2382     if (exception_handlers->length() == 0) {
  2383       // This scope and all callees do not handle exceptions, so the local
  2384       // variables of this scope are not needed. However, the scope itself is
  2385       // required for a correct exception stack trace -> clear out the locals.
  2386       if (_compilation->env()->jvmti_can_access_local_variables()) {
  2387         cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
  2388       } else {
  2389         cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
  2391       if (prev_state != NULL) {
  2392         prev_state->set_caller_state(cur_state);
  2394       if (instruction->exception_state() == NULL) {
  2395         instruction->set_exception_state(cur_state);
  2399     // Set up iteration for next time.
  2400     // If parsing a jsr, do not grab exception handlers from the
  2401     // parent scopes for this method (already got them, and they
  2402     // needed to be cloned)
  2404     while (cur_scope_data->parsing_jsr()) {
  2405       cur_scope_data = cur_scope_data->parent();
  2408     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
  2409     assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
  2411     prev_state = cur_state;
  2412     cur_state = cur_state->caller_state();
  2413     cur_scope_data = cur_scope_data->parent();
  2414     scope_count++;
  2415   } while (cur_scope_data != NULL);
  2417   return exception_handlers;
  2421 // Helper class for simplifying Phis.
  2422 class PhiSimplifier : public BlockClosure {
  2423  private:
  2424   bool _has_substitutions;
  2425   Value simplify(Value v);
  2427  public:
  2428   PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
  2429     start->iterate_preorder(this);
  2430     if (_has_substitutions) {
  2431       SubstitutionResolver sr(start);
  2434   void block_do(BlockBegin* b);
  2435   bool has_substitutions() const { return _has_substitutions; }
  2436 };
  2439 Value PhiSimplifier::simplify(Value v) {
  2440   Phi* phi = v->as_Phi();
  2442   if (phi == NULL) {
  2443     // no phi function
  2444     return v;
  2445   } else if (v->has_subst()) {
  2446     // already substituted; subst can be phi itself -> simplify
  2447     return simplify(v->subst());
  2448   } else if (phi->is_set(Phi::cannot_simplify)) {
  2449     // already tried to simplify phi before
  2450     return phi;
  2451   } else if (phi->is_set(Phi::visited)) {
  2452     // break cycles in phi functions
  2453     return phi;
  2454   } else if (phi->type()->is_illegal()) {
  2455     // illegal phi functions are ignored anyway
  2456     return phi;
  2458   } else {
  2459     // mark phi function as processed to break cycles in phi functions
  2460     phi->set(Phi::visited);
  2462     // simplify x = [y, x] and x = [y, y] to y
  2463     Value subst = NULL;
  2464     int opd_count = phi->operand_count();
  2465     for (int i = 0; i < opd_count; i++) {
  2466       Value opd = phi->operand_at(i);
  2467       assert(opd != NULL, "Operand must exist!");
  2469       if (opd->type()->is_illegal()) {
  2470         // if one operand is illegal, the entire phi function is illegal
  2471         phi->make_illegal();
  2472         phi->clear(Phi::visited);
  2473         return phi;
  2476       Value new_opd = simplify(opd);
  2477       assert(new_opd != NULL, "Simplified operand must exist!");
  2479       if (new_opd != phi && new_opd != subst) {
  2480         if (subst == NULL) {
  2481           subst = new_opd;
  2482         } else {
  2483           // no simplification possible
  2484           phi->set(Phi::cannot_simplify);
  2485           phi->clear(Phi::visited);
  2486           return phi;
  2491     // successfully simplified phi function
  2492     assert(subst != NULL, "illegal phi function");
  2493     _has_substitutions = true;
  2494     phi->clear(Phi::visited);
  2495     phi->set_subst(subst);
  2497 #ifndef PRODUCT
  2498     if (PrintPhiFunctions) {
  2499       tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
  2501 #endif
  2503     return subst;
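           // Illustrative sketch (assumed example): for a loop like
           //   int i = 0;
           //   while (cond) { use(i); }
           // where i is never reassigned in the body, the loop-header phi for i has
           // the shape x = [0, x]; the recursion above substitutes the phi with the
           // constant 0. A phi whose operands resolve to two different values is
           // marked cannot_simplify and kept.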
  2508 void PhiSimplifier::block_do(BlockBegin* b) {
  2509   for_each_phi_fun(b, phi,
  2510     simplify(phi);
  2511   );
  2513 #ifdef ASSERT
  2514   for_each_phi_fun(b, phi,
  2515                    assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
  2516   );
  2518   ValueStack* state = b->state()->caller_state();
  2519   for_each_state_value(state, value,
  2520     Phi* phi = value->as_Phi();
  2521     assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
  2522   );
  2523 #endif
  2526 // This method is called after all blocks are filled with HIR instructions.
  2527 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x]
  2528 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
  2529   PhiSimplifier simplifier(start);
  2533 void GraphBuilder::connect_to_end(BlockBegin* beg) {
  2534   // setup iteration
  2535   kill_all();
  2536   _block = beg;
  2537   _state = beg->state()->copy_for_parsing();
  2538   _last  = beg;
  2539   iterate_bytecodes_for_block(beg->bci());
  2543 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
  2544 #ifndef PRODUCT
  2545   if (PrintIRDuringConstruction) {
  2546     tty->cr();
  2547     InstructionPrinter ip;
  2548     ip.print_instr(_block); tty->cr();
  2549     ip.print_stack(_block->state()); tty->cr();
  2550     ip.print_inline_level(_block);
  2551     ip.print_head();
  2552     tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
  2554 #endif
  2555   _skip_block = false;
  2556   assert(state() != NULL, "ValueStack missing!");
  2557   CompileLog* log = compilation()->log();
  2558   ciBytecodeStream s(method());
  2559   s.reset_to_bci(bci);
  2560   int prev_bci = bci;
  2561   scope_data()->set_stream(&s);
  2562   // iterate
  2563   Bytecodes::Code code = Bytecodes::_illegal;
  2564   bool push_exception = false;
  2566   if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
  2567     // first thing in the exception entry block should be the exception object.
  2568     push_exception = true;
  2571   while (!bailed_out() && last()->as_BlockEnd() == NULL &&
  2572          (code = stream()->next()) != ciBytecodeStream::EOBC() &&
  2573          (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
  2574     assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
  2576     if (log != NULL)
  2577       log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci());
  2579     // Check for active jsr during OSR compilation
  2580     if (compilation()->is_osr_compile()
  2581         && scope()->is_top_scope()
  2582         && parsing_jsr()
  2583         && s.cur_bci() == compilation()->osr_bci()) {
  2584       bailout("OSR not supported while a jsr is active");
  2587     if (push_exception) {
  2588       apush(append(new ExceptionObject()));
  2589       push_exception = false;
  2592     // handle bytecode
  2593     switch (code) {
  2594       case Bytecodes::_nop            : /* nothing to do */ break;
  2595       case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
  2596       case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
  2597       case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
  2598       case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
  2599       case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
  2600       case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
  2601       case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
  2602       case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
  2603       case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
  2604       case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
  2605       case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
  2606       case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
  2607       case Bytecodes::_fconst_2       : fpush(append(new Constant(new FloatConstant ( 2)))); break;
  2608       case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
  2609       case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
  2610       case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
  2611       case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
  2612       case Bytecodes::_ldc            : // fall through
  2613       case Bytecodes::_ldc_w          : // fall through
  2614       case Bytecodes::_ldc2_w         : load_constant(); break;
  2615       case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
  2616       case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
  2617       case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
  2618       case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
  2619       case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
  2620       case Bytecodes::_iload_0        : load_local(intType   , 0); break;
  2621       case Bytecodes::_iload_1        : load_local(intType   , 1); break;
  2622       case Bytecodes::_iload_2        : load_local(intType   , 2); break;
  2623       case Bytecodes::_iload_3        : load_local(intType   , 3); break;
  2624       case Bytecodes::_lload_0        : load_local(longType  , 0); break;
  2625       case Bytecodes::_lload_1        : load_local(longType  , 1); break;
  2626       case Bytecodes::_lload_2        : load_local(longType  , 2); break;
  2627       case Bytecodes::_lload_3        : load_local(longType  , 3); break;
  2628       case Bytecodes::_fload_0        : load_local(floatType , 0); break;
  2629       case Bytecodes::_fload_1        : load_local(floatType , 1); break;
  2630       case Bytecodes::_fload_2        : load_local(floatType , 2); break;
  2631       case Bytecodes::_fload_3        : load_local(floatType , 3); break;
  2632       case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
  2633       case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
  2634       case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
  2635       case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
  2636       case Bytecodes::_aload_0        : load_local(objectType, 0); break;
  2637       case Bytecodes::_aload_1        : load_local(objectType, 1); break;
  2638       case Bytecodes::_aload_2        : load_local(objectType, 2); break;
  2639       case Bytecodes::_aload_3        : load_local(objectType, 3); break;
  2640       case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
  2641       case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
  2642       case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
  2643       case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
  2644       case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
  2645       case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
  2646       case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
  2647       case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
  2648       case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
  2649       case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
  2650       case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
  2651       case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
  2652       case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
  2653       case Bytecodes::_istore_0       : store_local(intType   , 0); break;
  2654       case Bytecodes::_istore_1       : store_local(intType   , 1); break;
  2655       case Bytecodes::_istore_2       : store_local(intType   , 2); break;
  2656       case Bytecodes::_istore_3       : store_local(intType   , 3); break;
  2657       case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
  2658       case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
  2659       case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
  2660       case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
  2661       case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
  2662       case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
  2663       case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
  2664       case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
  2665       case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
  2666       case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
  2667       case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
  2668       case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
  2669       case Bytecodes::_astore_0       : store_local(objectType, 0); break;
  2670       case Bytecodes::_astore_1       : store_local(objectType, 1); break;
  2671       case Bytecodes::_astore_2       : store_local(objectType, 2); break;
  2672       case Bytecodes::_astore_3       : store_local(objectType, 3); break;
  2673       case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
  2674       case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
  2675       case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
  2676       case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
  2677       case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
  2678       case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
  2679       case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
  2680       case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
  2681       case Bytecodes::_pop            : // fall through
  2682       case Bytecodes::_pop2           : // fall through
  2683       case Bytecodes::_dup            : // fall through
  2684       case Bytecodes::_dup_x1         : // fall through
  2685       case Bytecodes::_dup_x2         : // fall through
  2686       case Bytecodes::_dup2           : // fall through
  2687       case Bytecodes::_dup2_x1        : // fall through
  2688       case Bytecodes::_dup2_x2        : // fall through
  2689       case Bytecodes::_swap           : stack_op(code); break;
  2690       case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
  2691       case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
  2692       case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
  2693       case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
  2694       case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
  2695       case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
  2696       case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
  2697       case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
  2698       case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
  2699       case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
  2700       case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
  2701       case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
  2702       case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
  2703       case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
  2704       case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
  2705       case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
  2706       case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
  2707       case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
  2708       case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
  2709       case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
  2710       case Bytecodes::_ineg           : negate_op(intType   ); break;
  2711       case Bytecodes::_lneg           : negate_op(longType  ); break;
  2712       case Bytecodes::_fneg           : negate_op(floatType ); break;
  2713       case Bytecodes::_dneg           : negate_op(doubleType); break;
  2714       case Bytecodes::_ishl           : shift_op(intType , code); break;
  2715       case Bytecodes::_lshl           : shift_op(longType, code); break;
  2716       case Bytecodes::_ishr           : shift_op(intType , code); break;
  2717       case Bytecodes::_lshr           : shift_op(longType, code); break;
  2718       case Bytecodes::_iushr          : shift_op(intType , code); break;
  2719       case Bytecodes::_lushr          : shift_op(longType, code); break;
  2720       case Bytecodes::_iand           : logic_op(intType , code); break;
  2721       case Bytecodes::_land           : logic_op(longType, code); break;
  2722       case Bytecodes::_ior            : logic_op(intType , code); break;
  2723       case Bytecodes::_lor            : logic_op(longType, code); break;
  2724       case Bytecodes::_ixor           : logic_op(intType , code); break;
  2725       case Bytecodes::_lxor           : logic_op(longType, code); break;
  2726       case Bytecodes::_iinc           : increment(); break;
  2727       case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
  2728       case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
  2729       case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
  2730       case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
  2731       case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
  2732       case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
  2733       case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
  2734       case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
  2735       case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
  2736       case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
  2737       case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
  2738       case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
  2739       case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
  2740       case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
  2741       case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
  2742       case Bytecodes::_lcmp           : compare_op(longType  , code); break;
  2743       case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
  2744       case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
  2745       case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
  2746       case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
  2747       case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
  2748       case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
  2749       case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
  2750       case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
  2751       case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
  2752       case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
  2753       case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
  2754       case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
  2755       case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
  2756       case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
  2757       case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
  2758       case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
  2759       case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
  2760       case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
  2761       case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
  2762       case Bytecodes::_jsr            : jsr(s.get_dest()); break;
  2763       case Bytecodes::_ret            : ret(s.get_index()); break;
  2764       case Bytecodes::_tableswitch    : table_switch(); break;
  2765       case Bytecodes::_lookupswitch   : lookup_switch(); break;
  2766       case Bytecodes::_ireturn        : method_return(ipop()); break;
  2767       case Bytecodes::_lreturn        : method_return(lpop()); break;
  2768       case Bytecodes::_freturn        : method_return(fpop()); break;
  2769       case Bytecodes::_dreturn        : method_return(dpop()); break;
  2770       case Bytecodes::_areturn        : method_return(apop()); break;
  2771       case Bytecodes::_return         : method_return(NULL  ); break;
  2772       case Bytecodes::_getstatic      : // fall through
  2773       case Bytecodes::_putstatic      : // fall through
  2774       case Bytecodes::_getfield       : // fall through
  2775       case Bytecodes::_putfield       : access_field(code); break;
  2776       case Bytecodes::_invokevirtual  : // fall through
  2777       case Bytecodes::_invokespecial  : // fall through
  2778       case Bytecodes::_invokestatic   : // fall through
  2779       case Bytecodes::_invokedynamic  : // fall through
  2780       case Bytecodes::_invokeinterface: invoke(code); break;
  2781       case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
  2782       case Bytecodes::_newarray       : new_type_array(); break;
  2783       case Bytecodes::_anewarray      : new_object_array(); break;
  2784       case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
  2785       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
  2786       case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
  2787       case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
  2788       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
  2789       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
  2790       case Bytecodes::_wide           : ShouldNotReachHere(); break;
  2791       case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
  2792       case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
  2793       case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
  2794       case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
  2795       case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
  2796       case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", NULL);
  2797       default                         : ShouldNotReachHere(); break;
  2800     if (log != NULL)
  2801       log->clear_context(); // skip marker if nothing was printed
  2803     // save current bci to setup Goto at the end
  2804     prev_bci = s.cur_bci();
  2807   CHECK_BAILOUT_(NULL);
  2808   // stop processing of this block (see try_inline_full)
  2809   if (_skip_block) {
  2810     _skip_block = false;
  2811     assert(_last && _last->as_BlockEnd(), "");
  2812     return _last->as_BlockEnd();
  2814   // if any instructions were appended, check whether the last one is a BlockEnd instruction
  2815   BlockEnd* end = last()->as_BlockEnd();
  2816   if (end == NULL) {
  2817     // all blocks must end with a BlockEnd instruction => add a Goto
  2818     end = new Goto(block_at(s.cur_bci()), false);
  2819     append(end);
  2821   assert(end == last()->as_BlockEnd(), "inconsistency");
  2823   assert(end->state() != NULL, "state must already be present");
  2824   assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw");
  2826   // connect to begin & set state
  2827   // NOTE that inlining may have changed the block we are parsing
  2828   block()->set_end(end);
  2829   // propagate state
  2830   for (int i = end->number_of_sux() - 1; i >= 0; i--) {
  2831     BlockBegin* sux = end->sux_at(i);
  2832     assert(sux->is_predecessor(block()), "predecessor missing");
  2833     // be careful, bailout if bytecodes are strange
  2834     if (!sux->try_merge(end->state())) BAILOUT_("block join failed", NULL);
  2835     scope_data()->add_to_work_list(end->sux_at(i));
  2838   scope_data()->set_stream(NULL);
  2840   // done
  2841   return end;
  2845 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
  2846   do {
  2847     if (start_in_current_block_for_inlining && !bailed_out()) {
  2848       iterate_bytecodes_for_block(0);
  2849       start_in_current_block_for_inlining = false;
  2850     } else {
  2851       BlockBegin* b;
  2852       while ((b = scope_data()->remove_from_work_list()) != NULL) {
  2853         if (!b->is_set(BlockBegin::was_visited_flag)) {
  2854           if (b->is_set(BlockBegin::osr_entry_flag)) {
  2855             // we're about to parse the osr entry block, so make sure
  2856             // we setup the OSR edge leading into this block so that
  2857             // Phis get setup correctly.
  2858             setup_osr_entry_block();
  2859             // this is no longer the osr entry block, so clear it.
  2860             b->clear(BlockBegin::osr_entry_flag);
  2862           b->set(BlockBegin::was_visited_flag);
  2863           connect_to_end(b);
  2867   } while (!bailed_out() && !scope_data()->is_work_list_empty());
  2871 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
  2873 void GraphBuilder::initialize() {
  2874   // the following bytecodes are assumed to potentially
  2875   // throw exceptions in compiled code - note that e.g.
  2876   // monitorexit & the return bytecodes do not throw
  2877   // exceptions, since successful monitor pairing has
  2878   // already proved that they succeed
  2879   Bytecodes::Code can_trap_list[] =
  2880     { Bytecodes::_ldc
  2881     , Bytecodes::_ldc_w
  2882     , Bytecodes::_ldc2_w
  2883     , Bytecodes::_iaload
  2884     , Bytecodes::_laload
  2885     , Bytecodes::_faload
  2886     , Bytecodes::_daload
  2887     , Bytecodes::_aaload
  2888     , Bytecodes::_baload
  2889     , Bytecodes::_caload
  2890     , Bytecodes::_saload
  2891     , Bytecodes::_iastore
  2892     , Bytecodes::_lastore
  2893     , Bytecodes::_fastore
  2894     , Bytecodes::_dastore
  2895     , Bytecodes::_aastore
  2896     , Bytecodes::_bastore
  2897     , Bytecodes::_castore
  2898     , Bytecodes::_sastore
  2899     , Bytecodes::_idiv
  2900     , Bytecodes::_ldiv
  2901     , Bytecodes::_irem
  2902     , Bytecodes::_lrem
  2903     , Bytecodes::_getstatic
  2904     , Bytecodes::_putstatic
  2905     , Bytecodes::_getfield
  2906     , Bytecodes::_putfield
  2907     , Bytecodes::_invokevirtual
  2908     , Bytecodes::_invokespecial
  2909     , Bytecodes::_invokestatic
  2910     , Bytecodes::_invokedynamic
  2911     , Bytecodes::_invokeinterface
  2912     , Bytecodes::_new
  2913     , Bytecodes::_newarray
  2914     , Bytecodes::_anewarray
  2915     , Bytecodes::_arraylength
  2916     , Bytecodes::_athrow
  2917     , Bytecodes::_checkcast
  2918     , Bytecodes::_instanceof
  2919     , Bytecodes::_monitorenter
  2920     , Bytecodes::_multianewarray
  2921     };
  2923   // initialize trap tables
  2924   for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
  2925     _can_trap[i] = false;
  2927   // set standard trap info
  2928   for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
  2929     _can_trap[can_trap_list[j]] = true;
  2934 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  2935   assert(entry->is_set(f), "entry/flag mismatch");
  2936   // create header block
  2937   BlockBegin* h = new BlockBegin(entry->bci());
  2938   h->set_depth_first_number(0);
  2940   Value l = h;
  2941   BlockEnd* g = new Goto(entry, false);
  2942   l->set_next(g, entry->bci());
  2943   h->set_end(g);
  2944   h->set(f);
  2945   // setup header block end state
  2946   ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
  2947   assert(s->stack_is_empty(), "must have empty stack at entry point");
  2948   g->set_state(s);
  2949   return h;
  2954 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  2955   BlockBegin* start = new BlockBegin(0);
  2957   // This code eliminates the empty start block at the beginning of
  2958   // each method.  Previously, each method started with the
  2959   // start-block created below, and this block was followed by the
  2960   // header block that was always empty.  This header block is only
  2961   // necessary if std_entry is also a backward branch target because
  2962   // then phi functions may be necessary in the header block.  It's
  2963   // also necessary when profiling so that there's a single block that
  2964   // can increment the interpreter_invocation_count.
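         // For example, a method whose entry bci is also the target of a
         // backward branch gives std_entry a predecessor, so the separate
         // header block created below leaves room for phi functions (and,
         // when profiling, for the invocation counter increment).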
  2965   BlockBegin* new_header_block;
  2966   if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
  2967     new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  2968   } else {
  2969     new_header_block = std_entry;
  2972   // setup start block (root for the IR graph)
  2973   Base* base =
  2974     new Base(
  2975       new_header_block,
  2976       osr_entry
  2977     );
  2978   start->set_next(base, 0);
  2979   start->set_end(base);
  2980   // create & setup state for start block
  2981   start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
  2982   base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
  2984   if (base->std_entry()->state() == NULL) {
  2985     // setup states for header blocks
  2986     base->std_entry()->merge(state);
  2989   assert(base->std_entry()->state() != NULL, "");
  2990   return start;
  2994 void GraphBuilder::setup_osr_entry_block() {
  2995   assert(compilation()->is_osr_compile(), "only for osrs");
  2997   int osr_bci = compilation()->osr_bci();
  2998   ciBytecodeStream s(method());
  2999   s.reset_to_bci(osr_bci);
  3000   s.next();
  3001   scope_data()->set_stream(&s);
  3003   // create a new block to be the osr setup code
  3004   _osr_entry = new BlockBegin(osr_bci);
  3005   _osr_entry->set(BlockBegin::osr_entry_flag);
  3006   _osr_entry->set_depth_first_number(0);
  3007   BlockBegin* target = bci2block()->at(osr_bci);
  3008   assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  3009   // the osr entry has no values for locals
  3010   ValueStack* state = target->state()->copy();
  3011   _osr_entry->set_state(state);
  3013   kill_all();
  3014   _block = _osr_entry;
  3015   _state = _osr_entry->state()->copy();
  3016   assert(_state->bci() == osr_bci, "mismatch");
  3017   _last  = _osr_entry;
  3018   Value e = append(new OsrEntry());
  3019   e->set_needs_null_check(false);
  3021   // OSR buffer is
  3022   //
  3023   // locals[nlocals-1..0]
  3024   // monitors[number_of_locks-1..0]
  3025   //
  3026   // locals is a direct copy of the interpreter frame, so in the osr buffer
  3027   // the first slot in the local array is the last local from the interpreter
  3028   // and the last slot is local[0] (the receiver) from the interpreter
  3029   //
  3030   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  3031   // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock
  3032   // in the interpreter frame (the method lock if a sync method)
  3034   // Initialize monitors in the compiled activation.
  3036   int index;
  3037   Value local;
  3039   // find all the locals that the interpreter thinks contain live oops
  3040   const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
  3042   // compute the offset into the locals so that we can treat the buffer
  3043   // as if the locals were still in the interpreter frame
  3044   int locals_offset = BytesPerWord * (method()->max_locals() - 1);
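         // For example, with max_locals == 3 the loop below reads the
         // interpreter's local[0] from the OSR buffer at offset
         // 2 * BytesPerWord and a one-slot local[2] from offset 0,
         // mirroring the interpreter frame layout described above.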
  3045   for_each_local_value(state, index, local) {
  3046     int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
  3047     Value get;
  3048     if (local->type()->is_object_kind() && !live_oops.at(index)) {
  3049       // The interpreter thinks this local is dead but the compiler
  3050       // doesn't, so pretend that the interpreter passed in null.
  3051       get = append(new Constant(objectNull));
  3052     } else {
  3053       get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
  3054                                     append(new Constant(new IntConstant(offset))),
  3055                                     0,
  3056                                     true /*unaligned*/, true /*wide*/));
  3058     _state->store_local(index, get);
  3061   // the storage for the OSR buffer is freed manually in the LIRGenerator.
  3063   assert(state->caller_state() == NULL, "should be top scope");
  3064   state->clear_locals();
  3065   Goto* g = new Goto(target, false);
  3066   append(g);
  3067   _osr_entry->set_end(g);
  3068   target->merge(_osr_entry->end()->state());
  3070   scope_data()->set_stream(NULL);
  3074 ValueStack* GraphBuilder::state_at_entry() {
  3075   ValueStack* state = new ValueStack(scope(), NULL);
  3077   // Set up locals for receiver
  3078   int idx = 0;
  3079   if (!method()->is_static()) {
  3080     // we should always see the receiver
  3081     state->store_local(idx, new Local(method()->holder(), objectType, idx));
  3082     idx = 1;
  3085   // Set up locals for incoming arguments
  3086   ciSignature* sig = method()->signature();
  3087   for (int i = 0; i < sig->count(); i++) {
  3088     ciType* type = sig->type_at(i);
  3089     BasicType basic_type = type->basic_type();
  3090     // don't allow T_ARRAY to propagate into locals types
  3091     if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  3092     ValueType* vt = as_ValueType(basic_type);
  3093     state->store_local(idx, new Local(type, vt, idx));
  3094     idx += type->size();
  3097   // lock synchronized method
  3098   if (method()->is_synchronized()) {
  3099     state->lock(NULL);
  3102   return state;
  3106 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  3107   : _scope_data(NULL)
  3108   , _instruction_count(0)
  3109   , _osr_entry(NULL)
  3110   , _memory(new MemoryBuffer())
  3111   , _compilation(compilation)
  3112   , _inline_bailout_msg(NULL)
  3114   int osr_bci = compilation->osr_bci();
  3116   // determine entry points and bci2block mapping
  3117   BlockListBuilder blm(compilation, scope, osr_bci);
  3118   CHECK_BAILOUT();
  3120   BlockList* bci2block = blm.bci2block();
  3121   BlockBegin* start_block = bci2block->at(0);
  3123   push_root_scope(scope, bci2block, start_block);
  3125   // setup state for std entry
  3126   _initial_state = state_at_entry();
  3127   start_block->merge(_initial_state);
  3129   // complete graph
  3130   _vmap        = new ValueMap();
  3131   switch (scope->method()->intrinsic_id()) {
  3132   case vmIntrinsics::_dabs          : // fall through
  3133   case vmIntrinsics::_dsqrt         : // fall through
  3134   case vmIntrinsics::_dsin          : // fall through
  3135   case vmIntrinsics::_dcos          : // fall through
  3136   case vmIntrinsics::_dtan          : // fall through
  3137   case vmIntrinsics::_dlog          : // fall through
  3138   case vmIntrinsics::_dlog10        : // fall through
  3139   case vmIntrinsics::_dexp          : // fall through
  3140   case vmIntrinsics::_dpow          : // fall through
  3142       // Compiles where the root method is an intrinsic need a special
  3143       // compilation environment because the bytecodes for the method
  3144       // shouldn't be parsed during the compilation, only the special
  3145       // Intrinsic node should be emitted.  If this isn't done, the
  3146       // code for the inlined version will be different from the root
  3147       // compiled version, which could lead to monotonicity problems on
  3148       // intel.
  3150       // Set up a stream so that appending instructions works properly.
  3151       ciBytecodeStream s(scope->method());
  3152       s.reset_to_bci(0);
  3153       scope_data()->set_stream(&s);
  3154       s.next();
  3156       // setup the initial block state
  3157       _block = start_block;
  3158       _state = start_block->state()->copy_for_parsing();
  3159       _last  = start_block;
  3160       load_local(doubleType, 0);
  3161       if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) {
  3162         load_local(doubleType, 2);
  3165       // Emit the intrinsic node.
  3166       bool result = try_inline_intrinsics(scope->method());
  3167       if (!result) BAILOUT("failed to inline intrinsic");
  3168       method_return(dpop());
  3170       // connect the begin and end blocks and we're all done.
  3171       BlockEnd* end = last()->as_BlockEnd();
  3172       block()->set_end(end);
  3173       break;
  3176   case vmIntrinsics::_Reference_get:
  3179         // With java.lang.ref.Reference.get() we must go through the
  3180         // intrinsic - when G1 is enabled - even when get() is the root
  3181         // method of the compile so that, if necessary, the value in
  3182         // the referent field of the reference object gets recorded by
  3183         // the pre-barrier code.
  3184         // Specifically, if G1 is enabled, the value in the referent
  3185         // field is recorded by the G1 SATB pre barrier. This will
  3186         // result in the referent being marked live and the reference
  3187         // object removed from the list of discovered references during
  3188         // reference processing.
  3190         // Also we need the intrinsic to prevent commoning reads from this field
  3191         // across safepoints since GC can change its value.
  3193         // Set up a stream so that appending instructions works properly.
  3194         ciBytecodeStream s(scope->method());
  3195         s.reset_to_bci(0);
  3196         scope_data()->set_stream(&s);
  3197         s.next();
  3199         // setup the initial block state
  3200         _block = start_block;
  3201         _state = start_block->state()->copy_for_parsing();
  3202         _last  = start_block;
  3203         load_local(objectType, 0);
  3205         // Emit the intrinsic node.
  3206         bool result = try_inline_intrinsics(scope->method());
  3207         if (!result) BAILOUT("failed to inline intrinsic");
  3208         method_return(apop());
  3210         // connect the begin and end blocks and we're all done.
  3211         BlockEnd* end = last()->as_BlockEnd();
  3212         block()->set_end(end);
  3213         break;
  3215       // Otherwise, fall thru
  3218   default:
  3219     scope_data()->add_to_work_list(start_block);
  3220     iterate_all_blocks();
  3221     break;
  3223   CHECK_BAILOUT();
  3225   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
  3227   eliminate_redundant_phis(_start);
  3229   NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  3230   // for osr compile, bailout if some requirements are not fulfilled
  3231   if (osr_bci != -1) {
  3232     BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
  3233     assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
  3235     // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
  3236     if (!osr_block->state()->stack_is_empty()) {
  3237       BAILOUT("stack not empty at OSR entry point");
  3240 #ifndef PRODUCT
  3241   if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
  3242 #endif
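       // The copy_state_* helpers below snapshot the current ValueStack:
       // copy_state_before records the state before the current bytecode,
       // copy_state_exhandling does the same but returns NULL when there is
       // no exception handler that could use it, and copy_state_for_exception
       // falls back to a (possibly empty) exception state so that locals stay
       // visible when JVMTI can access local variables.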
  3246 ValueStack* GraphBuilder::copy_state_before() {
  3247   return copy_state_before_with_bci(bci());
  3250 ValueStack* GraphBuilder::copy_state_exhandling() {
  3251   return copy_state_exhandling_with_bci(bci());
  3254 ValueStack* GraphBuilder::copy_state_for_exception() {
  3255   return copy_state_for_exception_with_bci(bci());
  3258 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
  3259   return state()->copy(ValueStack::StateBefore, bci);
  3262 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
  3263   if (!has_handler()) return NULL;
  3264   return state()->copy(ValueStack::StateBefore, bci);
  3267 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
  3268   ValueStack* s = copy_state_exhandling_with_bci(bci);
  3269   if (s == NULL) {
  3270     if (_compilation->env()->jvmti_can_access_local_variables()) {
  3271       s = state()->copy(ValueStack::ExceptionState, bci);
  3272     } else {
  3273       s = state()->copy(ValueStack::EmptyExceptionState, bci);
  3276   return s;
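       // recursive_inline_level counts how many times cur_callee already
       // appears in the chain of caller scopes; try_inline_full uses the
       // result to cap recursion at MaxRecursiveInlineLevel. For example, if
       // A inlines B and B calls A again, that call sees a recursion level
       // of 1 for callee A.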
  3279 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
  3280   int recur_level = 0;
  3281   for (IRScope* s = scope(); s != NULL; s = s->caller()) {
  3282     if (s->method() == cur_callee) {
  3283       ++recur_level;
  3286   return recur_level;
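       // try_inline dispatches in a fixed order: CompilerOracle/annotation
       // exclusions first, then method handle intrinsics, then regular
       // intrinsics, and finally full bytecode parsing via try_inline_full
       // once check_can_parse confirms the callee's bytecodes can be parsed.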
  3290 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
  3291   const char* msg = NULL;
  3293   // clear out any existing inline bailout condition
  3294   clear_inline_bailout();
  3296   // exclude methods we don't want to inline
  3297   msg = should_not_inline(callee);
  3298   if (msg != NULL) {
  3299     print_inlining(callee, msg, /*success*/ false);
  3300     return false;
  3303   // method handle invokes
  3304   if (callee->is_method_handle_intrinsic()) {
  3305     return try_method_handle_inline(callee);
  3308   // handle intrinsics
  3309   if (callee->intrinsic_id() != vmIntrinsics::_none) {
  3310     if (try_inline_intrinsics(callee)) {
  3311       print_inlining(callee, "intrinsic");
  3312       return true;
  3314     // try normal inlining
  3317   // certain methods cannot be parsed at all
  3318   msg = check_can_parse(callee);
  3319   if (msg != NULL) {
  3320     print_inlining(callee, msg, /*success*/ false);
  3321     return false;
  3324   // If bytecode not set use the current one.
  3325   if (bc == Bytecodes::_illegal) {
  3326     bc = code();
  3328   if (try_inline_full(callee, holder_known, bc, receiver))
  3329     return true;
  3331   // Entire compilation could fail during try_inline_full call.
  3332   // In that case printing inlining decision info is useless.
  3333   if (!bailed_out())
  3334     print_inlining(callee, _inline_bailout_msg, /*success*/ false);
  3336   return false;
  3340 const char* GraphBuilder::check_can_parse(ciMethod* callee) const {
  3341   // Certain methods cannot be parsed at all:
  3342   if ( callee->is_native())            return "native method";
  3343   if ( callee->is_abstract())          return "abstract method";
  3344   if (!callee->can_be_compiled())      return "not compilable (disabled)";
  3345   return NULL;
  3349 // negative filter: should callee NOT be inlined?  returns NULL if ok to inline, or a rejection msg
  3350 const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
  3351   if ( callee->should_exclude())       return "excluded by CompilerOracle";
  3352   if ( callee->should_not_inline())    return "disallowed by CompilerOracle";
  3353   if ( callee->dont_inline())          return "don't inline by annotation";
  3354   return NULL;
  3358 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  3359   if (callee->is_synchronized()) {
  3360     // We don't currently support any synchronized intrinsics
  3361     return false;
  3364   // callee seems like a good candidate
  3365   // determine id
  3366   vmIntrinsics::ID id = callee->intrinsic_id();
  3367   if (!InlineNatives && id != vmIntrinsics::_Reference_get) {
  3368     // InlineNatives does not control Reference.get
  3369     INLINE_BAILOUT("intrinsic method inlining disabled");
  3371   bool preserves_state = false;
  3372   bool cantrap = true;
  3373   switch (id) {
  3374     case vmIntrinsics::_arraycopy:
  3375       if (!InlineArrayCopy) return false;
  3376       break;
  3378 #ifdef TRACE_HAVE_INTRINSICS
  3379     case vmIntrinsics::_classID:
  3380     case vmIntrinsics::_threadID:
  3381       preserves_state = true;
  3382       cantrap = true;
  3383       break;
  3385     case vmIntrinsics::_counterTime:
  3386       preserves_state = true;
  3387       cantrap = false;
  3388       break;
  3389 #endif
  3391     case vmIntrinsics::_currentTimeMillis:
  3392     case vmIntrinsics::_nanoTime:
  3393       preserves_state = true;
  3394       cantrap = false;
  3395       break;
  3397     case vmIntrinsics::_floatToRawIntBits   :
  3398     case vmIntrinsics::_intBitsToFloat      :
  3399     case vmIntrinsics::_doubleToRawLongBits :
  3400     case vmIntrinsics::_longBitsToDouble    :
  3401       if (!InlineMathNatives) return false;
  3402       preserves_state = true;
  3403       cantrap = false;
  3404       break;
  3406     case vmIntrinsics::_getClass      :
  3407     case vmIntrinsics::_isInstance    :
  3408       if (!InlineClassNatives) return false;
  3409       preserves_state = true;
  3410       break;
  3412     case vmIntrinsics::_currentThread :
  3413       if (!InlineThreadNatives) return false;
  3414       preserves_state = true;
  3415       cantrap = false;
  3416       break;
  3418     case vmIntrinsics::_dabs          : // fall through
  3419     case vmIntrinsics::_dsqrt         : // fall through
  3420     case vmIntrinsics::_dsin          : // fall through
  3421     case vmIntrinsics::_dcos          : // fall through
  3422     case vmIntrinsics::_dtan          : // fall through
  3423     case vmIntrinsics::_dlog          : // fall through
  3424     case vmIntrinsics::_dlog10        : // fall through
  3425     case vmIntrinsics::_dexp          : // fall through
  3426     case vmIntrinsics::_dpow          : // fall through
  3427       if (!InlineMathNatives) return false;
  3428       cantrap = false;
  3429       preserves_state = true;
  3430       break;
  3432     // Use special nodes for Unsafe instructions so we can more easily
  3433     // perform an address-mode optimization on the raw variants
  3434     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
  3435     case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
  3436     case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
  3437     case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
  3438     case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
  3439     case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
  3440     case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
  3441     case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
  3442     case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
  3444     case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
  3445     case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
  3446     case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
  3447     case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
  3448     case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
  3449     case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
  3450     case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
  3451     case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
  3452     case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
  3454     case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
  3455     case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
  3456     case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
  3457     case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
  3458     case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
  3459     case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
  3460     case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
  3461     case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
  3462     case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
  3464     case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  3465     case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
  3466     case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
  3467     case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
  3468     case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
  3469     case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
  3470     case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
  3471     case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
  3472     case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
  3474     case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
  3475     case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
  3476     case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
  3477     case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
  3478     case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
  3479     case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
  3480     case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);
  3482     case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
  3483     case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
  3484     case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
  3485     case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
  3486     case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
  3487     case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
  3488     case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);
  3490     case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
  3491     case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
  3492     case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true,  false);
  3493     case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true,  true);
  3495     case vmIntrinsics::_checkIndex    :
  3496       if (!InlineNIOCheckIndex) return false;
  3497       preserves_state = true;
  3498       break;
  3499     case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
  3500     case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
  3501     case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
  3503     case vmIntrinsics::_compareAndSwapLong:
  3504       if (!VM_Version::supports_cx8()) return false;
  3505       // fall through
  3506     case vmIntrinsics::_compareAndSwapInt:
  3507     case vmIntrinsics::_compareAndSwapObject:
  3508       append_unsafe_CAS(callee);
  3509       return true;
  3511     case vmIntrinsics::_getAndAddInt:
  3512       if (!VM_Version::supports_atomic_getadd4()) {
  3513         return false;
  3515       return append_unsafe_get_and_set_obj(callee, true);
  3516     case vmIntrinsics::_getAndAddLong:
  3517       if (!VM_Version::supports_atomic_getadd8()) {
  3518         return false;
  3520       return append_unsafe_get_and_set_obj(callee, true);
  3521     case vmIntrinsics::_getAndSetInt:
  3522       if (!VM_Version::supports_atomic_getset4()) {
  3523         return false;
  3525       return append_unsafe_get_and_set_obj(callee, false);
  3526     case vmIntrinsics::_getAndSetLong:
  3527       if (!VM_Version::supports_atomic_getset8()) {
  3528         return false;
  3530       return append_unsafe_get_and_set_obj(callee, false);
  3531     case vmIntrinsics::_getAndSetObject:
  3532 #ifdef _LP64
  3533       if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) {
  3534         return false;
  3536       if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) {
  3537         return false;
  3539 #else
  3540       if (!VM_Version::supports_atomic_getset4()) {
  3541         return false;
  3543 #endif
  3544       return append_unsafe_get_and_set_obj(callee, false);
  3546     case vmIntrinsics::_Reference_get:
  3547       // Use the intrinsic version of Reference.get() so that the value in
  3548       // the referent field can be registered by the G1 pre-barrier code.
  3549       // Also to prevent commoning reads from this field across safepoints
  3550       // since GC can change its value.
  3551       preserves_state = true;
  3552       break;
  3554     case vmIntrinsics::_updateCRC32:
  3555     case vmIntrinsics::_updateBytesCRC32:
  3556     case vmIntrinsics::_updateByteBufferCRC32:
  3557       if (!UseCRC32Intrinsics) return false;
  3558       cantrap = false;
  3559       preserves_state = true;
  3560       break;
  3562     case vmIntrinsics::_loadFence :
  3563     case vmIntrinsics::_storeFence:
  3564     case vmIntrinsics::_fullFence :
  3565       break;
  3567     default                       : return false; // do not inline
  3569   // create intrinsic node
  3570   const bool has_receiver = !callee->is_static();
  3571   ValueType* result_type = as_ValueType(callee->return_type());
  3572   ValueStack* state_before = copy_state_for_exception();
  3574   Values* args = state()->pop_arguments(callee->arg_size());
  3576   if (is_profiling()) {
  3577     // Don't profile in the special case where the root method
  3578     // is the intrinsic
  3579     if (callee != method()) {
  3580       // Note that we'd collect profile data in this method if we wanted it.
  3581       compilation()->set_would_profile(true);
  3582       if (profile_calls()) {
  3583         Value recv = NULL;
  3584         if (has_receiver) {
  3585           recv = args->at(0);
  3586           null_check(recv);
  3588         profile_call(callee, recv, NULL, collect_args_for_profiling(args, callee, true), true);
  3593   Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, state_before,
  3594                                     preserves_state, cantrap);
  3595   // append instruction & push result
  3596   Value value = append_split(result);
  3597   if (result_type != voidType) push(result_type, value);
  3599   if (callee != method() && profile_return() && result_type->is_object_kind()) {
  3600     profile_return_type(result, callee);
  3603   // done
  3604   return true;
  3608 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  3609   // Introduce a new callee continuation point - all Ret instructions
  3610   // will be replaced with Gotos to this point.
  3611   BlockBegin* cont = block_at(next_bci());
  3612   assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");
  3614   // Note: can not assign state to continuation yet, as we have to
  3615   // pick up the state from the Ret instructions.
  3617   // Push callee scope
  3618   push_scope_for_jsr(cont, jsr_dest_bci);
  3620   // Temporarily set up bytecode stream so we can append instructions
  3621   // (only using the bci of this stream)
  3622   scope_data()->set_stream(scope_data()->parent()->stream());
  3624   BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  3625   assert(jsr_start_block != NULL, "jsr start block must exist");
  3626   assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  3627   Goto* goto_sub = new Goto(jsr_start_block, false);
  3628   // Must copy state to avoid wrong sharing when parsing bytecodes
  3629   assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
  3630   jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
  3631   append(goto_sub);
  3632   _block->set_end(goto_sub);
  3633   _last = _block = jsr_start_block;
  3635   // Clear out bytecode stream
  3636   scope_data()->set_stream(NULL);
  3638   scope_data()->add_to_work_list(jsr_start_block);
  3640   // Ready to resume parsing in subroutine
  3641   iterate_all_blocks();
  3643   // If we bailed out during parsing, return immediately (this is bad news)
  3644   CHECK_BAILOUT_(false);
  3646   // Detect whether the continuation can actually be reached. If not,
  3647   // it has not had state set by the join() operations in
  3648   // iterate_bytecodes_for_block()/ret() and we should not touch the
  3649   // iteration state. The calling activation of
  3650   // iterate_bytecodes_for_block will then complete normally.
  3651   if (cont->state() != NULL) {
  3652     if (!cont->is_set(BlockBegin::was_visited_flag)) {
  3653       // add continuation to work list instead of parsing it immediately
  3654       scope_data()->parent()->add_to_work_list(cont);
  3658   assert(jsr_continuation() == cont, "continuation must not have changed");
  3659   assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
  3660          jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
  3661          "continuation can only be visited in case of backward branches");
  3662   assert(_last && _last->as_BlockEnd(), "block must have end");
  3664   // continuation is in work list, so end iteration of current block
  3665   _skip_block = true;
  3666   pop_scope_for_jsr();
  3668   return true;
  3672 // Inline the entry of a synchronized method as a monitor enter and
  3673 // register the exception handler which releases the monitor if an
  3674 // exception is thrown within the callee. Note that the monitor enter
  3675 // cannot throw an exception itself, because the receiver is
  3676 // guaranteed to be non-null by the explicit null check at the
  3677 // beginning of inlining.
  3678 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
  3679   assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
  3681   monitorenter(lock, SynchronizationEntryBCI);
  3682   assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
  3683   _last->set_needs_null_check(false);
  3685   sync_handler->set(BlockBegin::exception_entry_flag);
  3686   sync_handler->set(BlockBegin::is_on_work_list_flag);
  3688   ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  3689   XHandler* h = new XHandler(desc);
  3690   h->set_entry_block(sync_handler);
  3691   scope_data()->xhandlers()->append(h);
  3692   scope_data()->set_has_handler();
  3696 // If an exception is thrown and not handled within an inlined
  3697 // synchronized method, the monitor must be released before the
  3698 // exception is rethrown in the outer scope. Generate the appropriate
  3699 // instructions here.
  3700 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
  3701   BlockBegin* orig_block = _block;
  3702   ValueStack* orig_state = _state;
  3703   Instruction* orig_last = _last;
  3704   _last = _block = sync_handler;
  3705   _state = sync_handler->state()->copy();
  3707   assert(sync_handler != NULL, "handler missing");
  3708   assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here");
  3710   assert(lock != NULL || default_handler, "lock or handler missing");
  3712   XHandler* h = scope_data()->xhandlers()->remove_last();
  3713   assert(h->entry_block() == sync_handler, "corrupt list of handlers");
  3715   block()->set(BlockBegin::was_visited_flag);
  3716   Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
  3717   assert(exception->is_pinned(), "must be");
  3719   int bci = SynchronizationEntryBCI;
  3720   if (compilation()->env()->dtrace_method_probes()) {
  3721     // Report exit from inline methods.  We don't have a stream here
  3722     // so pass an explicit bci of SynchronizationEntryBCI.
  3723     Values* args = new Values(1);
  3724     args->push(append_with_bci(new Constant(new MethodConstant(method())), bci));
  3725     append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
  3728   if (lock) {
  3729     assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
  3730     if (!lock->is_linked()) {
  3731       lock = append_with_bci(lock, bci);
  3734     // exit the monitor in the context of the synchronized method
  3735     monitorexit(lock, bci);
  3737     // exit the context of the synchronized method
  3738     if (!default_handler) {
  3739       pop_scope();
  3740       bci = _state->caller_state()->bci();
  3741       _state = _state->caller_state()->copy_for_parsing();
  3745   // perform the throw as if at the call site
  3746   apush(exception);
  3747   throw_op(bci);
  3749   BlockEnd* end = last()->as_BlockEnd();
  3750   block()->set_end(end);
  3752   _block = orig_block;
  3753   _state = orig_state;
  3754   _last = orig_last;
  3758 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
  3759   assert(!callee->is_native(), "callee must not be native");
  3760   if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
  3761     INLINE_BAILOUT("inlining prohibited by policy");
  3763   // first perform tests of things it's not possible to inline
  3764   if (callee->has_exception_handlers() &&
  3765       !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
  3766   if (callee->is_synchronized() &&
  3767       !InlineSynchronizedMethods         ) INLINE_BAILOUT("callee is synchronized");
  3768   if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
  3769   if (!callee->has_balanced_monitors())    INLINE_BAILOUT("callee's monitors do not match");
  3771   // Proper inlining of methods with jsrs requires a little more work.
  3772   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
  3774   // When SSE2 is used on intel, no special handling of strictfp is
  3775   // needed; since the enum-constant is fixed at compile time, the
  3776   // runtime check for UseSSE < 2 is needed here
  3777   if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
  3778     INLINE_BAILOUT("caller and callee have different strict fp requirements");
  3781   if (is_profiling() && !callee->ensure_method_data()) {
  3782     INLINE_BAILOUT("mdo allocation failed");
  3785   // now perform tests that are based on flag settings
  3786   if (callee->force_inline() || callee->should_inline()) {
  3787     if (inline_level() > MaxForceInlineLevel                    ) INLINE_BAILOUT("MaxForceInlineLevel");
  3788     if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
  3790     const char* msg = "";
  3791     if (callee->force_inline())  msg = "force inline by annotation";
  3792     if (callee->should_inline()) msg = "force inline by CompilerOracle";
  3793     print_inlining(callee, msg);
  3794   } else {
  3795     // use heuristic controls on inlining
  3796     if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("inlining too deep");
  3797     if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
  3798     if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");
  3800     // don't inline throwable methods unless the inlining tree is rooted in a throwable class
  3801     if (callee->name() == ciSymbol::object_initializer_name() &&
  3802         callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  3803       // Throwable constructor call
  3804       IRScope* top = scope();
  3805       while (top->caller() != NULL) {
  3806         top = top->caller();
  3808       if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
  3809         INLINE_BAILOUT("don't inline Throwable constructors");
  3813     if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
  3814       INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
  3816     // printing
  3817     print_inlining(callee);
  3820   // NOTE: Bailouts from this point on, which occur at the
  3821   // GraphBuilder level, do not cause bailout just of the inlining but
  3822   // in fact of the entire compilation.
  3824   BlockBegin* orig_block = block();
  3826   const bool is_invokedynamic = bc == Bytecodes::_invokedynamic;
  3827   const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic);
  3829   const int args_base = state()->stack_size() - callee->arg_size();
  3830   assert(args_base >= 0, "stack underflow during inlining");
  3832   // Insert null check if necessary
  3833   Value recv = NULL;
  3834   if (has_receiver) {
  3835     // note: null check must happen even if first instruction of callee does
  3836     //       an implicit null check since the callee is in a different scope
  3837     //       and we must make sure exception handling does the right thing
  3838     assert(!callee->is_static(), "callee must not be static");
  3839     assert(callee->arg_size() > 0, "must have at least a receiver");
  3840     recv = state()->stack_at(args_base);
  3841     null_check(recv);
  3844   if (is_profiling()) {
  3845     // Note that we'd collect profile data in this method if we wanted it.
  3846     // this may be redundant here...
  3847     compilation()->set_would_profile(true);
  3849     if (profile_calls()) {
  3850       int start = 0;
  3851       Values* obj_args = args_list_for_profiling(callee, start, has_receiver);
  3852       if (obj_args != NULL) {
  3853         int s = obj_args->size();
  3854         // if called through method handle invoke, some arguments may have been popped
  3855         for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
  3856           Value v = state()->stack_at_inc(i);
  3857           if (v->type()->is_object_kind()) {
  3858             obj_args->push(v);
  3859             j++;
  3862         check_args_for_profiling(obj_args, s);
  3864       profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
  3868   // Introduce a new callee continuation point - if the callee has
  3869   // more than one return instruction or the return does not allow
  3870   // fall-through of control flow, all return instructions of the
  3871   // callee will need to be replaced by Goto's pointing to this
  3872   // continuation point.
  3873   BlockBegin* cont = block_at(next_bci());
  3874   bool continuation_existed = true;
  3875   if (cont == NULL) {
  3876     cont = new BlockBegin(next_bci());
  3877     // low number so that continuation gets parsed as early as possible
  3878     cont->set_depth_first_number(0);
  3879 #ifndef PRODUCT
  3880     if (PrintInitialBlockList) {
  3881       tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
  3882                     cont->block_id(), cont->bci(), bci());
  3884 #endif
  3885     continuation_existed = false;
  3887   // Record number of predecessors of continuation block before
  3888   // inlining, to detect if inlined method has edges to its
  3889   // continuation after inlining.
  3890   int continuation_preds = cont->number_of_preds();
  3892   // Push callee scope
  3893   push_scope(callee, cont);
  3895   // the BlockListBuilder for the callee could have bailed out
  3896   if (bailed_out())
  3897       return false;
  3899   // Temporarily set up bytecode stream so we can append instructions
  3900   // (only using the bci of this stream)
  3901   scope_data()->set_stream(scope_data()->parent()->stream());
  3903   // Pass parameters into callee state: add assignments
  3904   // note: this will also ensure that all arguments are computed before being passed
  3905   ValueStack* callee_state = state();
  3906   ValueStack* caller_state = state()->caller_state();
  3907   for (int i = args_base; i < caller_state->stack_size(); ) {
  3908     const int arg_no = i - args_base;
  3909     Value arg = caller_state->stack_at_inc(i);
  3910     store_local(callee_state, arg, arg_no);
  3913   // Remove args from stack.
  3914   // Note that we preserve locals state in case we can use it later
  3915   // (see use of pop_scope() below)
  3916   caller_state->truncate_stack(args_base);
  3917   assert(callee_state->stack_size() == 0, "callee stack must be empty");
  3919   Value lock;
  3920   BlockBegin* sync_handler;
  3922   // Inline the locking of the receiver if the callee is synchronized
  3923   if (callee->is_synchronized()) {
  3924     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
  3925                                : state()->local_at(0);
  3926     sync_handler = new BlockBegin(SynchronizationEntryBCI);
  3927     inline_sync_entry(lock, sync_handler);
  3930   if (compilation()->env()->dtrace_method_probes()) {
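           // Notify dtrace of entry into the inlined method via a runtime call.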
  3931     Values* args = new Values(1);
  3932     args->push(append(new Constant(new MethodConstant(method()))));
  3933     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
  3936   if (profile_inlined_calls()) {
  3937     profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
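         // block_at(0) is non-NULL only if the callee's first block is a loop header
         // (see push_scope()). In that case branch to it explicitly and resume
         // parsing in the callee's entry block instead of the current block.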
  3940   BlockBegin* callee_start_block = block_at(0);
  3941   if (callee_start_block != NULL) {
  3942     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
  3943     Goto* goto_callee = new Goto(callee_start_block, false);
  3944     // The state for this goto is in the scope of the callee, so use
  3945     // the entry bci for the callee instead of the call site bci.
  3946     append_with_bci(goto_callee, 0);
  3947     _block->set_end(goto_callee);
  3948     callee_start_block->merge(callee_state);
  3950     _last = _block = callee_start_block;
  3952     scope_data()->add_to_work_list(callee_start_block);
  3955   // Clear out bytecode stream
  3956   scope_data()->set_stream(NULL);
  3958   // Ready to resume parsing in callee (either in the same block we
  3959   // were in before or in the callee's start block)
  3960   iterate_all_blocks(callee_start_block == NULL);
  3962   // If we bailed out during parsing, return immediately (this is bad news)
  3963   if (bailed_out())
  3964       return false;
  3966   // iterate_all_blocks theoretically traverses in random order; in
  3967   // practice, we have only traversed the continuation if we are
  3968   // inlining into a subroutine
  3969   assert(continuation_existed ||
  3970          !continuation()->is_set(BlockBegin::was_visited_flag),
  3971          "continuation should not have been parsed yet if we created it");
  3973   // At this point we are almost ready to return and resume parsing of
  3974   // the caller back in the GraphBuilder. The only thing we want to do
  3975   // first is an optimization: during parsing of the callee we
  3976   // generated at least one Goto to the continuation block. If we
  3977   // generated exactly one, and if the inlined method spanned exactly
  3978   // one block (and we didn't have to Goto its entry), then we snip
  3979   // off the Goto to the continuation, allowing control to fall
  3980   // through back into the caller block and effectively performing
  3981   // block merging. This allows load elimination and CSE to take place
  3982   // across multiple callee scopes if they are relatively simple, and
  3983   // is currently essential to making inlining profitable.
  3984   if (num_returns() == 1
  3985       && block() == orig_block
  3986       && block() == inline_cleanup_block()) {
  3987     _last  = inline_cleanup_return_prev();
  3988     _state = inline_cleanup_state();
  3989   } else if (continuation_preds == cont->number_of_preds()) {
  3990     // Inlining has made the instructions after the invoke in the
  3991     // caller unreachable, so skip filling this block with
  3992     // instructions!
  3993     assert(cont == continuation(), "");
  3994     assert(_last && _last->as_BlockEnd(), "");
  3995     _skip_block = true;
  3996   } else {
  3997     // Resume parsing in continuation block unless it was already parsed.
  3998     // Note that if we don't change _last here, iteration in
  3999     // iterate_bytecodes_for_block will stop when we return.
  4000     if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
  4001       // add continuation to work list instead of parsing it immediately
  4002       assert(_last && _last->as_BlockEnd(), "");
  4003       scope_data()->parent()->add_to_work_list(continuation());
  4004       _skip_block = true;
  4008   // Fill the exception handler for synchronized methods with instructions
  4009   if (callee->is_synchronized() && sync_handler->state() != NULL) {
  4010     fill_sync_handler(lock, sync_handler);
  4011   } else {
  4012     pop_scope();
  4015   compilation()->notice_inlined_method(callee);
  4017   return true;
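       // Try to inline an invocation of a method handle intrinsic (invokeBasic or
       // one of the linkTo* adapters) when its target can be determined at compile time.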
  4021 bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
  4022   ValueStack* state_before = state()->copy_for_parsing();
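         // Remember the current state so it can be restored if the method handle
         // call cannot be inlined after all (see set_state() below).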
  4023   vmIntrinsics::ID iid = callee->intrinsic_id();
  4024   switch (iid) {
  4025   case vmIntrinsics::_invokeBasic:
  4027       // get MethodHandle receiver
  4028       const int args_base = state()->stack_size() - callee->arg_size();
  4029       ValueType* type = state()->stack_at(args_base)->type();
  4030       if (type->is_constant()) {
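                 // The MethodHandle receiver is a compile-time constant, so the
                 // actual target method is known and may be inlined directly.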
  4031         ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
  4032         // We don't do CHA here, so only inline static and statically bindable methods.
  4033         if (target->is_static() || target->can_be_statically_bound()) {
  4034           Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
  4035           if (try_inline(target, /*holder_known*/ true, bc)) {
  4036             return true;
  4038         } else {
  4039           print_inlining(target, "not static or statically bindable", /*success*/ false);
  4041       } else {
  4042         print_inlining(callee, "receiver not constant", /*success*/ false);
  4045     break;
  4047   case vmIntrinsics::_linkToVirtual:
  4048   case vmIntrinsics::_linkToStatic:
  4049   case vmIntrinsics::_linkToSpecial:
  4050   case vmIntrinsics::_linkToInterface:
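         // The linkTo* intrinsics receive the resolved target as a trailing
         // MemberName argument; if that argument is constant, the target
         // method is known and may be inlined.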
  4052       // pop MemberName argument
  4053       const int args_base = state()->stack_size() - callee->arg_size();
  4054       ValueType* type = apop()->type();
  4055       if (type->is_constant()) {
  4056         ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
  4057         // If the target is another method handle invoke, try recursively
  4058         // to get a better target.
  4059         if (target->is_method_handle_intrinsic()) {
  4060           if (try_method_handle_inline(target)) {
  4061             return true;
  4063         } else {
  4064           ciSignature* signature = target->signature();
  4065           const int receiver_skip = target->is_static() ? 0 : 1;
  4066           // Cast receiver to its type.
  4067           if (!target->is_static()) {
  4068             ciKlass* tk = signature->accessing_klass();
  4069             Value obj = state()->stack_at(args_base);
  4070             if (obj->exact_type() == NULL &&
  4071                 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
  4072               TypeCast* c = new TypeCast(tk, obj, state_before);
  4073               append(c);
  4074               state()->stack_at_put(args_base, c);
  4077           // Cast reference arguments to their types in the signature.
  4078           for (int i = 0, j = 0; i < signature->count(); i++) {
  4079             ciType* t = signature->type_at(i);
  4080             if (t->is_klass()) {
  4081               ciKlass* tk = t->as_klass();
  4082               Value obj = state()->stack_at(args_base + receiver_skip + j);
  4083               if (obj->exact_type() == NULL &&
  4084                   obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
  4085                 TypeCast* c = new TypeCast(t, obj, state_before);
  4086                 append(c);
  4087                 state()->stack_at_put(args_base + receiver_skip + j, c);
  4090             j += t->size();  // long and double take two slots
  4092           // We don't do CHA here, so only inline static and statically bindable methods.
  4093           if (target->is_static() || target->can_be_statically_bound()) {
  4094             Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
  4095             if (try_inline(target, /*holder_known*/ true, bc)) {
  4096               return true;
  4098           } else {
  4099             print_inlining(target, "not static or statically bindable", /*success*/ false);
  4102       } else {
  4103         print_inlining(callee, "MemberName not constant", /*success*/ false);
  4106     break;
  4108   default:
  4109     fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
  4110     break;
  4112   set_state(state_before);
  4113   return false;
  4117 void GraphBuilder::inline_bailout(const char* msg) {
  4118   assert(msg != NULL, "inline bailout msg must exist");
  4119   _inline_bailout_msg = msg;
  4123 void GraphBuilder::clear_inline_bailout() {
  4124   _inline_bailout_msg = NULL;
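       // Set up the ScopeData for the root method being compiled: it has no
       // parent scope and parsing starts at the given start block.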
  4128 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
  4129   ScopeData* data = new ScopeData(NULL);
  4130   data->set_scope(scope);
  4131   data->set_bci2block(bci2block);
  4132   _scope_data = data;
  4133   _block = start;
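       // Enter the scope of an inlined callee: build its block list, chain a
       // new ValueStack to the caller's state and remember the continuation
       // block that execution returns to after the callee.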
  4137 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  4138   IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  4139   scope()->add_callee(callee_scope);
  4141   BlockListBuilder blb(compilation(), callee_scope, -1);
  4142   CHECK_BAILOUT();
  4144   if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
  4145     // this scope can be inlined directly into the caller so remove
  4146     // the block at bci 0.
  4147     blb.bci2block()->at_put(0, NULL);
  4150   set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
  4152   ScopeData* data = new ScopeData(scope_data());
  4153   data->set_scope(callee_scope);
  4154   data->set_bci2block(blb.bci2block());
  4155   data->set_continuation(continuation);
  4156   _scope_data = data;
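       // Enter a scope used while parsing a jsr subroutine. The method scope
       // stays the same; only the block mapping, exception handlers and
       // continuations are set up for the subroutine.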
  4160 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  4161   ScopeData* data = new ScopeData(scope_data());
  4162   data->set_parsing_jsr();
  4163   data->set_jsr_entry_bci(jsr_dest_bci);
  4164   data->set_jsr_return_address_local(-1);
  4165   // Must clone the bci2block list as we will be mutating it in order to
  4166   // properly clone all blocks in the jsr region as well as the exception
  4167   // handlers containing rets.
  4168   BlockList* new_bci2block = new BlockList(bci2block()->length());
  4169   new_bci2block->push_all(bci2block());
  4170   data->set_bci2block(new_bci2block);
  4171   data->set_scope(scope());
  4172   data->setup_jsr_xhandlers();
  4173   data->set_continuation(continuation());
  4174   data->set_jsr_continuation(jsr_continuation);
  4175   _scope_data = data;
  4179 void GraphBuilder::pop_scope() {
  4180   int number_of_locks = scope()->number_of_locks();
  4181   _scope_data = scope_data()->parent();
  4182   // accumulate minimum number of monitor slots to be reserved
  4183   scope()->set_min_number_of_locks(number_of_locks);
  4187 void GraphBuilder::pop_scope_for_jsr() {
  4188   _scope_data = scope_data()->parent();
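       // Inline Unsafe.getXxx(Object, long): null-check the Unsafe receiver,
       // narrow the 64-bit offset on 32-bit platforms and append an
       // UnsafeGetObject instruction for the load.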
  4191 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  4192   if (InlineUnsafeOps) {
  4193     Values* args = state()->pop_arguments(callee->arg_size());
  4194     null_check(args->at(0));
  4195     Instruction* offset = args->at(2);
  4196 #ifndef _LP64
  4197     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  4198 #endif
  4199     Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
  4200     push(op->type(), op);
  4201     compilation()->set_has_unsafe_access(true);
  4203   return InlineUnsafeOps;
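       // Inline Unsafe.putXxx(Object, long, x): as for the get case, but the
       // store may alias other fields, so kill_all() invalidates the local
       // value numbering state.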
  4207 bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  4208   if (InlineUnsafeOps) {
  4209     Values* args = state()->pop_arguments(callee->arg_size());
  4210     null_check(args->at(0));
  4211     Instruction* offset = args->at(2);
  4212 #ifndef _LP64
  4213     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  4214 #endif
  4215     Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
  4216     compilation()->set_has_unsafe_access(true);
  4217     kill_all();
  4219   return InlineUnsafeOps;
  4223 bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
  4224   if (InlineUnsafeOps) {
  4225     Values* args = state()->pop_arguments(callee->arg_size());
  4226     null_check(args->at(0));
  4227     Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
  4228     push(op->type(), op);
  4229     compilation()->set_has_unsafe_access(true);
  4231   return InlineUnsafeOps;
  4235 bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
  4236   if (InlineUnsafeOps) {
  4237     Values* args = state()->pop_arguments(callee->arg_size());
  4238     null_check(args->at(0));
  4239     Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
  4240     compilation()->set_has_unsafe_access(true);
  4242   return InlineUnsafeOps;
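       // Inline Unsafe.prefetchRead/prefetchWrite and their static variants;
       // the static variants have no Unsafe receiver, so the base object is
       // the first argument.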
  4246 bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
  4247   if (InlineUnsafeOps) {
  4248     Values* args = state()->pop_arguments(callee->arg_size());
  4249     int obj_arg_index = 1; // Assume non-static case
  4250     if (is_static) {
  4251       obj_arg_index = 0;
  4252     } else {
  4253       null_check(args->at(0));
  4255     Instruction* offset = args->at(obj_arg_index + 1);
  4256 #ifndef _LP64
  4257     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  4258 #endif
  4259     Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
  4260                                : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
  4261     compilation()->set_has_unsafe_access(true);
  4263   return InlineUnsafeOps;
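       // Inline Unsafe.compareAndSwapXxx as an Intrinsic instruction; the
       // pushed result is the boolean success flag (represented as an int).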
  4267 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  4268   ValueStack* state_before = copy_state_for_exception();
  4269   ValueType* result_type = as_ValueType(callee->return_type());
  4270   assert(result_type->is_int(), "int result");
  4271   Values* args = state()->pop_arguments(callee->arg_size());
  4273   // Pop off some args to handle specially, then push them back.
  4274   Value newval = args->pop();
  4275   Value cmpval = args->pop();
  4276   Value offset = args->pop();
  4277   Value src = args->pop();
  4278   Value unsafe_obj = args->pop();
  4280   // Separately handle the unsafe arg. It is not needed for code
  4281   // generation, but it must be null-checked.
  4282   null_check(unsafe_obj);
  4284 #ifndef _LP64
  4285   offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  4286 #endif
  4288   args->push(src);
  4289   args->push(offset);
  4290   args->push(cmpval);
  4291   args->push(newval);
  4293   // An unsafe CAS can alias with other field accesses, but we don't
  4294   // know which ones, so mark the state as not preserved.  This will
  4295   // cause CSE to invalidate memory across it.
  4296   bool preserves_state = false;
  4297   Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
  4298   append_split(result);
  4299   push(result_type, result);
  4300   compilation()->set_has_unsafe_access(true);
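       // Report an inlining decision: record it in the compile log and, when
       // PrintInlining is requested, print it to the console as well.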
  4304 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
  4305   CompileLog* log = compilation()->log();
  4306   if (log != NULL) {
  4307     if (success) {
  4308       if (msg != NULL)
  4309         log->inline_success(msg);
  4310       else
  4311         log->inline_success("receiver is statically known");
  4312     } else {
  4313       if (msg != NULL)
  4314         log->inline_fail(msg);
  4315       else
  4316         log->inline_fail("reason unknown");
  4320   if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
  4321     return;
  4323   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
  4324   if (success && CIPrintMethodCodes) {
  4325     callee->print_codes();
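       // Inline Unsafe.getAndSetXxx/getAndAddXxx; like the CAS case the
       // operation may alias arbitrary fields, so kill_all() clears the
       // value numbering state before the result is pushed.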
  4329 bool GraphBuilder::append_unsafe_get_and_set_obj(ciMethod* callee, bool is_add) {
  4330   if (InlineUnsafeOps) {
  4331     Values* args = state()->pop_arguments(callee->arg_size());
  4332     BasicType t = callee->return_type()->basic_type();
  4333     null_check(args->at(0));
  4334     Instruction* offset = args->at(2);
  4335 #ifndef _LP64
  4336     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
  4337 #endif
  4338     Instruction* op = append(new UnsafeGetAndSetObject(t, args->at(1), offset, args->at(3), is_add));
  4339     compilation()->set_has_unsafe_access(true);
  4340     kill_all();
  4341     push(op->type(), op);
  4343   return InlineUnsafeOps;
  4346 #ifndef PRODUCT
  4347 void GraphBuilder::print_stats() {
  4348   vmap()->print();
  4350 #endif // PRODUCT
  4352 void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  4353   assert(known_holder == NULL || (known_holder->is_instance_klass() &&
  4354                                   (!known_holder->is_interface() ||
  4355                                    ((ciInstanceKlass*)known_holder)->has_default_methods())), "should be default method");
  4356   if (known_holder != NULL) {
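           // If the known holder is not an exact type, try to sharpen it
           // using class hierarchy analysis.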
  4357     if (known_holder->exact_klass() == NULL) {
  4358       known_holder = compilation()->cha_exact_type(known_holder);
  4362   append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
  4365 void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  4366   assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
  4367   if (m == NULL) {
  4368     m = method();
  4370   if (invoke_bci < 0) {
  4371     invoke_bci = bci();
  4373   ciMethodData* md = m->method_data_or_null();
  4374   ciProfileData* data = md->bci_to_data(invoke_bci);
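         // Only emit a ProfileReturnType if the call site collects type
         // profile information.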
  4375   if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
  4376     append(new ProfileReturnType(m , invoke_bci, callee, ret));
  4380 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
  4381   append(new ProfileInvoke(callee, state));
