src/share/vm/opto/parse1.cpp

author: jrose
date: Fri, 20 Aug 2010 23:40:30 -0700
changeset: 2101:4b29a725c43c
parent: 1964:4311f23817fd
child: 2314:f95d63e2154a
permissions: -rw-r--r--

6912064: type profiles need to be exploited more for dynamic language support
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse1.cpp.incl"

// Static array so we can figure out which bytecodes stop us from compiling
// the most. Some of the non-static variables are needed in bytecodeInfo.cpp
// and eventually should be encapsulated in a proper class (gri 8/18/98).

int nodes_created              = 0;
int methods_parsed             = 0;
int methods_seen               = 0;
int blocks_parsed              = 0;
int blocks_seen                = 0;

int explicit_null_checks_inserted = 0;
int explicit_null_checks_elided   = 0;
int all_null_checks_found         = 0, implicit_null_checks              = 0;
int implicit_null_throws          = 0;

int reclaim_idx  = 0;
int reclaim_in   = 0;
int reclaim_node = 0;

#ifndef PRODUCT
bool Parse::BytecodeParseHistogram::_initialized = false;
uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];
#endif

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void Parse::print_statistics() {
  tty->print_cr("--- Compiler Statistics ---");
  tty->print("Methods seen: %d  Methods parsed: %d", methods_seen, methods_parsed);
  tty->print("  Nodes created: %d", nodes_created);
  tty->cr();
  if (methods_seen != methods_parsed)
    tty->print_cr("Reasons for parse failures (NOT cumulative):");
  tty->print_cr("Blocks parsed: %d  Blocks seen: %d", blocks_parsed, blocks_seen);

  if( explicit_null_checks_inserted )
    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
  if( all_null_checks_found )
    tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
                  (100*implicit_null_checks)/all_null_checks_found);
  if( implicit_null_throws )
    tty->print_cr("%d implicit null exceptions at runtime",
                  implicit_null_throws);

  if( PrintParseStatistics && BytecodeParseHistogram::initialized() ) {
    BytecodeParseHistogram::print();
  }
}
#endif

//------------------------------ON STACK REPLACEMENT---------------------------

// Construct a node which can be used to get incoming state for
// on stack replacement.
Node *Parse::fetch_interpreter_state(int index,
                                     BasicType bt,
                                     Node *local_addrs,
                                     Node *local_addrs_base) {
  Node *mem = memory(Compile::AliasIdxRaw);
  Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
  Node *ctl = control();

  // Very similar to LoadNode::make, except we handle un-aligned longs and
  // doubles on Sparc.  Intel can handle them just fine directly.
  Node *l;
  switch( bt ) {                // Signature is flattened
  case T_INT:     l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_FLOAT:   l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
  case T_OBJECT:  l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
  case T_LONG:
  case T_DOUBLE: {
    // Since arguments are in reverse order, the argument address 'adr'
    // refers to the back half of the long/double.  Recompute adr.
    adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
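    // (Sketch of the assumed addressing: slot n sits at -n words from the
    // local_addrs base, so the two halves of this long/double occupy words
    // -index and -(index+1); the load must start at the lower address,
    // hence the recomputation above.)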
    if( Matcher::misaligned_doubles_ok ) {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
    } else {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
    }
    break;
  }
  default: ShouldNotReachHere();
  }
  return _gvn.transform(l);
}

// Helper routine to prevent the interpreter from handing
// unexpected typestate to an OSR method.
// The Node l is a value newly dug out of the interpreter frame.
// The type is the type predicted by ciTypeFlow.  Note that it is
// not a general type, but can only come from Type::get_typeflow_type.
// The safepoint is a map which will feed an uncommon trap.
Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                    SafePointNode* &bad_type_exit) {

  const TypeOopPtr* tp = type->isa_oopptr();

  // TypeFlow may assert null-ness if a type appears unloaded.
  if (type == TypePtr::NULL_PTR ||
      (tp != NULL && !tp->klass()->is_loaded())) {
    // Value must be null, not a real oop.
    Node* chk = _gvn.transform( new (C, 3) CmpPNode(l, null()) );
    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
    IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
    Node* bad_type = _gvn.transform( new (C, 1) IfFalseNode(iff) );
    bad_type_exit->control()->add_req(bad_type);
    l = null();
  }
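
  // (Schematic of the subgraph built above: CmpP(l, null) -> Bool(eq) -> If;
  // the IfTrue projection continues as the main control with l replaced by
  // null(), while the IfFalse projection becomes a fresh input to the
  // bad_type_exit Region, which later feeds an uncommon trap.)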

  // Typeflow can also cut off paths from the CFG, based on
  // types which appear unloaded, or call sites which appear unlinked.
  // When paths are cut off, values at later merge points can rise
  // toward more specific classes.  Make sure these specific classes
  // are still in effect.
  if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
    // TypeFlow asserted a specific object type.  Value must have that type.
    Node* bad_type_ctrl = NULL;
    l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
    bad_type_exit->control()->add_req(bad_type_ctrl);
  }

  BasicType bt_l = _gvn.type(l)->basic_type();
  BasicType bt_t = type->basic_type();
  assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
  return l;
}
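
// (Usage note: check_interpreter_type is invoked below from
// load_interpreter_state, once per live local and once per live stack slot;
// every failing control path accumulates into the shared bad_type_exit map.)
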
// Helper routine which sets up elements of the initial parser map when
// performing a parse for on stack replacement.  Add values into map.
// The only parameter contains the address of the interpreter's arguments.
void Parse::load_interpreter_state(Node* osr_buf) {
  int index;
  int max_locals = jvms()->loc_size();
  int max_stack  = jvms()->stk_size();

  // Mismatch between method and jvms can occur since map briefly held
  // an OSR entry state (which takes up one RawPtr word).
  assert(max_locals == method()->max_locals(), "sanity");
  assert(max_stack  >= method()->max_stack(),  "sanity");
  assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
  assert((int)jvms()->endoff() == (int)map()->req(), "sanity");

  // Find the start block.
  Block* osr_block = start_block();
  assert(osr_block->start() == osr_bci(), "sanity");

  // Set initial BCI.
  set_parse_bci(osr_block->start());

  // Set initial stack depth.
  set_sp(osr_block->start_sp());

  // Check bailouts.  We currently do not perform on stack replacement
  // of loops in catch blocks or loops which branch with a non-empty stack.
  if (sp() != 0) {
    C->record_method_not_compilable("OSR starts with non-empty stack");
    return;
  }
  // Do not OSR inside finally clauses:
  if (osr_block->has_trap_at(osr_block->start())) {
    C->record_method_not_compilable("OSR starts with an immediate trap");
    return;
  }

  // Commute monitors from interpreter frame to compiler frame.
  assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
  int mcnt = osr_block->flow()->monitor_count();
  Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
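  // (Assumed osr_buf layout, per the address arithmetic here and below:
  // max_locals words of locals followed by mcnt two-word monitor entries,
  // each an interleaved pair of locked object and displaced header.
  // monitors_addr points at the last word of the monitor area, and
  // fetch_interpreter_state indexes backward from it.)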
  for (index = 0; index < mcnt; index++) {
    // Make a BoxLockNode for the monitor.
    Node *box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor()));

    // Displaced headers and locked objects are interleaved in the
    // temp OSR buffer.  We only copy the locked objects out here.
    // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
    // Try and copy the displaced header to the BoxNode
    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);

    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);

    // Build a bogus FastLockNode (no code will be generated) and push the
    // monitor into our debug info.
    const FastLockNode *flock = _gvn.transform(new (C, 3) FastLockNode( 0, lock_object, box ))->as_FastLock();
    map()->push_monitor(flock);

    // If the lock is our method synchronization lock, tuck it away in
    // _sync_lock for return and rethrow exit paths.
    if (index == 0 && method()->is_synchronized()) {
      _synch_lock = flock;
    }
  }

  // Use the raw liveness computation to make sure that unexpected
  // values don't propagate into the OSR frame.
  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
  if (!live_locals.is_valid()) {
    // Degenerate or breakpointed method.
    C->record_method_not_compilable("OSR in empty or breakpointed method");
    return;
  }

  // Extract the needed locals from the interpreter frame.
  Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);

  // find all the locals that the interpreter thinks contain live oops
  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
  for (index = 0; index < max_locals; index++) {

    if (!live_locals.at(index)) {
      continue;
    }

    const Type *type = osr_block->local_type_at(index);

    if (type->isa_oopptr() != NULL) {

      // 6403625: Verify that the interpreter oopMap thinks that the oop is live
      // else we might load a stale oop if the MethodLiveness disagrees with the
      // result of the interpreter. If the interpreter says it is dead we agree
      // by making the value go to top.
      //
      if (!live_oops.at(index)) {
        if (C->log() != NULL) {
          C->log()->elem("OSR_mismatch local_index='%d'",index);
        }
        set_local(index, null());
        // and ignore it for the loads
        continue;
      }
    }

    // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
    if (type == Type::TOP || type == Type::HALF) {
      continue;
    }
    // If the type falls to bottom, then this must be a local that
    // is mixing ints and oops or some such.  Forcing it to top
    // makes it go dead.
    if (type == Type::BOTTOM) {
      continue;
    }
    // Construct code to access the appropriate local.
    BasicType bt = type->basic_type();
    if (type == TypePtr::NULL_PTR) {
      // Ptr types are mixed together with T_ADDRESS but NULL is
      // really for T_OBJECT types so correct it.
      bt = T_OBJECT;
    }
    Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
    set_local(index, value);
  }

  // Extract the needed stack entries from the interpreter frame.
  for (index = 0; index < sp(); index++) {
    const Type *type = osr_block->stack_type_at(index);
    if (type != Type::TOP) {
      // Currently the compiler bails out when attempting to on stack replace
      // at a bci with a non-empty stack.  We should not reach here.
      ShouldNotReachHere();
    }
  }

  // End the OSR migration
  make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
                    CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                    "OSR_migration_end", TypeRawPtr::BOTTOM,
                    osr_buf);

  // Now that the interpreter state is loaded, make sure it will match
  // at execution time what the compiler is expecting now:
  SafePointNode* bad_type_exit = clone_map();
  bad_type_exit->set_control(new (C, 1) RegionNode(1));
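  // (The Region starts with only its self-reference slot; each failing type
  // check below grows it via add_req, so req() > 1 afterwards means at least
  // one unexpected-type path exists and an uncommon trap must be emitted.)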

  assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
  for (index = 0; index < max_locals; index++) {
    if (stopped())  break;
    Node* l = local(index);
    if (l->is_top())  continue;  // nothing here
    const Type *type = osr_block->local_type_at(index);
    if (type->isa_oopptr() != NULL) {
      if (!live_oops.at(index)) {
        // skip type check for dead oops
        continue;
      }
    }
    if (osr_block->flow()->local_type_at(index)->is_return_address()) {
      // In our current system it's illegal for jsr addresses to be
      // live into an OSR entry point because the compiler performs
      // inlining of jsrs.  ciTypeFlow has a bailout that detects this
      // case and aborts the compile if addresses are live into an OSR
      // entry point.  Because of that we can assume that any address
      // locals at the OSR entry point are dead.  Method liveness
      // isn't precise enough to figure out that they are dead in all
      // cases so simply skip checking address locals altogether.
      // Any type check is guaranteed to fail since the
      // interpreter type is the result of a load which might have any
      // value and the expected type is a constant.
      continue;
    }
    set_local(index, check_interpreter_type(l, type, bad_type_exit));
  }

  for (index = 0; index < sp(); index++) {
    if (stopped())  break;
    Node* l = stack(index);
    if (l->is_top())  continue;  // nothing here
    const Type *type = osr_block->stack_type_at(index);
    set_stack(index, check_interpreter_type(l, type, bad_type_exit));
  }

  if (bad_type_exit->control()->req() > 1) {
    // Build an uncommon trap here, if any inputs can be unexpected.
    bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
    record_for_igvn(bad_type_exit->control());
    SafePointNode* types_are_good = map();
    set_map(bad_type_exit);
    // The unexpected type happens because a new edge is active
    // in the CFG, which typeflow had previously ignored.
    // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
    // This x will be typed as Integer if notReached is not yet linked.
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret);
    set_map(types_are_good);
  }
}

//------------------------------Parse------------------------------------------
// Main parser constructor.
Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
  : _exits(caller)
{
  // Init some variables
  _caller = caller;
  _method = parse_method;
  _expected_uses = expected_uses;
  _depth = 1 + (caller->has_method() ? caller->depth() : 0);
  _wrote_final = false;
  _entry_bci = InvocationEntryBci;
  _tf = NULL;
  _block = NULL;
  debug_only(_block_count = -1);
  debug_only(_blocks = (Block*)-1);
#ifndef PRODUCT
  if (PrintCompilation || PrintOpto) {
    // Make sure I have an inline tree, so I can print messages about it.
    JVMState* ilt_caller = is_osr_parse() ? caller->caller() : caller;
    InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true);
  }
  _max_switch_depth = 0;
  _est_switch_depth = 0;
#endif

  _tf = TypeFunc::make(method());
  _iter.reset_to_method(method());
  _flow = method()->get_flow_analysis();
  if (_flow->failing()) {
    C->record_method_not_compilable_all_tiers(_flow->failure_reason());
  }

#ifndef PRODUCT
  if (_flow->has_irreducible_entry()) {
    C->set_parsed_irreducible_loop(true);
  }
#endif

  if (_expected_uses <= 0) {
    _prof_factor = 1;
  } else {
    float prof_total = parse_method->interpreter_invocation_count();
    if (prof_total <= _expected_uses) {
      _prof_factor = 1;
    } else {
      _prof_factor = _expected_uses / prof_total;
    }
  }
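
  // (Worked example with assumed figures: if this parse expects 100 uses
  // but the interpreter profile recorded 10,000 invocations, then
  // _prof_factor = 100/10000 = 0.01, scaling profile counts down to this
  // call site's share.)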

  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("parse method='%d' uses='%g'",
                    log->identify(parse_method), expected_uses);
    if (depth() == 1 && C->is_osr_compilation()) {
      log->print(" osr_bci='%d'", C->entry_bci());
    }
    log->stamp();
    log->end_head();
  }

  // Accumulate deoptimization counts.
  // (The range_check and store_check counts are checked elsewhere.)
  ciMethodData* md = method()->method_data();
  for (uint reason = 0; reason < md->trap_reason_limit(); reason++) {
    uint md_count = md->trap_count(reason);
    if (md_count != 0) {
      if (md_count == md->trap_count_limit())
        md_count += md->overflow_trap_count();
      uint total_count = C->trap_count(reason);
      uint old_count   = total_count;
      total_count += md_count;
      // Saturate the add if it overflows.
      if (total_count < old_count || total_count < md_count)
        total_count = (uint)-1;
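      // (The test above detects unsigned wraparound: if the sum is smaller
      // than either addend, the addition overflowed, so it is clamped to
      // the maximum uint value.)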
      C->set_trap_count(reason, total_count);
      if (log != NULL)
        log->elem("observe trap='%s' count='%d' total='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md_count, total_count);
    }
  }
  // Accumulate total sum of decompilations, also.
  C->set_decompile_count(C->decompile_count() + md->decompile_count());

  _count_invocations = C->do_count_invocations();
  _method_data_update = C->do_method_data_update();

  if (log != NULL && method()->has_exception_handlers()) {
    log->elem("observe that='has_exception_handlers'");
  }

  assert(method()->can_be_compiled(),       "Can not parse this method, cutout earlier");
  assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");

  // Always register dependence if JVMTI is enabled, because
  // either breakpoint setting or hotswapping of methods may
  // cause deoptimization.
  if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
    C->dependencies()->assert_evol_method(method());
  }

  methods_seen++;

  // Do some special top-level things.
  if (depth() == 1 && C->is_osr_compilation()) {
    _entry_bci = C->entry_bci();
    _flow = method()->get_osr_flow_analysis(osr_bci());
    if (_flow->failing()) {
      C->record_method_not_compilable(_flow->failure_reason());
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
        if (Verbose) {
          method()->print_oop();
          method()->print_codes();
          _flow->print();
        }
      }
#endif
    }
    _tf = C->tf();     // the OSR entry type is different
  }

#ifdef ASSERT
  if (depth() == 1) {
    assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
    if (C->tf() != tf()) {
      MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
      assert(C->env()->system_dictionary_modification_counter_changed(),
             "Must invalidate if TypeFuncs differ");
    }
  } else {
    assert(!this->is_osr_parse(), "no recursive OSR");
  }
#endif

  methods_parsed++;
#ifndef PRODUCT
  // add method size here to guarantee that inlined methods are added too
  if (TimeCompiler)
    _total_bytes_compiled += method()->code_size();

  show_parse_info();
#endif

  if (failing()) {
    if (log)  log->done("parse");
    return;
  }

  gvn().set_type(root(), root()->bottom_type());
  gvn().transform(top());

  // Import the results of the ciTypeFlow.
  init_blocks();

  // Merge point for all normal exits
  build_exits();

  // Setup the initial JVM state map.
  SafePointNode* entry_map = create_entry_map();

  // Check for bailouts during map initialization
  if (failing() || entry_map == NULL) {
    if (log)  log->done("parse");
    return;
  }

  Node_Notes* caller_nn = C->default_node_notes();
  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (DebugInlinedCalls || depth() == 1) {
    C->set_default_node_notes(make_node_notes(caller_nn));
  }

  if (is_osr_parse()) {
    Node* osr_buf = entry_map->in(TypeFunc::Parms+0);
    entry_map->set_req(TypeFunc::Parms+0, top());
    set_map(entry_map);
    load_interpreter_state(osr_buf);
  } else {
    set_map(entry_map);
    do_method_entry();
  }

  // Check for bailouts during method entry.
  if (failing()) {
    if (log)  log->done("parse");
    C->set_default_node_notes(caller_nn);
    return;
  }

  entry_map = map();  // capture any changes performed by method setup code
  assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");

  // We begin parsing as if we have just encountered a jump to the
  // method entry.
  Block* entry_block = start_block();
  assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
  set_map_clone(entry_map);
  merge_common(entry_block, entry_block->next_path_num());

#ifndef PRODUCT
  BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
  set_parse_histogram( parse_histogram_obj );
#endif

  // Parse all the basic blocks.
  do_all_blocks();

  C->set_default_node_notes(caller_nn);

  // Check for bailouts during conversion to graph
  if (failing()) {
    if (log)  log->done("parse");
    return;
  }

  // Fix up all exiting control flow.
  set_map(entry_map);
  do_exits();

  if (log)  log->done("parse nodes='%d' memory='%d'",
                      C->unique(), C->node_arena()->used());
}

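// (In outline, the constructor above: imports the ciTypeFlow results
// (init_blocks), builds the normal-exit merge point (build_exits), creates
// the entry map, then either loads interpreter state for OSR
// (load_interpreter_state) or emits entry code (do_method_entry), parses
// every basic block (do_all_blocks), and finally stitches up all exiting
// control flow (do_exits).)
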
//---------------------------do_all_blocks-------------------------------------
void Parse::do_all_blocks() {
  bool has_irreducible = flow()->has_irreducible_entry();

  // Walk over all blocks in Reverse Post-Order.
  while (true) {
    bool progress = false;
    for (int rpo = 0; rpo < block_count(); rpo++) {
      Block* block = rpo_at(rpo);

      if (block->is_parsed()) continue;

      if (!block->is_merged()) {
        // Dead block, no state reaches this block
        continue;
      }

      // Prepare to parse this block.
      load_state_from(block);

      if (stopped()) {
        // Block is dead.
        continue;
      }

      blocks_parsed++;

      progress = true;
      if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
        // Not all preds have been parsed.  We must build phis everywhere.
        // (Note that dead locals do not get phis built, ever.)
        ensure_phis_everywhere();

        // Leave behind an undisturbed copy of the map, for future merges.
        set_map(clone_map());
      }

      if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
        // In the absence of irreducible loops, the Region and Phis
        // associated with a merge that doesn't involve a backedge can
        // be simplified now since the RPO parsing order guarantees
        // that any path which was supposed to reach here has already
        // been parsed or must be dead.
        Node* c = control();
        Node* result = _gvn.transform_no_reclaim(control());
        if (c != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
        }
        if (result != top()) {
          record_for_igvn(result);
        }
      }

      // Parse the block.
      do_one_block();

      // Check for bailouts.
      if (failing())  return;
    }

    // With irreducible loops, multiple passes might be necessary to parse everything.
    if (!has_irreducible || !progress) {
      break;
    }
  }

  blocks_seen += block_count();

#ifndef PRODUCT
  // Make sure there are no half-processed blocks remaining.
  // Every remaining unprocessed block is dead and may be ignored now.
  for (int rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    if (!block->is_parsed()) {
      if (TraceOptoParse) {
        tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
      }
      assert(!block->is_merged(), "no half-processed blocks");
    }
  }
#endif
}

//-------------------------------build_exits----------------------------------
// Build normal and exceptional exit merge points.
void Parse::build_exits() {
  // make a clone of caller to prevent sharing of side-effects
  _exits.set_map(_exits.clone_map());
  _exits.clean_stack(_exits.sp());
  _exits.sync_jvms();

  RegionNode* region = new (C, 1) RegionNode(1);
  record_for_igvn(region);
  gvn().set_type_bottom(region);
  _exits.set_control(region);

  // Note:  iophi and memphi are not transformed until do_exits.
  Node* iophi  = new (C, region->req()) PhiNode(region, Type::ABIO);
  Node* memphi = new (C, region->req()) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
  _exits.set_i_o(iophi);
  _exits.set_all_memory(memphi);

  // Add a return value to the exit state.  (Do not push it yet.)
  if (tf()->range()->cnt() > TypeFunc::Parms) {
    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
    // Don't "bind" an unloaded return klass to the ret_phi. If the klass
    // becomes loaded during the subsequent parsing, the loaded and unloaded
    // types will not join when we transform and push in do_exits().
    const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
    if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
      ret_type = TypeOopPtr::BOTTOM;
    }
    int         ret_size = type2size[ret_type->basic_type()];
    Node*       ret_phi  = new (C, region->req()) PhiNode(region, ret_type);
    _exits.ensure_stack(ret_size);
    assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
    assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
    _exits.set_argument(0, ret_phi);  // here is where the parser finds it
    // Note:  ret_phi is not yet pushed, until do_exits.
  }
}

//----------------------------build_start_state-------------------------------
// Construct a state which contains only the incoming arguments from an
// unknown caller.  The method & bci will be NULL & InvocationEntryBci.
JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
  int        arg_size = tf->domain()->cnt();
  int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
  JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
  SafePointNode* map  = new (this, max_size) SafePointNode(max_size, NULL);
  record_for_igvn(map);
  assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
  Node_Notes* old_nn = default_node_notes();
  if (old_nn != NULL && has_method()) {
    Node_Notes* entry_nn = old_nn->clone(this);
    JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
    entry_jvms->set_offsets(0);
    entry_jvms->set_bci(entry_bci());
    entry_nn->set_jvms(entry_jvms);
    set_default_node_notes(entry_nn);
  }
  uint i;
  for (i = 0; i < (uint)arg_size; i++) {
    Node* parm = initial_gvn()->transform(new (this, 1) ParmNode(start, i));
    map->init_req(i, parm);
    // Record all these guys for later GVN.
    record_for_igvn(parm);
  }
  for (; i < map->req(); i++) {
    map->init_req(i, top());
  }
  assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
  set_default_node_notes(old_nn);
  map->set_jvms(jvms);
  jvms->set_map(map);
  return jvms;
}
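
// (Resulting map, in outline: slots 0..TypeFunc::Parms-1 carry the predefined
// machine state, the next arg_size - TypeFunc::Parms slots carry ParmNodes for
// the incoming arguments, and any remaining slots up to max_size are top().)
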
//-----------------------------make_node_notes---------------------------------
Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
  if (caller_nn == NULL)  return NULL;
  Node_Notes* nn = caller_nn->clone(C);
  JVMState* caller_jvms = nn->jvms();
  JVMState* jvms = new (C) JVMState(method(), caller_jvms);
  jvms->set_offsets(0);
  jvms->set_bci(_entry_bci);
  nn->set_jvms(jvms);
  return nn;
}

//--------------------------return_values--------------------------------------
void Compile::return_values(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms,
                             kit.control(),
                             kit.i_o(),
                             kit.reset_memory(),
                             kit.frameptr(),
                             kit.returnadr());
  // Add zero or one return value
  int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
  if (ret_size > 0) {
    kit.inc_sp(-ret_size);  // pop the return value(s)
    kit.sync_jvms();
    ret->add_req(kit.argument(0));
    // Note:  The second dummy edge is not needed by a ReturnNode.
  }
  // bind it to root
  root()->add_req(ret);
  record_for_igvn(ret);
  initial_gvn()->transform_no_reclaim(ret);
}

//------------------------rethrow_exceptions-----------------------------------
// Bind all exception states in the list into a single RethrowNode.
void Compile::rethrow_exceptions(JVMState* jvms) {
  GraphKit kit(jvms);
  if (!kit.has_exceptions())  return;  // nothing to generate
  // Load my combined exception state into the kit, with all phis transformed:
  SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
  Node* ex_oop = kit.use_exception_state(ex_map);
  RethrowNode* exit = new (this, TypeFunc::Parms + 1) RethrowNode(kit.control(),
                                      kit.i_o(), kit.reset_memory(),
                                      kit.frameptr(), kit.returnadr(),
                                      // like a return but with exception input
                                      ex_oop);
  // bind to root
  root()->add_req(exit);
  record_for_igvn(exit);
  initial_gvn()->transform_no_reclaim(exit);
}

//---------------------------do_exceptions-------------------------------------
// Process exceptions arising from the current bytecode.
// Send caught exceptions to the proper handler within this method.
// Unhandled exceptions feed into _exit.
void Parse::do_exceptions() {
  if (!has_exceptions())  return;

  if (failing()) {
    // Pop them all off and throw them away.
    while (pop_exception_state() != NULL) ;
    return;
  }

  PreserveJVMState pjvms(this, false);

  SafePointNode* ex_map;
  while ((ex_map = pop_exception_state()) != NULL) {
    if (!method()->has_exception_handlers()) {
      // Common case:  Transfer control outward.
      // Doing it this early allows the exceptions to common up
      // even between adjacent method calls.
      throw_to_exit(ex_map);
    } else {
      // Have to look at the exception first.
      assert(stopped(), "catch_inline_exceptions trashes the map");
      catch_inline_exceptions(ex_map);
      stop_and_kill_map();      // we used up this exception state; kill it
    }
  }

  // We now return to our regularly scheduled program:
}

//---------------------------throw_to_exit-------------------------------------
// Merge the given map into an exception exit from this method.
// The exception exit will handle any unlocking of receiver.
// The ex_oop must be saved within the ex_map, unlike merge_exception.
void Parse::throw_to_exit(SafePointNode* ex_map) {
  // Pop the JVMS to (a copy of) the caller.
  GraphKit caller;
  caller.set_map_clone(_caller->map());
  caller.set_bci(_caller->bci());
  caller.set_sp(_caller->sp());
  // Copy out the standard machine state:
  for (uint i = 0; i < TypeFunc::Parms; i++) {
    caller.map()->set_req(i, ex_map->in(i));
  }
  // ...and the exception:
  Node*          ex_oop        = saved_ex_oop(ex_map);
  SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
  // Finally, collect the new exception state in my exits:
  _exits.add_exception_state(caller_ex_map);
}

//------------------------------do_exits---------------------------------------
void Parse::do_exits() {
  set_parse_bci(InvocationEntryBci);

  // Now peephole on the return bits
  Node* region = _exits.control();
  _exits.set_control(gvn().transform(region));

  Node* iophi = _exits.i_o();
  _exits.set_i_o(gvn().transform(iophi));

  if (wrote_final()) {
    // This method (which must be a constructor by the rules of Java)
    // wrote a final.  The effects of all initializations must be
    // committed to memory before any code after the constructor
    // publishes the reference to the newly constructed object.
    // Rather than wait for the publication, we simply block the
    // writes here.  Rather than put a barrier on only those writes
    // which are required to complete, we force all writes to complete.
    //
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
    _exits.insert_mem_bar(Op_MemBarRelease);
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name();
      tty->print_cr(" writes finals and needs a memory barrier");
    }
#endif
  }

  for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
    // transform each slice of the original memphi:
    mms.set_memory(_gvn.transform(mms.memory()));
  }

  if (tf()->range()->cnt() > TypeFunc::Parms) {
    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
    Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
    assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty(), "return value must be well defined");
    _exits.push_node(ret_type->basic_type(), ret_phi);
  }

  // Note:  Logic for creating and optimizing the ReturnNode is in Compile.

  // Unlock along the exceptional paths.
  // This is done late so that we can common up equivalent exceptions
  // (e.g., null checks) arising from multiple points within this method.
  // See GraphKit::add_exception_state, which performs the commoning.
  bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;

  // Record exit from a method if compiled while DTrace is turned on.
  if (do_synch || C->env()->dtrace_method_probes()) {
    // First move the exception list out of _exits:
    GraphKit kit(_exits.transfer_exceptions_into_jvms());
    SafePointNode* normal_map = kit.map();  // keep this guy safe
    // Now re-collect the exceptions into _exits:
    SafePointNode* ex_map;
    while ((ex_map = kit.pop_exception_state()) != NULL) {
      Node* ex_oop = kit.use_exception_state(ex_map);
      // Force the exiting JVM state to have this method at InvocationEntryBci.
      // The exiting JVM state is otherwise a copy of the calling JVMS.
      JVMState* caller = kit.jvms();
      JVMState* ex_jvms = caller->clone_shallow(C);
      ex_jvms->set_map(kit.clone_map());
      ex_jvms->map()->set_jvms(ex_jvms);
      ex_jvms->set_bci(InvocationEntryBci);
      kit.set_jvms(ex_jvms);
      if (do_synch) {
        // Add on the synchronized-method box/object combo
        kit.map()->push_monitor(_synch_lock);
        // Unlock!
        kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
      }
      if (C->env()->dtrace_method_probes()) {
        kit.make_dtrace_method_exit(method());
      }
      // Done with exception-path processing.
      ex_map = kit.make_exception_state(ex_oop);
      assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
      // Pop the last vestige of this method:
      ex_map->set_jvms(caller->clone_shallow(C));
      ex_map->jvms()->set_map(ex_map);
      _exits.push_exception_state(ex_map);
    }
    assert(_exits.map() == normal_map, "keep the same return state");
  }

  {
    // Capture very early exceptions (receiver null checks) from caller JVMS
    GraphKit caller(_caller);
    SafePointNode* ex_map;
    while ((ex_map = caller.pop_exception_state()) != NULL) {
      _exits.add_exception_state(ex_map);
    }
  }
}

//-----------------------------create_entry_map-------------------------------
// Initialize our parser map to contain the types at method entry.
// For OSR, the map contains a single RawPtr parameter.
// Initial monitor locking for sync. methods is performed by do_method_entry.
SafePointNode* Parse::create_entry_map() {
  // Check for really stupid bail-out cases.
  uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
  if (len >= 32760) {
    C->record_method_not_compilable_all_tiers("too many local variables");
    return NULL;
  }

  // If this is an inlined method, we may have to do a receiver null check.
  if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
    GraphKit kit(_caller);
    kit.null_check_receiver(method());
    _caller = kit.transfer_exceptions_into_jvms();
    if (kit.stopped()) {
      _exits.add_exception_states_from(_caller);
      _exits.set_jvms(_caller);
      return NULL;
    }
  }

  assert(method() != NULL, "parser must have a method");

  // Create an initial safepoint to hold JVM state during parsing
  JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
  set_map(new (C, len) SafePointNode(len, jvms));
  jvms->set_map(map());
  record_for_igvn(map());
  assert(jvms->endoff() == len, "correct jvms sizing");
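  // (Map layout implied by the sizing above: TypeFunc::Parms predefined
  // inputs first, then max_locals local slots, then max_stack stack slots,
  // for a total of len == jvms->endoff() entries.)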

  SafePointNode* inmap = _caller->map();
  assert(inmap != NULL, "must have inmap");

  uint i;

  // Pass thru the predefined input parameters.
  for (i = 0; i < TypeFunc::Parms; i++) {
    map()->init_req(i, inmap->in(i));
  }

  if (depth() == 1) {
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  // Now add the locals which are initially bound to arguments:
  uint arg_size = tf()->domain()->cnt();
  ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
  for (i = TypeFunc::Parms; i < arg_size; i++) {
    map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
  }

  // Clear out the rest of the map (locals and stack)
  for (i = arg_size; i < len; i++) {
    map()->init_req(i, top());
  }

  SafePointNode* entry_map = stop();
  return entry_map;
}

//-----------------------------do_method_entry--------------------------------
// Emit any code needed in the pseudo-block before BCI zero.
// The main thing to do is lock the receiver of a synchronized method.
void Parse::do_method_entry() {
  set_parse_bci(InvocationEntryBci); // Pseudo-BCP
  set_sp(0);                      // Java Stack Pointer

  NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )

  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_entry(method());
  }

  // If the method is synchronized, we need to construct a lock node, attach
  // it to the Start node, and pin it there.
  if (method()->is_synchronized()) {
    // Insert a FastLockNode right after the Start which takes as arguments
    // the current thread pointer, the "this" pointer & the address of the
    // stack slot pair used for the lock.  The "this" pointer is a projection
    // off the start node, but the locking spot has to be constructed by
    // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
    // becomes the second argument to the FastLockNode call.  The
    // FastLockNode becomes the new control parent to pin it to the start.

    // Setup Object Pointer
    Node *lock_obj = NULL;
    if (method()->is_static()) {
      ciInstance* mirror = _method->holder()->java_mirror();
      const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
      lock_obj = makecon(t_lock);
    } else {                  // Else pass the "this" pointer,
      lock_obj = local(0);    // which is Parm0 from StartNode
    }

    // Clear out dead values from the debug info.
    kill_dead_locals();

    // Build the FastLockNode
    _synch_lock = shared_lock(lock_obj);
  }

  if (depth() == 1) {
    increment_and_test_invocation_counter(Tier2CompileThreshold);
  }
}

//------------------------------init_blocks------------------------------------
// Initialize our parser map to contain the types/monitors at method entry.
void Parse::init_blocks() {
  // Create the blocks.
  _block_count = flow()->block_count();
  _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
  Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);

  int rpo;

  // Initialize the structs.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_node(this, rpo);
  }

  // Collect predecessor and successor information.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_graph(this);
  }
}

//-------------------------------init_node-------------------------------------
void Parse::Block::init_node(Parse* outer, int rpo) {
  _flow = outer->flow()->rpo_at(rpo);
  _pred_count = 0;
  _preds_parsed = 0;
  _count = 0;
  assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
  assert(!(is_merged() || is_parsed() || is_handler()), "sanity");
  assert(_live_locals.size() == 0, "sanity");

  // entry point has additional predecessor
  if (flow()->is_start())  _pred_count++;
  assert(flow()->is_start() == (this == outer->start_block()), "");
}

//-------------------------------init_graph------------------------------------
void Parse::Block::init_graph(Parse* outer) {
  // Create the successor list for this parser block.
  GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
  GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
  int ns = tfs->length();
  int ne = tfe->length();
  _num_successors = ns;
  _all_successors = ns+ne;
  _successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne);
  int p = 0;
  for (int i = 0; i < ns+ne; i++) {
    ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
    Block* block2 = outer->rpo_at(tf2->rpo());
    _successors[i] = block2;

    // Accumulate pred info for the other block, too.
    if (i < ns) {
      block2->_pred_count++;
    } else {
      block2->_is_handler = true;
    }

    #ifdef ASSERT
    // A block's successors must be distinguishable by BCI.
    // That is, no bytecode is allowed to branch to two different
    // clones of the same code location.
    for (int j = 0; j < i; j++) {
      Block* block1 = _successors[j];
      if (block1 == block2)  continue;  // duplicates are OK
      assert(block1->start() != block2->start(), "successors have unique bcis");
    }
    #endif
  }

  // Note: We never call next_path_num along exception paths, so they
  // never get processed as "ready".  Also, the input phis of exception
  // handlers get specially processed, so that
}

//---------------------------successor_for_bci---------------------------------
Parse::Block* Parse::Block::successor_for_bci(int bci) {
  for (int i = 0; i < all_successors(); i++) {
    Block* block2 = successor_at(i);
    if (block2->start() == bci)  return block2;
  }
  // We can actually reach here if ciTypeFlow traps out a block
  // due to an unloaded class, and concurrently with compilation the
  // class is then loaded, so that a later phase of the parser is
  // able to see more of the bytecode CFG.  Or, the flow pass and
  // the parser can have a minor difference of opinion about executability
  // of bytecodes.  For example, "obj.field = null" is executable even
  // if the field's type is an unloaded class; the flow pass used to
  // make a trap for such code.
  return NULL;
}

//-----------------------------stack_type_at-----------------------------------
const Type* Parse::Block::stack_type_at(int i) const {
  return get_type(flow()->stack_type_at(i));
}

//-----------------------------local_type_at-----------------------------------
const Type* Parse::Block::local_type_at(int i) const {
  // Make dead locals fall to bottom.
  if (_live_locals.size() == 0) {
    MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
    // This bitmap can be zero length if we saw a breakpoint.
    // In such cases, pretend they are all live.
    ((Block*)this)->_live_locals = live_locals;
  }
  if (_live_locals.size() > 0 && !_live_locals.at(i))
    return Type::BOTTOM;

  return get_type(flow()->local_type_at(i));
}

#ifndef PRODUCT

//----------------------------name_for_bc--------------------------------------
// helper method for BytecodeParseHistogram
static const char* name_for_bc(int i) {
  return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
}

//----------------------------BytecodeParseHistogram------------------------------------
Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
  _parser   = p;
  _compiler = c;
  if( ! _initialized ) { _initialized = true; reset(); }
}

//----------------------------current_count------------------------------------
int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
  switch( bph_type ) {
  case BPH_transforms: { return _parser->gvn().made_progress(); }
  case BPH_values:     { return _parser->gvn().made_new_values(); }
  default: { ShouldNotReachHere(); return 0; }
  }
}

//----------------------------initialized--------------------------------------
bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }

//----------------------------reset--------------------------------------------
void Parse::BytecodeParseHistogram::reset() {
  int i = Bytecodes::number_of_codes;
  while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
}

//----------------------------set_initial_state--------------------------------
// Record info when starting to parse one bytecode
void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    _initial_bytecode    = bc;
    _initial_node_count  = _compiler->unique();
    _initial_transforms  = current_count(BPH_transforms);
    _initial_values      = current_count(BPH_values);
  }
}

//----------------------------record_change--------------------------------
// Record results of parsing one bytecode
void Parse::BytecodeParseHistogram::record_change() {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    ++_bytecodes_parsed[_initial_bytecode];
    _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
    _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
    _new_values        [_initial_bytecode] += (current_count(BPH_values)     - _initial_values);
  }
}

//----------------------------print--------------------------------------------
void Parse::BytecodeParseHistogram::print(float cutoff) {
  ResourceMark rm;
  // print profile
  int total  = 0;
  int i      = 0;
  for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
  int abs_sum = 0;
  tty->cr();   //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr("Histogram of %d parsed bytecodes:", total);
  if( total == 0 ) { return; }
  tty->cr();
  tty->print_cr("absolute:  count of compiled bytecodes of this type");
  tty->print_cr("relative:  percentage contribution to compiled nodes");
  tty->print_cr("nodes   :  Average number of nodes constructed per bytecode");
  tty->print_cr("rnodes  :  Significance towards total nodes constructed (nodes*relative)");
  tty->print_cr("transforms: Average amount of transform progress per bytecode compiled");
  tty->print_cr("values  :  Average number of node values improved per bytecode");
  tty->print_cr("name    :  Bytecode name");
  tty->cr();
  tty->print_cr("  absolute  relative   nodes  rnodes  transforms  values   name");
  tty->print_cr("----------------------------------------------------------------------");
  while (--i > 0) {
    int       abs = _bytecodes_parsed[i];
    float     rel = abs * 100.0F / total;
    float   nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
    float  rnodes = _bytecodes_parsed[i] == 0 ? 0 :  rel * nodes;
    float  xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
    float  values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values       [i])/_bytecodes_parsed[i];
    if (cutoff <= rel) {
      tty->print_cr("%10d  %7.2f%%  %6.1f  %6.2f   %6.1f   %6.1f     %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
      abs_sum += abs;
    }
  }
  tty->print_cr("----------------------------------------------------------------------");
  float rel_sum = abs_sum * 100.0F / total;
  tty->print_cr("%10d  %7.2f%%    (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}

#endif
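// Note: the histogram above is gathered only under the debug flag
// PrintParseStatistics, and never for OSR parses; set_initial_state() and
// record_change() bracket each bytecode, charging the node, transform, and
// value deltas to that bytecode's bucket.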
//----------------------------load_state_from----------------------------------
// Load block/map/sp.  But do not touch iter/bci.
void Parse::load_state_from(Block* block) {
  set_block(block);
  // load the block's JVM state:
  set_map(block->start_map());
  set_sp( block->start_sp());
}
//-----------------------------record_state------------------------------------
void Parse::Block::record_state(Parse* p) {
  assert(!is_merged(), "can only record state once, on 1st inflow");
  assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
  set_start_map(p->stop());
}
//------------------------------do_one_block-----------------------------------
void Parse::do_one_block() {
  if (TraceOptoParse) {
    Block *b = block();
    int ns = b->num_successors();
    int nt = b->all_successors();

    tty->print("Parsing block #%d at bci [%d,%d), successors: ",
                  block()->rpo(), block()->start(), block()->limit());
    for (int i = 0; i < nt; i++) {
      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
    }
    if (b->is_loop_head()) tty->print("  lphd");
    tty->print_cr("");
  }

  assert(block()->is_merged(), "must be merged before being parsed");
  block()->mark_parsed();
  ++_blocks_parsed;

  // Set iterator to start of block.
  iter().reset_to_bci(block()->start());

  CompileLog* log = C->log();

  // Parse bytecodes
  while (!stopped() && !failing()) {
    iter().next();

    // Learn the current bci from the iterator:
    set_parse_bci(iter().cur_bci());

    if (bci() == block()->limit()) {
      // insert a predicate if it falls through to a loop head block
      if (should_add_predicate(bci())) {
        add_predicate();
      }
      // Do not walk into the next block until directed by do_all_blocks.
      merge(bci());
      break;
    }
    assert(bci() < block()->limit(), "bci still in block");

    if (log != NULL) {
      // Output an optional context marker, to help place actions
      // that occur during parsing of this BC.  If there is no log
      // output until the next context string, this context string
      // will be silently ignored.
      log->context()->reset();
      log->context()->print_cr("<bc code='%d' bci='%d'/>", (int)bc(), bci());
    }

    if (block()->has_trap_at(bci())) {
      // We must respect the flow pass's traps, because it will refuse
      // to produce successors for trapping blocks.
      int trap_index = block()->flow()->trap_index();
      assert(trap_index != 0, "trap index must be valid");
      uncommon_trap(trap_index);
      break;
    }

    NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );

#ifdef ASSERT
    int pre_bc_sp = sp();
    int inputs, depth;
    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
    assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC");
#endif //ASSERT

    do_one_bytecode();

    assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth, "correct depth prediction");

    do_exceptions();

    NOT_PRODUCT( parse_histogram()->record_change(); );

    if (log != NULL)  log->context()->reset();  // done w/ this one

    // Fall into next bytecode.  Each bytecode normally has 1 sequential
    // successor which is typically made ready by visiting this bytecode.
    // If the successor has several predecessors, then it is a merge
    // point, starts a new basic block, and is handled like other basic blocks.
  }
}
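// Note: the loop above exits only at the block limit (after merging into the
// fall-through successor), at a type-flow trap, or once the path has stopped
// or compilation is failing; parsing never runs past block()->limit().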
//------------------------------set_parse_bci----------------------------------
void Parse::set_parse_bci(int bci) {
  set_bci(bci);
  Node_Notes* nn = C->default_node_notes();
  if (nn == NULL)  return;

  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (!DebugInlinedCalls && depth() > 1) {
    return;
  }

  // Update the JVMS annotation, if present.
  JVMState* jvms = nn->jvms();
  if (jvms != NULL && jvms->bci() != bci) {
    // Update the JVMS.
    jvms = jvms->clone_shallow(C);
    jvms->set_bci(bci);
    nn->set_jvms(jvms);
  }
}
//------------------------------merge------------------------------------------
// Merge the current mapping into the basic block starting at bci
void Parse::merge(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "our arrival must be expected");
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}
//-------------------------merge_new_path--------------------------------------
// Merge the current mapping into the basic block, using a new path
void Parse::merge_new_path(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "new path into frozen graph");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}
//-------------------------merge_exception-------------------------------------
// Merge the current mapping into the basic block starting at bci
// The ex_oop must be pushed on the stack, unlike throw_to_exit.
void Parse::merge_exception(int target_bci) {
  assert(sp() == 1, "must have only the throw exception on the stack");
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(target->is_handler(), "exceptions are handled by special blocks");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}
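// Path-numbering summary for the three merge variants above: merge() handles
// arrivals that ciTypeFlow predicted, so it consumes the target's
// next_path_num(); merge_new_path() and merge_exception() account for edges
// the flow pass did not predict, so they first widen the target with
// add_new_path().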
//--------------------handle_missing_successor---------------------------------
void Parse::handle_missing_successor(int target_bci) {
#ifndef PRODUCT
  Block* b = block();
  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
#endif
  ShouldNotReachHere();
}
//--------------------------merge_common---------------------------------------
void Parse::merge_common(Parse::Block* target, int pnum) {
  if (TraceOptoParse) {
    tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
  }

  // Zap extra stack slots to top
  assert(sp() == target->start_sp(), "");
  clean_stack(sp());

  if (!target->is_merged()) {   // No prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with empty state");  }

    // If this path is dead, do not bother capturing it as a merge.
    // It is "as if" we had 1 fewer predecessors from the beginning.
    if (stopped()) {
      if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
      return;
    }

    // Record that a new block has been merged.
    ++_blocks_merged;

    // Make a region if we know there are multiple or unpredictable inputs.
    // (Also, if this is a plain fall-through, we might see another region,
    // which must not be allowed into this block's map.)
    if (pnum > PhiNode::Input         // Known multiple inputs.
        || target->is_handler()       // These have unpredictable inputs.
        || target->is_loop_head()     // Known multiple inputs
        || control()->is_Region()) {  // We must hide this guy.
      // Add a Region to start the new basic block.  Phis will be added
      // later lazily.
      int edges = target->pred_count();
      if (edges < pnum)  edges = pnum;  // might be a new path!
      Node *r = new (C, edges+1) RegionNode(edges+1);
      gvn().set_type(r, Type::CONTROL);
      record_for_igvn(r);
      // zap all inputs to NULL for debugging (done in Node(uint) constructor)
      // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
      r->init_req(pnum, control());
      set_control(r);
    }

    // Convert the existing Parser mapping into a mapping at this bci.
    store_state_to(target);
    assert(target->is_merged(), "do not come here twice");

  } else {                      // Prior mapping at this bci
    if (TraceOptoParse) {  tty->print(" with previous state"); }

    // We must not manufacture more phis if the target is already parsed.
    bool nophi = target->is_parsed();

    SafePointNode* newin = map();// Hang on to incoming mapping
    Block* save_block = block(); // Hang on to incoming block;
    load_state_from(target);    // Get prior mapping

    assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
    assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
    assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
    assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");

    // Iterate over my current mapping and the old mapping.
    // Where different, insert Phi functions.
    // Use any existing Phi functions.
    assert(control()->is_Region(), "must be merging to a region");
    RegionNode* r = control()->as_Region();

    // Compute where to merge into
    // Merge incoming control path
    r->init_req(pnum, newin->control());

    if (pnum == 1) {            // Last merge for this Region?
      if (!block()->flow()->is_irreducible_entry()) {
        Node* result = _gvn.transform_no_reclaim(r);
        if (r != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
        }
      }
      record_for_igvn(r);
    }

    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
      if (m->is_Phi() && m->as_Phi()->region() == r)
        phi = m->as_Phi();
      else
        phi = NULL;
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never change
        case TypeFunc::FramePtr:// Drop m, use the original value
        case TypeFunc::ReturnAdr:
          break;
        case TypeFunc::Memory:  // Merge inputs to the MergeMem node
          assert(phi == NULL, "the merge contains phis, not vice versa");
          merge_memory_edges(n->as_MergeMem(), pnum, nophi);
          continue;
        default:                // All normal stuff
          if (phi == NULL) {
            if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
              phi = ensure_phi(j, nophi);
            }
          }
          break;
        }
      }
      // At this point, n might be top if:
      //  - there is no phi (because TypeFlow detected a conflict), or
      //  - the corresponding control edge is top (a dead incoming path)
      // It is a bug if we create a phi which sees a garbage value on a live path.

      if (phi != NULL) {
        assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
        assert(phi->region() == r, "");
        phi->set_req(pnum, n);  // Then add 'n' to the merge
        if (pnum == PhiNode::Input) {
          // Last merge for this Phi.
          // So far, Phis have had a reasonable type from ciTypeFlow.
          // Now _gvn will join that with the meet of current inputs.
          // BOTTOM is never permissible here, 'cause pessimistically
          // Phis of pointers cannot lose the basic pointer type.
          debug_only(const Type* bt1 = phi->bottom_type());
          assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
          map()->set_req(j, _gvn.transform_no_reclaim(phi));
          debug_only(const Type* bt2 = phi->bottom_type());
          assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
          record_for_igvn(phi);
        }
      }
    } // End of for all values to be merged

    if (pnum == PhiNode::Input &&
        !r->in(0)) {         // The occasional useless Region
      assert(control() == r, "");
      set_control(r->nonnull_req());
    }

    // newin has been subsumed into the lazy merge, and is now dead.
    set_block(save_block);

    stop();                     // done with this guy, for now
  }

  if (TraceOptoParse) {
    tty->print_cr(" on path %d", pnum);
  }

  // Done with this parser state.
  assert(stopped(), "");
}
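// Worked example (illustration only, not from the original source): for Java
// code such as
//     int x = p ? 1 : 2;
//     use(x);
// ciTypeFlow gives the block containing use(x) two predecessors.  The first
// arrival stores its map behind a fresh RegionNode (the "empty state" branch
// above); the second arrival takes the "previous state" branch, wires its
// control into the Region, and builds a PhiNode merging the two values of x.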
//--------------------------merge_memory_edges---------------------------------
void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
  // (nophi means we must not create phis, because we already parsed here)
  assert(n != NULL, "");
  // Merge the inputs to the MergeMems
  MergeMemNode* m = merged_memory();

  assert(control()->is_Region(), "must be merging to a region");
  RegionNode* r = control()->as_Region();

  PhiNode* base = NULL;
  MergeMemNode* remerge = NULL;
  for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
    Node *p = mms.force_memory();
    Node *q = mms.memory2();
    if (mms.is_empty() && nophi) {
      // Trouble:  No new splits allowed after a loop body is parsed.
      // Instead, wire the new split into a MergeMem on the backedge.
      // The optimizer will sort it out, slicing the phi.
      if (remerge == NULL) {
        assert(base != NULL, "");
        assert(base->in(0) != NULL, "should not be xformed away");
        remerge = MergeMemNode::make(C, base->in(pnum));
        gvn().set_type(remerge, Type::MEMORY);
        base->set_req(pnum, remerge);
      }
      remerge->set_memory_at(mms.alias_idx(), q);
      continue;
    }
    assert(!q->is_MergeMem(), "");
    PhiNode* phi;
    if (p != q) {
      phi = ensure_memory_phi(mms.alias_idx(), nophi);
    } else {
      if (p->is_Phi() && p->as_Phi()->region() == r)
        phi = p->as_Phi();
      else
        phi = NULL;
    }
    // Insert q into local phi
    if (phi != NULL) {
      assert(phi->region() == r, "");
      p = phi;
      phi->set_req(pnum, q);
      if (mms.at_base_memory()) {
        base = phi;  // delay transforming it
      } else if (pnum == 1) {
        record_for_igvn(phi);
        p = _gvn.transform_no_reclaim(phi);
      }
      mms.set_memory(p);// store back through the iterator
    }
  }
  // Transform base last, in case we must fiddle with remerging.
  if (base != NULL && pnum == 1) {
    record_for_igvn(base);
    m->set_base_memory( _gvn.transform_no_reclaim(base) );
  }
}
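// Note on the "remerge" trick above: once a loop body is parsed (nophi), no
// new memory phis may be split, so a late-appearing slice is parked in a
// MergeMem hung on the base phi's backedge input; igvn later slices the phi
// apart, as the in-loop comment says.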
//------------------------ensure_phis_everywhere-------------------------------
void Parse::ensure_phis_everywhere() {
  ensure_phi(TypeFunc::I_O);

  // Ensure a phi on all currently known memories.
  for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
    ensure_memory_phi(mms.alias_idx());
    debug_only(mms.set_memory());  // keep the iterator happy
  }

  // Note:  This is our only chance to create phis for memory slices.
  // If we miss a slice that crops up later, it will have to be
  // merged into the base-memory phi that we are building here.
  // Later, the optimizer will comb out the knot, and build separate
  // phi-loops for each memory slice that matters.

  // Monitors must nest nicely and not get confused amongst themselves.
  // Phi-ify everything up to the monitors, though.
  uint monoff = map()->jvms()->monoff();
  uint nof_monitors = map()->jvms()->nof_monitors();

  assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
  bool check_elide_phi = block()->is_SEL_head();
  for (uint i = TypeFunc::Parms; i < monoff; i++) {
    if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
      ensure_phi(i);
    }
  }

  // Even monitors need Phis, though they are well-structured.
  // This is true for OSR methods, and also for the rare cases where
  // a monitor object is the subject of a replace_in_map operation.
  // See bugs 4426707 and 5043395.
  for (uint m = 0; m < nof_monitors; m++) {
    ensure_phi(map()->jvms()->monitor_obj_offset(m));
  }
}
//-----------------------------add_new_path------------------------------------
// Add a previously unaccounted predecessor to this block.
int Parse::Block::add_new_path() {
  // If there is no map, return the lowest unused path number.
  if (!is_merged())  return pred_count()+1;  // there will be a map shortly

  SafePointNode* map = start_map();
  if (!map->control()->is_Region())
    return pred_count()+1;  // there may be a region some day
  RegionNode* r = map->control()->as_Region();

  // Add new path to the region.
  uint pnum = r->req();
  r->add_req(NULL);

  for (uint i = 1; i < map->req(); i++) {
    Node* n = map->in(i);
    if (i == TypeFunc::Memory) {
      // Ensure a phi on all currently known memories.
      for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
        Node* phi = mms.memory();
        if (phi->is_Phi() && phi->as_Phi()->region() == r) {
          assert(phi->req() == pnum, "must be same size as region");
          phi->add_req(NULL);
        }
      }
    } else {
      if (n->is_Phi() && n->as_Phi()->region() == r) {
        assert(n->req() == pnum, "must be same size as region");
        n->add_req(NULL);
      }
    }
  }

  return pnum;
}
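// Invariant maintained above: a Region and every Phi that merges on it keep
// the same input count, so widening the Region by one NULL slot also widens
// each dependent Phi, including every memory phi found inside the MergeMem.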
//------------------------------ensure_phi-------------------------------------
// Turn the idx'th entry of the current map into a Phi
PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
  SafePointNode* map = this->map();
  Node* region = map->control();
  assert(region->is_Region(), "");

  Node* o = map->in(idx);
  assert(o != NULL, "");

  if (o == top())  return NULL; // TOP always merges into TOP

  if (o->is_Phi() && o->as_Phi()->region() == region) {
    return o->as_Phi();
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const JVMState* jvms = map->jvms();
  const Type* t;
  if (jvms->is_loc(idx)) {
    t = block()->local_type_at(idx - jvms->locoff());
  } else if (jvms->is_stk(idx)) {
    t = block()->stack_type_at(idx - jvms->stkoff());
  } else if (jvms->is_mon(idx)) {
    assert(!jvms->is_monitor_box(idx), "no phis for boxes");
    t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
  } else if ((uint)idx < TypeFunc::Parms) {
    t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
  } else {
    assert(false, "no type information for this phi");
  }

  // If the type falls to bottom, then this must be a local that
  // is mixing ints and oops or some such.  Forcing it to top
  // makes it go dead.
  if (t == Type::BOTTOM) {
    map->set_req(idx, top());
    return NULL;
  }

  // Do not create phis for top either.
  // A top on a non-null control flow must be unused even after the phi.
  if (t == Type::TOP || t == Type::HALF) {
    map->set_req(idx, top());
    return NULL;
  }

  PhiNode* phi = PhiNode::make(region, o, t);
  gvn().set_type(phi, t);
  if (C->do_escape_analysis()) record_for_igvn(phi);
  map->set_req(idx, phi);
  return phi;
}
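// Phis are created lazily: a map slot keeps its original node until two
// different values actually meet there.  ensure_phi() is the promotion
// point; with nocreate set (the target block is already parsed) it may only
// find an existing phi, never build a new one.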
//--------------------------ensure_memory_phi----------------------------------
// Turn the idx'th slice of the current memory into a Phi
PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
  MergeMemNode* mem = merged_memory();
  Node* region = control();
  assert(region->is_Region(), "");

  Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
  assert(o != NULL && o != top(), "");

  PhiNode* phi;
  if (o->is_Phi() && o->as_Phi()->region() == region) {
    phi = o->as_Phi();
    if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
      // clone the shared base memory phi to make a new memory split
      assert(!nocreate, "Cannot build a phi for a block already parsed.");
      const Type* t = phi->bottom_type();
      const TypePtr* adr_type = C->get_adr_type(idx);
      phi = phi->slice_memory(adr_type);
      gvn().set_type(phi, t);
    }
    return phi;
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const Type* t = o->bottom_type();
  const TypePtr* adr_type = C->get_adr_type(idx);
  phi = PhiNode::make(region, o, t, adr_type);
  gvn().set_type(phi, t);
  if (idx == Compile::AliasIdxBot)
    mem->set_base_memory(phi);
  else
    mem->set_memory_at(idx, phi);
  return phi;
}
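// Memory phis are kept per alias class: Compile::AliasIdxBot names the
// shared base memory, while higher indices name disjoint slices.  Splitting
// a slice out of the base phi (slice_memory above) lets stores to that slice
// merge independently of unrelated memory.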
//------------------------------call_register_finalizer-----------------------
// Check the klass of the receiver and call register_finalizer if the
// class needs finalization.
void Parse::call_register_finalizer() {
  Node* receiver = local(0);
  assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL,
         "must have non-null instance type");

  const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
  if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
    // The type isn't known exactly so see if CHA tells us anything.
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      // No finalizable subclasses so skip the dynamic check.
      C->dependencies()->assert_has_no_finalizable_subclasses(ik);
      return;
    }
  }

  // Insert a dynamic test for whether the instance needs
  // finalization.  In general this will fold up since the concrete
  // class is often visible so the access flags are constant.
  Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );

  Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);

  Node* mask  = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
  Node* check = _gvn.transform(new (C, 3) CmpINode(mask, intcon(0)));
  Node* test  = _gvn.transform(new (C, 2) BoolNode(check, BoolTest::ne));

  IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);

  RegionNode* result_rgn = new (C, 3) RegionNode(3);
  record_for_igvn(result_rgn);

  Node *skip_register = _gvn.transform(new (C, 1) IfFalseNode(iff));
  result_rgn->init_req(1, skip_register);

  Node *needs_register = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(needs_register);
  if (stopped()) {
    // There is no slow path.
    result_rgn->init_req(2, top());
  } else {
    Node *call = make_runtime_call(RC_NO_LEAF,
                                   OptoRuntime::register_finalizer_Type(),
                                   OptoRuntime::register_finalizer_Java(),
                                   NULL, TypePtr::BOTTOM,
                                   receiver);
    make_slow_call_ex(call, env()->Throwable_klass(), true);

    Node* fast_io  = call->in(TypeFunc::I_O);
    Node* fast_mem = call->in(TypeFunc::Memory);
    // These two phis are pre-filled with copies of the fast IO and Memory
    Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
    Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);

    result_rgn->init_req(2, control());
    io_phi    ->init_req(2, i_o());
    mem_phi   ->init_req(2, reset_memory());

    set_all_memory( _gvn.transform(mem_phi) );
    set_i_o(        _gvn.transform(io_phi) );
  }

  set_control( _gvn.transform(result_rgn) );
}
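// The net shape is a two-way diamond: path 1 of result_rgn skips
// registration when JVM_ACC_HAS_FINALIZER is clear, and path 2 calls the
// runtime and merges its i_o and memory state back through the phis built
// above.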
//------------------------------return_current---------------------------------
// Append current _map to _exit_return
void Parse::return_current(Node* value) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // Do not set_parse_bci, so that return goo is credited to the return insn.
  set_bci(InvocationEntryBci);
  if (method()->is_synchronized() && GenerateSynchronizationCode) {
    shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
  }
  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_exit(method());
  }
  SafePointNode* exit_return = _exits.map();
  exit_return->in( TypeFunc::Control  )->add_req( control() );
  exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
  Node *mem = exit_return->in( TypeFunc::Memory   );
  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch just this one input
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      gvn().set_type_bottom(phi);
      phi->del_req(phi->req()-1);  // prepare to re-patch
      mms.set_memory(phi);
    }
    mms.memory()->add_req(mms.memory2());
  }

  // frame pointer is always same, already captured
  if (value != NULL) {
    // If returning oops to an interface-return, there is a silent free
    // cast from oop to interface allowed by the Verifier.  Make it explicit
    // here.
    Node* phi = _exits.argument(0);
    const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
    if( tr && tr->klass()->is_loaded() &&
        tr->klass()->is_interface() ) {
      const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
      if (tp && tp->klass()->is_loaded() &&
          !tp->klass()->is_interface()) {
        // sharpen the type eagerly; this eases certain assert checking
        if (tp->higher_equal(TypeInstPtr::NOTNULL))
          tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
        value = _gvn.transform(new (C, 2) CheckCastPPNode(0,value,tr));
      }
    }
    phi->add_req(value);
  }

  stop_and_kill_map();          // This CFG path dies here
}
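// Each returning path appends one edge to the exit map's control, i_o, and
// memory inputs (and, for non-void methods, to the result phi); the
// accumulated exit state is folded into the method's final Return when
// parsing completes.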
//------------------------------add_safepoint----------------------------------
void Parse::add_safepoint() {
  // See if we can avoid this safepoint.  No need for a SafePoint immediately
  // after a Call (except Leaf Call) or another SafePoint.
  Node *proj = control();
  bool add_poll_param = SafePointNode::needs_polling_address_input();
  uint parms = add_poll_param ? TypeFunc::Parms+1 : TypeFunc::Parms;
  if( proj->is_Proj() ) {
    Node *n0 = proj->in(0);
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() ) {
      if( n0->as_Call()->guaranteed_safepoint() )
        return;
    } else if( n0->is_SafePoint() && n0->req() >= parms ) {
      return;
    }
  }

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Clone the JVM State
  SafePointNode *sfpnt = new (C, parms) SafePointNode(parms, NULL);

  // Capture memory state BEFORE a SafePoint.  Since we can block at a
  // SafePoint we need our GC state to be safe; i.e. we need all our current
  // write barriers (card marks) to not float down after the SafePoint so we
  // must read raw memory.  Likewise we need all oop stores to match the card
  // marks.  If deopt can happen, we need ALL stores (we need the correct JVM
  // state on a deopt).

  // We do not need to WRITE the memory state after a SafePoint.  The control
  // edge will keep card-marks and oop-stores from floating up from below a
  // SafePoint and our true dependency added here will keep them from floating
  // down below a SafePoint.

  // Clone the current memory state
  Node* mem = MergeMemNode::make(C, map()->memory());

  mem = _gvn.transform(mem);

  // Pass control through the safepoint
  sfpnt->init_req(TypeFunc::Control  , control());
  // Fix edges normally used by a call
  sfpnt->init_req(TypeFunc::I_O      , top() );
  sfpnt->init_req(TypeFunc::Memory   , mem   );
  sfpnt->init_req(TypeFunc::ReturnAdr, top() );
  sfpnt->init_req(TypeFunc::FramePtr , top() );

  // Create a node for the polling address
  if( add_poll_param ) {
    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
    sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
  }

  // Fix up the JVM State edges
  add_safepoint_edges(sfpnt);
  Node *transformed_sfpnt = _gvn.transform(sfpnt);
  set_control(transformed_sfpnt);

  // Provide an edge from root to safepoint.  This makes the safepoint
  // appear useful until the parse has completed.
  if( OptoRemoveUseless && transformed_sfpnt->is_SafePoint() ) {
    assert(C->root() != NULL, "Expect parse is still valid");
    C->root()->add_prec(transformed_sfpnt);
  }
}
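// On platforms where SafePointNode::needs_polling_address_input() is true,
// the polling page address rides along as an extra parm so the backend can
// emit the poll against it; otherwise the safepoint carries only the
// call-shaped edges initialized above.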
//------------------------------should_add_predicate--------------------------
bool Parse::should_add_predicate(int target_bci) {
  if (!UseLoopPredicate) return false;
  Block* target = successor_for_bci(target_bci);
  if (target != NULL          &&
      target->is_loop_head()  &&
      block()->rpo() < target->rpo()) {
    return true;
  }
  return false;
}
//------------------------------add_predicate---------------------------------
void Parse::add_predicate() {
  assert(UseLoopPredicate,"use only for loop predicate");
  Node *cont    = _gvn.intcon(1);
  Node* opq     = _gvn.transform(new (C, 2) Opaque1Node(C, cont));
  Node *bol     = _gvn.transform(new (C, 2) Conv2BNode(opq));
  IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
  Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff));
  C->add_predicate_opaq(opq);
  {
    PreserveJVMState pjvms(this);
    set_control(iffalse);
    uncommon_trap(Deoptimization::Reason_predicate,
                  Deoptimization::Action_maybe_recompile);
  }
  Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(iftrue);
}
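// The Opaque1Node hides the constant condition from the optimizer, keeping
// the uncommon-trap branch alive until loop predication runs;
// C->add_predicate_opaq() registers the node so that pass can find it.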
#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
  InlineTree* ilt = NULL;
  if (C->ilt() != NULL) {
    JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
    ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
  }
  if (PrintCompilation && Verbose) {
    if (depth() == 1) {
      if( ilt->count_inlines() ) {
        tty->print("    __inlined %d (%d bytes)", ilt->count_inlines(),
                     ilt->count_inline_bcs());
        tty->cr();
      }
    } else {
      if (method()->is_synchronized())         tty->print("s");
      if (method()->has_exception_handlers())  tty->print("!");
      // Check this is not the final compiled version
      if (C->trap_can_recompile()) {
        tty->print("-");
      } else {
        tty->print(" ");
      }
      method()->print_short_name();
      if (is_osr_parse()) {
        tty->print(" @ %d", osr_bci());
      }
      tty->print(" (%d bytes)",method()->code_size());
      if (ilt->count_inlines()) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
      }
      tty->cr();
    }
  }
  if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
    // Print that we succeeded; suppress this message on the first osr parse.

    if (method()->is_synchronized())         tty->print("s");
    if (method()->has_exception_handlers())  tty->print("!");
    // Check this is not the final compiled version
    if (C->trap_can_recompile() && depth() == 1) {
      tty->print("-");
    } else {
      tty->print(" ");
    }
    if( depth() != 1 ) { tty->print("   "); }  // missing compile count
    for (int i = 1; i < depth(); ++i) { tty->print("  "); }
    method()->print_short_name();
    if (is_osr_parse()) {
      tty->print(" @ %d", osr_bci());
    }
    if (ilt->caller_bci() != -1) {
      tty->print(" @ %d", ilt->caller_bci());
    }
    tty->print(" (%d bytes)",method()->code_size());
    if (ilt->count_inlines()) {
      tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                 ilt->count_inline_bcs());
    }
    tty->cr();
  }
}
//------------------------------dump-------------------------------------------
// Dump information associated with the bytecodes of current _method
void Parse::dump() {
  if( method() != NULL ) {
    // Iterate over bytecodes
    ciBytecodeStream iter(method());
    for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {
      dump_bci( iter.cur_bci() );
      tty->cr();
    }
  }
}

// Dump information associated with a byte code index, 'bci'
void Parse::dump_bci(int bci) {
  // Output info on merge-points, cloning, and within _jsr..._ret
  // NYI
  tty->print(" bci:%d", bci);
}

#endif