src/share/vm/opto/graphKit.cpp

author:      johnc
date:        Wed, 24 Apr 2013 14:48:43 -0700
changeset:   5017:d50cc62e94ff
parent:      4868:30f42e691e70
child:       5110:6f3fd5150b67
permissions: -rw-r--r--

8012715: G1: GraphKit accesses PtrQueue::_index as int but is size_t
Summary: In graphKit.cpp, int-width operations were generated to access PtrQueue::_index, which has type size_t and is therefore 64 bits wide on 64-bit machines. No problems occur on little-endian machines as long as the index fits into 32 bits, but on big-endian machines the upper half is read, which is always zero. This leads to unnecessary branches to the slow path in the runtime.
Reviewed-by: twisti, johnc
Contributed-by: Martin Doerr <martin.doerr@sap.com>
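
The failure mode described in the summary can be illustrated outside the JIT with plain C++. The sketch below is illustrative only and is not part of the patch: 'Queue' is a hypothetical stand-in for PtrQueue, and the 32-bit memcpy stands in for the int-typed load that was being emitted for the size_t field. On a little-endian LP64 host the narrow read returns the low half of the value, so the bug stays hidden as long as the index fits in 32 bits; on a big-endian host it returns the always-zero high half, so an "index == 0" test spuriously sends the G1 barrier to the runtime slow path.

// Illustrative sketch only -- not the HotSpot fix. Shows why reading a
// size_t field through a 32-bit access is endian-sensitive.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct Queue {          // hypothetical stand-in for PtrQueue
  size_t _index;        // 8 bytes on LP64, like PtrQueue::_index
};

int main() {
  Queue q;
  q._index = 64;        // small value, fits in 32 bits

  // Correct access: load the field at its natural (pointer) width.
  size_t full = q._index;

  // Buggy access: read only the first 4 bytes at the field's address,
  // which is what an int-typed load of the size_t slot does.
  uint32_t narrow;
  std::memcpy(&narrow, &q._index, sizeof(narrow));

  // Little endian: narrow == 64 (low half stored first).
  // Big endian:    narrow == 0  (high half stored first), so a
  //                "narrow == 0" check wrongly takes the slow path.
  std::printf("full = %zu, narrow 32-bit read = %u\n", full, narrow);
  return 0;
}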

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "compiler/compileLog.hpp"
    27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    28 #include "gc_implementation/g1/heapRegion.hpp"
    29 #include "gc_interface/collectedHeap.hpp"
    30 #include "memory/barrierSet.hpp"
    31 #include "memory/cardTableModRefBS.hpp"
    32 #include "opto/addnode.hpp"
    33 #include "opto/graphKit.hpp"
    34 #include "opto/idealKit.hpp"
    35 #include "opto/locknode.hpp"
    36 #include "opto/machnode.hpp"
    37 #include "opto/parse.hpp"
    38 #include "opto/rootnode.hpp"
    39 #include "opto/runtime.hpp"
    40 #include "runtime/deoptimization.hpp"
    41 #include "runtime/sharedRuntime.hpp"
    43 //----------------------------GraphKit-----------------------------------------
    44 // Main utility constructor.
    45 GraphKit::GraphKit(JVMState* jvms)
    46   : Phase(Phase::Parser),
    47     _env(C->env()),
    48     _gvn(*C->initial_gvn())
    49 {
    50   _exceptions = jvms->map()->next_exception();
    51   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
    52   set_jvms(jvms);
    53 }
    55 // Private constructor for parser.
    56 GraphKit::GraphKit()
    57   : Phase(Phase::Parser),
    58     _env(C->env()),
    59     _gvn(*C->initial_gvn())
    60 {
    61   _exceptions = NULL;
    62   set_map(NULL);
    63   debug_only(_sp = -99);
    64   debug_only(set_bci(-99));
    65 }
    69 //---------------------------clean_stack---------------------------------------
    70 // Clear away rubbish from the stack area of the JVM state.
    71 // This destroys any arguments that may be waiting on the stack.
    72 void GraphKit::clean_stack(int from_sp) {
    73   SafePointNode* map      = this->map();
    74   JVMState*      jvms     = this->jvms();
    75   int            stk_size = jvms->stk_size();
    76   int            stkoff   = jvms->stkoff();
    77   Node*          top      = this->top();
    78   for (int i = from_sp; i < stk_size; i++) {
    79     if (map->in(stkoff + i) != top) {
    80       map->set_req(stkoff + i, top);
    81     }
    82   }
    83 }
    86 //--------------------------------sync_jvms-----------------------------------
    87 // Make sure our current jvms agrees with our parse state.
    88 JVMState* GraphKit::sync_jvms() const {
    89   JVMState* jvms = this->jvms();
    90   jvms->set_bci(bci());       // Record the new bci in the JVMState
    91   jvms->set_sp(sp());         // Record the new sp in the JVMState
    92   assert(jvms_in_sync(), "jvms is now in sync");
    93   return jvms;
    94 }
    96 //--------------------------------sync_jvms_for_reexecute---------------------
    97 // Make sure our current jvms agrees with our parse state.  This version
    98 // uses the reexecute_sp for reexecuting bytecodes.
    99 JVMState* GraphKit::sync_jvms_for_reexecute() {
   100   JVMState* jvms = this->jvms();
   101   jvms->set_bci(bci());          // Record the new bci in the JVMState
   102   jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
   103   return jvms;
   104 }
   106 #ifdef ASSERT
   107 bool GraphKit::jvms_in_sync() const {
   108   Parse* parse = is_Parse();
   109   if (parse == NULL) {
   110     if (bci() !=      jvms()->bci())          return false;
   111     if (sp()  != (int)jvms()->sp())           return false;
   112     return true;
   113   }
   114   if (jvms()->method() != parse->method())    return false;
   115   if (jvms()->bci()    != parse->bci())       return false;
   116   int jvms_sp = jvms()->sp();
   117   if (jvms_sp          != parse->sp())        return false;
   118   int jvms_depth = jvms()->depth();
   119   if (jvms_depth       != parse->depth())     return false;
   120   return true;
   121 }
   123 // Local helper checks for special internal merge points
   124 // used to accumulate and merge exception states.
   125 // They are marked by the region's in(0) edge being the map itself.
   126 // Such merge points must never "escape" into the parser at large,
   127 // until they have been handed to gvn.transform.
   128 static bool is_hidden_merge(Node* reg) {
   129   if (reg == NULL)  return false;
   130   if (reg->is_Phi()) {
   131     reg = reg->in(0);
   132     if (reg == NULL)  return false;
   133   }
   134   return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
   135 }
   137 void GraphKit::verify_map() const {
   138   if (map() == NULL)  return;  // null map is OK
   139   assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
   140   assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
   141   assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
   142 }
   144 void GraphKit::verify_exception_state(SafePointNode* ex_map) {
   145   assert(ex_map->next_exception() == NULL, "not already part of a chain");
   146   assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
   147 }
   148 #endif
   150 //---------------------------stop_and_kill_map---------------------------------
   151 // Set _map to NULL, signalling a stop to further bytecode execution.
   152 // First smash the current map's control to a constant, to mark it dead.
   153 void GraphKit::stop_and_kill_map() {
   154   SafePointNode* dead_map = stop();
   155   if (dead_map != NULL) {
   156     dead_map->disconnect_inputs(NULL, C); // Mark the map as killed.
   157     assert(dead_map->is_killed(), "must be so marked");
   158   }
   159 }
   162 //--------------------------------stopped--------------------------------------
   163 // Tell if _map is NULL, or control is top.
   164 bool GraphKit::stopped() {
   165   if (map() == NULL)           return true;
   166   else if (control() == top()) return true;
   167   else                         return false;
   168 }
   171 //-----------------------------has_ex_handler----------------------------------
   172 // Tell if this method or any caller method has exception handlers.
   173 bool GraphKit::has_ex_handler() {
   174   for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
   175     if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
   176       return true;
   177     }
   178   }
   179   return false;
   180 }
   182 //------------------------------save_ex_oop------------------------------------
   183 // Save an exception without blowing stack contents or other JVM state.
   184 void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
   185   assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
   186   ex_map->add_req(ex_oop);
   187   debug_only(verify_exception_state(ex_map));
   188 }
   190 inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
   191   assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
   192   Node* ex_oop = ex_map->in(ex_map->req()-1);
   193   if (clear_it)  ex_map->del_req(ex_map->req()-1);
   194   return ex_oop;
   195 }
   197 //-----------------------------saved_ex_oop------------------------------------
   198 // Recover a saved exception from its map.
   199 Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
   200   return common_saved_ex_oop(ex_map, false);
   201 }
   203 //--------------------------clear_saved_ex_oop---------------------------------
   204 // Erase a previously saved exception from its map.
   205 Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
   206   return common_saved_ex_oop(ex_map, true);
   207 }
   209 #ifdef ASSERT
   210 //---------------------------has_saved_ex_oop----------------------------------
   211 // Tell whether an exception oop has been saved in the given map.
   212 bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
   213   return ex_map->req() == ex_map->jvms()->endoff()+1;
   214 }
   215 #endif
   217 //-------------------------make_exception_state--------------------------------
   218 // Turn the current JVM state into an exception state, appending the ex_oop.
   219 SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
   220   sync_jvms();
   221   SafePointNode* ex_map = stop();  // do not manipulate this map any more
   222   set_saved_ex_oop(ex_map, ex_oop);
   223   return ex_map;
   224 }
   227 //--------------------------add_exception_state--------------------------------
   228 // Add an exception to my list of exceptions.
   229 void GraphKit::add_exception_state(SafePointNode* ex_map) {
   230   if (ex_map == NULL || ex_map->control() == top()) {
   231     return;
   232   }
   233 #ifdef ASSERT
   234   verify_exception_state(ex_map);
   235   if (has_exceptions()) {
   236     assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
   237   }
   238 #endif
   240   // If there is already an exception of exactly this type, merge with it.
   241   // In particular, null-checks and other low-level exceptions common up here.
   242   Node*       ex_oop  = saved_ex_oop(ex_map);
   243   const Type* ex_type = _gvn.type(ex_oop);
   244   if (ex_oop == top()) {
   245     // No action needed.
   246     return;
   247   }
   248   assert(ex_type->isa_instptr(), "exception must be an instance");
   249   for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
   250     const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
   251     // We check sp also because call bytecodes can generate exceptions
   252     // both before and after arguments are popped!
   253     if (ex_type2 == ex_type
   254         && e2->_jvms->sp() == ex_map->_jvms->sp()) {
   255       combine_exception_states(ex_map, e2);
   256       return;
   257     }
   258   }
   260   // No pre-existing exception of the same type.  Chain it on the list.
   261   push_exception_state(ex_map);
   262 }
   264 //-----------------------add_exception_states_from-----------------------------
   265 void GraphKit::add_exception_states_from(JVMState* jvms) {
   266   SafePointNode* ex_map = jvms->map()->next_exception();
   267   if (ex_map != NULL) {
   268     jvms->map()->set_next_exception(NULL);
   269     for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
   270       next_map = ex_map->next_exception();
   271       ex_map->set_next_exception(NULL);
   272       add_exception_state(ex_map);
   273     }
   274   }
   275 }
   277 //-----------------------transfer_exceptions_into_jvms-------------------------
   278 JVMState* GraphKit::transfer_exceptions_into_jvms() {
   279   if (map() == NULL) {
   280     // We need a JVMS to carry the exceptions, but the map has gone away.
   281     // Create a scratch JVMS, cloned from any of the exception states...
   282     if (has_exceptions()) {
   283       _map = _exceptions;
   284       _map = clone_map();
   285       _map->set_next_exception(NULL);
   286       clear_saved_ex_oop(_map);
   287       debug_only(verify_map());
   288     } else {
   289       // ...or created from scratch
   290       JVMState* jvms = new (C) JVMState(_method, NULL);
   291       jvms->set_bci(_bci);
   292       jvms->set_sp(_sp);
   293       jvms->set_map(new (C) SafePointNode(TypeFunc::Parms, jvms));
   294       set_jvms(jvms);
   295       for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
   296       set_all_memory(top());
   297       while (map()->req() < jvms->endoff())  map()->add_req(top());
   298     }
   299     // (This is a kludge, in case you didn't notice.)
   300     set_control(top());
   301   }
   302   JVMState* jvms = sync_jvms();
   303   assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
   304   jvms->map()->set_next_exception(_exceptions);
   305   _exceptions = NULL;   // done with this set of exceptions
   306   return jvms;
   307 }
   309 static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
   310   assert(is_hidden_merge(dstphi), "must be a special merge node");
   311   assert(is_hidden_merge(srcphi), "must be a special merge node");
   312   uint limit = srcphi->req();
   313   for (uint i = PhiNode::Input; i < limit; i++) {
   314     dstphi->add_req(srcphi->in(i));
   315   }
   316 }
   317 static inline void add_one_req(Node* dstphi, Node* src) {
   318   assert(is_hidden_merge(dstphi), "must be a special merge node");
   319   assert(!is_hidden_merge(src), "must not be a special merge node");
   320   dstphi->add_req(src);
   321 }
   323 //-----------------------combine_exception_states------------------------------
   324 // This helper function combines exception states by building phis on a
   325 // specially marked state-merging region.  These regions and phis are
   326 // untransformed, and can build up gradually.  The region is marked by
   327 // having a control input of its exception map, rather than NULL.  Such
   328 // regions do not appear except in this function, and in use_exception_state.
   329 void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
   330   if (failing())  return;  // dying anyway...
   331   JVMState* ex_jvms = ex_map->_jvms;
   332   assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
   333   assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
   334   assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
   335   assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
   336   assert(ex_map->req() == phi_map->req(), "matching maps");
   337   uint tos = ex_jvms->stkoff() + ex_jvms->sp();
   338   Node*         hidden_merge_mark = root();
   339   Node*         region  = phi_map->control();
   340   MergeMemNode* phi_mem = phi_map->merged_memory();
   341   MergeMemNode* ex_mem  = ex_map->merged_memory();
   342   if (region->in(0) != hidden_merge_mark) {
   343     // The control input is not (yet) a specially-marked region in phi_map.
   344     // Make it so, and build some phis.
   345     region = new (C) RegionNode(2);
   346     _gvn.set_type(region, Type::CONTROL);
   347     region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
   348     region->init_req(1, phi_map->control());
   349     phi_map->set_control(region);
   350     Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
   351     record_for_igvn(io_phi);
   352     _gvn.set_type(io_phi, Type::ABIO);
   353     phi_map->set_i_o(io_phi);
   354     for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
   355       Node* m = mms.memory();
   356       Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
   357       record_for_igvn(m_phi);
   358       _gvn.set_type(m_phi, Type::MEMORY);
   359       mms.set_memory(m_phi);
   360     }
   361   }
   363   // Either or both of phi_map and ex_map might already be converted into phis.
   364   Node* ex_control = ex_map->control();
   365   // if there is special marking on ex_map also, we add multiple edges from src
   366   bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
   367   // how wide was the destination phi_map, originally?
   368   uint orig_width = region->req();
   370   if (add_multiple) {
   371     add_n_reqs(region, ex_control);
   372     add_n_reqs(phi_map->i_o(), ex_map->i_o());
   373   } else {
   374     // ex_map has no merges, so we just add single edges everywhere
   375     add_one_req(region, ex_control);
   376     add_one_req(phi_map->i_o(), ex_map->i_o());
   377   }
   378   for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
   379     if (mms.is_empty()) {
   380       // get a copy of the base memory, and patch some inputs into it
   381       const TypePtr* adr_type = mms.adr_type(C);
   382       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
   383       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
   384       mms.set_memory(phi);
   385       // Prepare to append interesting stuff onto the newly sliced phi:
   386       while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
   387     }
   388     // Append stuff from ex_map:
   389     if (add_multiple) {
   390       add_n_reqs(mms.memory(), mms.memory2());
   391     } else {
   392       add_one_req(mms.memory(), mms.memory2());
   393     }
   394   }
   395   uint limit = ex_map->req();
   396   for (uint i = TypeFunc::Parms; i < limit; i++) {
   397     // Skip everything in the JVMS after tos.  (The ex_oop follows.)
   398     if (i == tos)  i = ex_jvms->monoff();
   399     Node* src = ex_map->in(i);
   400     Node* dst = phi_map->in(i);
   401     if (src != dst) {
   402       PhiNode* phi;
   403       if (dst->in(0) != region) {
   404         dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
   405         record_for_igvn(phi);
   406         _gvn.set_type(phi, phi->type());
   407         phi_map->set_req(i, dst);
   408         // Prepare to append interesting stuff onto the new phi:
   409         while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
   410       } else {
   411         assert(dst->is_Phi(), "nobody else uses a hidden region");
   412         phi = (PhiNode*)dst;
   413       }
   414       if (add_multiple && src->in(0) == ex_control) {
   415         // Both are phis.
   416         add_n_reqs(dst, src);
   417       } else {
   418         while (dst->req() < region->req())  add_one_req(dst, src);
   419       }
   420       const Type* srctype = _gvn.type(src);
   421       if (phi->type() != srctype) {
   422         const Type* dsttype = phi->type()->meet(srctype);
   423         if (phi->type() != dsttype) {
   424           phi->set_type(dsttype);
   425           _gvn.set_type(phi, dsttype);
   426         }
   427       }
   428     }
   429   }
   430 }
   432 //--------------------------use_exception_state--------------------------------
   433 Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
   434   if (failing()) { stop(); return top(); }
   435   Node* region = phi_map->control();
   436   Node* hidden_merge_mark = root();
   437   assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
   438   Node* ex_oop = clear_saved_ex_oop(phi_map);
   439   if (region->in(0) == hidden_merge_mark) {
   440     // Special marking for internal ex-states.  Process the phis now.
   441     region->set_req(0, region);  // now it's an ordinary region
   442     set_jvms(phi_map->jvms());   // ...so now we can use it as a map
   443     // Note: Setting the jvms also sets the bci and sp.
   444     set_control(_gvn.transform(region));
   445     uint tos = jvms()->stkoff() + sp();
   446     for (uint i = 1; i < tos; i++) {
   447       Node* x = phi_map->in(i);
   448       if (x->in(0) == region) {
   449         assert(x->is_Phi(), "expected a special phi");
   450         phi_map->set_req(i, _gvn.transform(x));
   451       }
   452     }
   453     for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
   454       Node* x = mms.memory();
   455       if (x->in(0) == region) {
   456         assert(x->is_Phi(), "nobody else uses a hidden region");
   457         mms.set_memory(_gvn.transform(x));
   458       }
   459     }
   460     if (ex_oop->in(0) == region) {
   461       assert(ex_oop->is_Phi(), "expected a special phi");
   462       ex_oop = _gvn.transform(ex_oop);
   463     }
   464   } else {
   465     set_jvms(phi_map->jvms());
   466   }
   468   assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
   469   assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
   470   return ex_oop;
   471 }
   473 //---------------------------------java_bc-------------------------------------
   474 Bytecodes::Code GraphKit::java_bc() const {
   475   ciMethod* method = this->method();
   476   int       bci    = this->bci();
   477   if (method != NULL && bci != InvocationEntryBci)
   478     return method->java_code_at_bci(bci);
   479   else
   480     return Bytecodes::_illegal;
   481 }
   483 void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
   484                                                           bool must_throw) {
   485     // if the exception capability is set, then we will generate code
   486     // to check the JavaThread.should_post_on_exceptions flag to see
   487     // if we actually need to report exception events (for this
   488     // thread).  If we don't need to report exception events, we will
   489     // take the normal fast path provided by add_exception_events.  If
   490     // exception event reporting is enabled for this thread, we will
   491     // take the uncommon_trap in the BuildCutout below.
   493     // first must access the should_post_on_exceptions_flag in this thread's JavaThread
   494     Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
   495     Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
   496     Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
   498     // Test the should_post_on_exceptions_flag vs. 0
   499     Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
   500     Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
   502     // Branch to slow_path if should_post_on_exceptions_flag was true
   503     { BuildCutout unless(this, tst, PROB_MAX);
   504       // Do not try anything fancy if we're notifying the VM on every throw.
   505       // Cf. case Bytecodes::_athrow in parse2.cpp.
   506       uncommon_trap(reason, Deoptimization::Action_none,
   507                     (ciKlass*)NULL, (char*)NULL, must_throw);
   508     }
   510 }
   512 //------------------------------builtin_throw----------------------------------
   513 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
   514   bool must_throw = true;
   516   if (env()->jvmti_can_post_on_exceptions()) {
   517     // check if we must post exception events, take uncommon trap if so
   518     uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
   519     // here if should_post_on_exceptions is false
   520     // continue on with the normal codegen
   521   }
   523   // If this particular condition has not yet happened at this
   524   // bytecode, then use the uncommon trap mechanism, and allow for
   525   // a future recompilation if several traps occur here.
   526   // If the throw is hot, try to use a more complicated inline mechanism
   527   // which keeps execution inside the compiled code.
   528   bool treat_throw_as_hot = false;
   529   ciMethodData* md = method()->method_data();
   531   if (ProfileTraps) {
   532     if (too_many_traps(reason)) {
   533       treat_throw_as_hot = true;
   534     }
   535     // (If there is no MDO at all, assume it is early in
   536     // execution, and that any deopts are part of the
   537     // startup transient, and don't need to be remembered.)
   539     // Also, if there is a local exception handler, treat all throws
   540     // as hot if there has been at least one in this method.
   541     if (C->trap_count(reason) != 0
   542         && method()->method_data()->trap_count(reason) != 0
   543         && has_ex_handler()) {
   544         treat_throw_as_hot = true;
   545     }
   546   }
   548   // If this throw happens frequently, an uncommon trap might cause
   549   // a performance pothole.  If there is a local exception handler,
   550   // and if this particular bytecode appears to be deoptimizing often,
   551   // let us handle the throw inline, with a preconstructed instance.
   552   // Note:   If the deopt count has blown up, the uncommon trap
   553   // runtime is going to flush this nmethod, no matter what.
   554   if (treat_throw_as_hot
   555       && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
   556     // If the throw is local, we use a pre-existing instance and
   557     // punt on the backtrace.  This would lead to a missing backtrace
   558     // (a repeat of 4292742) if the backtrace object is ever asked
   559     // for its backtrace.
   560     // Fixing this remaining case of 4292742 requires some flavor of
   561     // escape analysis.  Leave that for the future.
   562     ciInstance* ex_obj = NULL;
   563     switch (reason) {
   564     case Deoptimization::Reason_null_check:
   565       ex_obj = env()->NullPointerException_instance();
   566       break;
   567     case Deoptimization::Reason_div0_check:
   568       ex_obj = env()->ArithmeticException_instance();
   569       break;
   570     case Deoptimization::Reason_range_check:
   571       ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
   572       break;
   573     case Deoptimization::Reason_class_check:
   574       if (java_bc() == Bytecodes::_aastore) {
   575         ex_obj = env()->ArrayStoreException_instance();
   576       } else {
   577         ex_obj = env()->ClassCastException_instance();
   578       }
   579       break;
   580     }
   581     if (failing()) { stop(); return; }  // exception allocation might fail
   582     if (ex_obj != NULL) {
   583       // Cheat with a preallocated exception object.
   584       if (C->log() != NULL)
   585         C->log()->elem("hot_throw preallocated='1' reason='%s'",
   586                        Deoptimization::trap_reason_name(reason));
   587       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
   588       Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );
   590       // Clear the detail message of the preallocated exception object.
   591       // Weblogic sometimes mutates the detail message of exceptions
   592       // using reflection.
   593       int offset = java_lang_Throwable::get_detailMessage_offset();
   594       const TypePtr* adr_typ = ex_con->add_offset(offset);
   596       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
   597       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
   598       Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);
   600       add_exception_state(make_exception_state(ex_node));
   601       return;
   602     }
   603   }
   605   // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
   606   // It won't be much cheaper than bailing to the interp., since we'll
   607   // have to pass up all the debug-info, and the runtime will have to
   608   // create the stack trace.
   610   // Usual case:  Bail to interpreter.
   611   // Reserve the right to recompile if we haven't seen anything yet.
   613   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   614   if (treat_throw_as_hot
   615       && (method()->method_data()->trap_recompiled_at(bci())
   616           || C->too_many_traps(reason))) {
   617     // We cannot afford to take more traps here.  Suffer in the interpreter.
   618     if (C->log() != NULL)
   619       C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
   620                      Deoptimization::trap_reason_name(reason),
   621                      C->trap_count(reason));
   622     action = Deoptimization::Action_none;
   623   }
   625   // "must_throw" prunes the JVM state to include only the stack, if there
   626   // are no local exception handlers.  This should cut down on register
   627   // allocation time and code size, by drastically reducing the number
   628   // of in-edges on the call to the uncommon trap.
   630   uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
   631 }
   634 //----------------------------PreserveJVMState---------------------------------
   635 PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
   636   debug_only(kit->verify_map());
   637   _kit    = kit;
   638   _map    = kit->map();   // preserve the map
   639   _sp     = kit->sp();
   640   kit->set_map(clone_map ? kit->clone_map() : NULL);
   641 #ifdef ASSERT
   642   _bci    = kit->bci();
   643   Parse* parser = kit->is_Parse();
   644   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
   645   _block  = block;
   646 #endif
   647 }
   648 PreserveJVMState::~PreserveJVMState() {
   649   GraphKit* kit = _kit;
   650 #ifdef ASSERT
   651   assert(kit->bci() == _bci, "bci must not shift");
   652   Parse* parser = kit->is_Parse();
   653   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
   654   assert(block == _block,    "block must not shift");
   655 #endif
   656   kit->set_map(_map);
   657   kit->set_sp(_sp);
   658 }
   661 //-----------------------------BuildCutout-------------------------------------
   662 BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
   663   : PreserveJVMState(kit)
   664 {
   665   assert(p->is_Con() || p->is_Bool(), "test must be a bool");
   666   SafePointNode* outer_map = _map;   // preserved map is caller's
   667   SafePointNode* inner_map = kit->map();
   668   IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
   669   outer_map->set_control(kit->gvn().transform( new (kit->C) IfTrueNode(iff) ));
   670   inner_map->set_control(kit->gvn().transform( new (kit->C) IfFalseNode(iff) ));
   671 }
   672 BuildCutout::~BuildCutout() {
   673   GraphKit* kit = _kit;
   674   assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
   675 }
   677 //---------------------------PreserveReexecuteState----------------------------
   678 PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
   679   assert(!kit->stopped(), "must call stopped() before");
   680   _kit    =    kit;
   681   _sp     =    kit->sp();
   682   _reexecute = kit->jvms()->_reexecute;
   683 }
   684 PreserveReexecuteState::~PreserveReexecuteState() {
   685   if (_kit->stopped()) return;
   686   _kit->jvms()->_reexecute = _reexecute;
   687   _kit->set_sp(_sp);
   688 }
   690 //------------------------------clone_map--------------------------------------
   691 // Implementation of PreserveJVMState
   692 //
   693 // Only clone_map(...) here. If this function is only used in the
   694 // PreserveJVMState class we may want to get rid of this extra
   695 // function eventually and do it all there.
   697 SafePointNode* GraphKit::clone_map() {
   698   if (map() == NULL)  return NULL;
   700   // Clone the memory edge first
   701   Node* mem = MergeMemNode::make(C, map()->memory());
   702   gvn().set_type_bottom(mem);
   704   SafePointNode *clonemap = (SafePointNode*)map()->clone();
   705   JVMState* jvms = this->jvms();
   706   JVMState* clonejvms = jvms->clone_shallow(C);
   707   clonemap->set_memory(mem);
   708   clonemap->set_jvms(clonejvms);
   709   clonejvms->set_map(clonemap);
   710   record_for_igvn(clonemap);
   711   gvn().set_type_bottom(clonemap);
   712   return clonemap;
   713 }
   716 //-----------------------------set_map_clone-----------------------------------
   717 void GraphKit::set_map_clone(SafePointNode* m) {
   718   _map = m;
   719   _map = clone_map();
   720   _map->set_next_exception(NULL);
   721   debug_only(verify_map());
   722 }
   725 //----------------------------kill_dead_locals---------------------------------
   726 // Detect any locals which are known to be dead, and force them to top.
   727 void GraphKit::kill_dead_locals() {
   728   // Consult the liveness information for the locals.  If any
   729   // of them are unused, then they can be replaced by top().  This
   730   // should help register allocation time and cut down on the size
   731   // of the deoptimization information.
   733   // This call is made from many of the bytecode handling
   734   // subroutines called from the Big Switch in do_one_bytecode.
   735   // Every bytecode which might include a slow path is responsible
   736   // for killing its dead locals.  The more consistent we
   737   // are about killing deads, the fewer useless phis will be
   738   // constructed for them at various merge points.
   740   // bci can be -1 (InvocationEntryBci).  We return the entry
   741   // liveness for the method.
   743   if (method() == NULL || method()->code_size() == 0) {
   744     // We are building a graph for a call to a native method.
   745     // All locals are live.
   746     return;
   747   }
   749   ResourceMark rm;
   751   // Consult the liveness information for the locals.  If any
   752   // of them are unused, then they can be replaced by top().  This
   753   // should help register allocation time and cut down on the size
   754   // of the deoptimization information.
   755   MethodLivenessResult live_locals = method()->liveness_at_bci(bci());
   757   int len = (int)live_locals.size();
   758   assert(len <= jvms()->loc_size(), "too many live locals");
   759   for (int local = 0; local < len; local++) {
   760     if (!live_locals.at(local)) {
   761       set_local(local, top());
   762     }
   763   }
   764 }
   766 #ifdef ASSERT
   767 //-------------------------dead_locals_are_killed------------------------------
   768 // Return true if all dead locals are set to top in the map.
   769 // Used to assert "clean" debug info at various points.
   770 bool GraphKit::dead_locals_are_killed() {
   771   if (method() == NULL || method()->code_size() == 0) {
   772     // No locals need to be dead, so all is as it should be.
   773     return true;
   774   }
   776   // Make sure somebody called kill_dead_locals upstream.
   777   ResourceMark rm;
   778   for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
   779     if (jvms->loc_size() == 0)  continue;  // no locals to consult
   780     SafePointNode* map = jvms->map();
   781     ciMethod* method = jvms->method();
   782     int       bci    = jvms->bci();
   783     if (jvms == this->jvms()) {
   784       bci = this->bci();  // it might not yet be synched
   785     }
   786     MethodLivenessResult live_locals = method->liveness_at_bci(bci);
   787     int len = (int)live_locals.size();
   788     if (!live_locals.is_valid() || len == 0)
   789       // This method is trivial, or is poisoned by a breakpoint.
   790       return true;
   791     assert(len == jvms->loc_size(), "live map consistent with locals map");
   792     for (int local = 0; local < len; local++) {
   793       if (!live_locals.at(local) && map->local(jvms, local) != top()) {
   794         if (PrintMiscellaneous && (Verbose || WizardMode)) {
   795           tty->print_cr("Zombie local %d: ", local);
   796           jvms->dump();
   797         }
   798         return false;
   799       }
   800     }
   801   }
   802   return true;
   803 }
   805 #endif //ASSERT
   807 // Helper function for enforcing certain bytecodes to reexecute if
   808 // deoptimization happens
   809 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
   810   ciMethod* cur_method = jvms->method();
   811   int       cur_bci   = jvms->bci();
   812   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
   813     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
   814     return Interpreter::bytecode_should_reexecute(code) ||
   815            (is_anewarray && code == Bytecodes::_multianewarray);
   816     // Reexecute _multianewarray bytecode which was replaced with
   817     // sequence of [a]newarray. See Parse::do_multianewarray().
   818     //
   819     // Note: interpreter should not have it set since this optimization
   820     // is limited by dimensions and guarded by flag so in some cases
   821     // multianewarray() runtime calls will be generated and
   822     // the bytecode should not be reexecuted (the stack will not be reset).
   823   } else
   824     return false;
   825 }
   827 // Helper function for adding JVMState and debug information to node
   828 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
   829   // Add the safepoint edges to the call (or other safepoint).
   831   // Make sure dead locals are set to top.  This
   832   // should help register allocation time and cut down on the size
   833   // of the deoptimization information.
   834   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
   836   // Walk the inline list to fill in the correct set of JVMState's
   837   // Also fill in the associated edges for each JVMState.
   839   // If the bytecode needs to be reexecuted we need to put
   840   // the arguments back on the stack.
   841   const bool should_reexecute = jvms()->should_reexecute();
   842   JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
   844   // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
   845   // undefined if the bci is different.  This is normal for Parse but it
   846   // should not happen for LibraryCallKit because only one bci is processed.
   847   assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
   848          "in LibraryCallKit the reexecute bit should not change");
   850   // If we are guaranteed to throw, we can prune everything but the
   851   // input to the current bytecode.
   852   bool can_prune_locals = false;
   853   uint stack_slots_not_pruned = 0;
   854   int inputs = 0, depth = 0;
   855   if (must_throw) {
   856     assert(method() == youngest_jvms->method(), "sanity");
   857     if (compute_stack_effects(inputs, depth)) {
   858       can_prune_locals = true;
   859       stack_slots_not_pruned = inputs;
   860     }
   861   }
   863   if (env()->jvmti_can_access_local_variables()) {
   864     // At any safepoint, this method can get breakpointed, which would
   865     // then require an immediate deoptimization.
   866     can_prune_locals = false;  // do not prune locals
   867     stack_slots_not_pruned = 0;
   868   }
   870   // do not scribble on the input jvms
   871   JVMState* out_jvms = youngest_jvms->clone_deep(C);
   872   call->set_jvms(out_jvms); // Start jvms list for call node
   874   // For a known set of bytecodes, the interpreter should reexecute them if
   875   // deoptimization happens. We set the reexecute state for them here
   876   if (out_jvms->is_reexecute_undefined() && //don't change if already specified
   877       should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
   878     out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
   879   }
   881   // Presize the call:
   882   DEBUG_ONLY(uint non_debug_edges = call->req());
   883   call->add_req_batch(top(), youngest_jvms->debug_depth());
   884   assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
   886   // Set up edges so that the call looks like this:
   887   //  Call [state:] ctl io mem fptr retadr
   888   //       [parms:] parm0 ... parmN
   889   //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
   890   //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
   891   //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
   892   // Note that caller debug info precedes callee debug info.
   894   // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
   895   uint debug_ptr = call->req();
   897   // Loop over the map input edges associated with jvms, add them
   898   // to the call node, & reset all offsets to match call node array.
   899   for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
   900     uint debug_end   = debug_ptr;
   901     uint debug_start = debug_ptr - in_jvms->debug_size();
   902     debug_ptr = debug_start;  // back up the ptr
   904     uint p = debug_start;  // walks forward in [debug_start, debug_end)
   905     uint j, k, l;
   906     SafePointNode* in_map = in_jvms->map();
   907     out_jvms->set_map(call);
   909     if (can_prune_locals) {
   910       assert(in_jvms->method() == out_jvms->method(), "sanity");
   911       // If the current throw can reach an exception handler in this JVMS,
   912       // then we must keep everything live that can reach that handler.
   913       // As a quick and dirty approximation, we look for any handlers at all.
   914       if (in_jvms->method()->has_exception_handlers()) {
   915         can_prune_locals = false;
   916       }
   917     }
   919     // Add the Locals
   920     k = in_jvms->locoff();
   921     l = in_jvms->loc_size();
   922     out_jvms->set_locoff(p);
   923     if (!can_prune_locals) {
   924       for (j = 0; j < l; j++)
   925         call->set_req(p++, in_map->in(k+j));
   926     } else {
   927       p += l;  // already set to top above by add_req_batch
   928     }
   930     // Add the Expression Stack
   931     k = in_jvms->stkoff();
   932     l = in_jvms->sp();
   933     out_jvms->set_stkoff(p);
   934     if (!can_prune_locals) {
   935       for (j = 0; j < l; j++)
   936         call->set_req(p++, in_map->in(k+j));
   937     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
   938       // Divide stack into {S0,...,S1}, where S0 is set to top.
   939       uint s1 = stack_slots_not_pruned;
   940       stack_slots_not_pruned = 0;  // for next iteration
   941       if (s1 > l)  s1 = l;
   942       uint s0 = l - s1;
   943       p += s0;  // skip the tops preinstalled by add_req_batch
   944       for (j = s0; j < l; j++)
   945         call->set_req(p++, in_map->in(k+j));
   946     } else {
   947       p += l;  // already set to top above by add_req_batch
   948     }
   950     // Add the Monitors
   951     k = in_jvms->monoff();
   952     l = in_jvms->mon_size();
   953     out_jvms->set_monoff(p);
   954     for (j = 0; j < l; j++)
   955       call->set_req(p++, in_map->in(k+j));
   957     // Copy any scalar object fields.
   958     k = in_jvms->scloff();
   959     l = in_jvms->scl_size();
   960     out_jvms->set_scloff(p);
   961     for (j = 0; j < l; j++)
   962       call->set_req(p++, in_map->in(k+j));
   964     // Finish the new jvms.
   965     out_jvms->set_endoff(p);
   967     assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
   968     assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
   969     assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
   970     assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
   971     assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
   972     assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
   974     // Update the two tail pointers in parallel.
   975     out_jvms = out_jvms->caller();
   976     in_jvms  = in_jvms->caller();
   977   }
   979   assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
   981   // Test the correctness of JVMState::debug_xxx accessors:
   982   assert(call->jvms()->debug_start() == non_debug_edges, "");
   983   assert(call->jvms()->debug_end()   == call->req(), "");
   984   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
   985 }
   987 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
   988   Bytecodes::Code code = java_bc();
   989   if (code == Bytecodes::_wide) {
   990     code = method()->java_code_at_bci(bci() + 1);
   991   }
   993   BasicType rtype = T_ILLEGAL;
   994   int       rsize = 0;
   996   if (code != Bytecodes::_illegal) {
   997     depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
   998     rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
   999     if (rtype < T_CONFLICT)
  1000       rsize = type2size[rtype];
  1001   }
  1003   switch (code) {
  1004   case Bytecodes::_illegal:
  1005     return false;
  1007   case Bytecodes::_ldc:
  1008   case Bytecodes::_ldc_w:
  1009   case Bytecodes::_ldc2_w:
  1010     inputs = 0;
  1011     break;
  1013   case Bytecodes::_dup:         inputs = 1;  break;
  1014   case Bytecodes::_dup_x1:      inputs = 2;  break;
  1015   case Bytecodes::_dup_x2:      inputs = 3;  break;
  1016   case Bytecodes::_dup2:        inputs = 2;  break;
  1017   case Bytecodes::_dup2_x1:     inputs = 3;  break;
  1018   case Bytecodes::_dup2_x2:     inputs = 4;  break;
  1019   case Bytecodes::_swap:        inputs = 2;  break;
  1020   case Bytecodes::_arraylength: inputs = 1;  break;
  1022   case Bytecodes::_getstatic:
  1023   case Bytecodes::_putstatic:
  1024   case Bytecodes::_getfield:
  1025   case Bytecodes::_putfield:
  1026     {
  1027       bool ignored_will_link;
  1028       ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
  1029       int      size  = field->type()->size();
  1030       bool is_get = (depth >= 0), is_static = (depth & 1);
  1031       inputs = (is_static ? 0 : 1);
  1032       if (is_get) {
  1033         depth = size - inputs;
  1034       } else {
  1035         inputs += size;        // putxxx pops the value from the stack
  1036         depth = - inputs;
  1037       }
  1038     }
  1039     break;
  1041   case Bytecodes::_invokevirtual:
  1042   case Bytecodes::_invokespecial:
  1043   case Bytecodes::_invokestatic:
  1044   case Bytecodes::_invokedynamic:
  1045   case Bytecodes::_invokeinterface:
  1046     {
  1047       bool ignored_will_link;
  1048       ciSignature* declared_signature = NULL;
  1049       ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  1050       assert(declared_signature != NULL, "cannot be null");
  1051       inputs   = declared_signature->arg_size_for_bc(code);
  1052       int size = declared_signature->return_type()->size();
  1053       depth = size - inputs;
  1054     }
  1055     break;
  1057   case Bytecodes::_multianewarray:
  1058     {
  1059       ciBytecodeStream iter(method());
  1060       iter.reset_to_bci(bci());
  1061       iter.next();
  1062       inputs = iter.get_dimensions();
  1063       assert(rsize == 1, "");
  1064       depth = rsize - inputs;
  1065     }
  1066     break;
  1068   case Bytecodes::_ireturn:
  1069   case Bytecodes::_lreturn:
  1070   case Bytecodes::_freturn:
  1071   case Bytecodes::_dreturn:
  1072   case Bytecodes::_areturn:
  1073     assert(rsize == -depth, "");
  1074     inputs = rsize;
  1075     break;
  1077   case Bytecodes::_jsr:
  1078   case Bytecodes::_jsr_w:
  1079     inputs = 0;
  1080     depth  = 1;                  // S.B. depth=1, not zero
  1081     break;
  1083   default:
  1084     // bytecode produces a typed result
  1085     inputs = rsize - depth;
  1086     assert(inputs >= 0, "");
  1087     break;
  1088   }
  1090 #ifdef ASSERT
  1091   // spot check
  1092   int outputs = depth + inputs;
  1093   assert(outputs >= 0, "sanity");
  1094   switch (code) {
  1095   case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  1096   case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  1097   case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  1098   case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  1099   case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  1100   }
  1101 #endif //ASSERT
  1103   return true;
  1104 }
  1108 //------------------------------basic_plus_adr---------------------------------
  1109 Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  1110   // short-circuit a common case
  1111   if (offset == intcon(0))  return ptr;
  1112   return _gvn.transform( new (C) AddPNode(base, ptr, offset) );
  1113 }
  1115 Node* GraphKit::ConvI2L(Node* offset) {
  1116   // short-circuit a common case
  1117   jint offset_con = find_int_con(offset, Type::OffsetBot);
  1118   if (offset_con != Type::OffsetBot) {
  1119     return longcon((jlong) offset_con);
  1120   }
  1121   return _gvn.transform( new (C) ConvI2LNode(offset));
  1122 }
  1123 Node* GraphKit::ConvL2I(Node* offset) {
  1124   // short-circuit a common case
  1125   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  1126   if (offset_con != (jlong)Type::OffsetBot) {
  1127     return intcon((int) offset_con);
  1128   }
  1129   return _gvn.transform( new (C) ConvL2INode(offset));
  1130 }
  1132 //-------------------------load_object_klass-----------------------------------
  1133 Node* GraphKit::load_object_klass(Node* obj) {
  1134   // Special-case a fresh allocation to avoid building nodes:
  1135   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  1136   if (akls != NULL)  return akls;
  1137   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  1138   return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
  1139 }
  1141 //-------------------------load_array_length-----------------------------------
  1142 Node* GraphKit::load_array_length(Node* array) {
  1143   // Special-case a fresh allocation to avoid building nodes:
  1144   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  1145   Node *alen;
  1146   if (alloc == NULL) {
  1147     Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
  1148     alen = _gvn.transform( new (C) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  1149   } else {
  1150     alen = alloc->Ideal_length();
  1151     Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
  1152     if (ccast != alen) {
  1153       alen = _gvn.transform(ccast);
  1154     }
  1155   }
  1156   return alen;
  1157 }
  1159 //------------------------------do_null_check----------------------------------
  1160 // Helper function to do a NULL pointer check.  Returned value is
  1161 // the incoming address with NULL casted away.  You are allowed to use the
  1162 // not-null value only if you are control dependent on the test.
  1163 extern int explicit_null_checks_inserted,
  1164            explicit_null_checks_elided;
  1165 Node* GraphKit::null_check_common(Node* value, BasicType type,
  1166                                   // optional arguments for variations:
  1167                                   bool assert_null,
  1168                                   Node* *null_control) {
  1169   assert(!assert_null || null_control == NULL, "not both at once");
  1170   if (stopped())  return top();
  1171   if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
  1172     // For some performance testing, we may wish to suppress null checking.
  1173     value = cast_not_null(value);   // Make it appear to be non-null (4962416).
  1174     return value;
  1175   }
  1176   explicit_null_checks_inserted++;
  1178   // Construct NULL check
  1179   Node *chk = NULL;
  1180   switch(type) {
  1181     case T_LONG   : chk = new (C) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
  1182     case T_INT    : chk = new (C) CmpINode(value, _gvn.intcon(0)); break;
  1183     case T_ARRAY  : // fall through
  1184       type = T_OBJECT;  // simplify further tests
  1185     case T_OBJECT : {
  1186       const Type *t = _gvn.type( value );
  1188       const TypeOopPtr* tp = t->isa_oopptr();
  1189       if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
  1190           // Only for do_null_check, not any of its siblings:
  1191           && !assert_null && null_control == NULL) {
  1192         // Usually, any field access or invocation on an unloaded oop type
  1193         // will simply fail to link, since the statically linked class is
  1194         // likely also to be unloaded.  However, in -Xcomp mode, sometimes
  1195         // the static class is loaded but the sharper oop type is not.
  1196         // Rather than checking for this obscure case in lots of places,
  1197         // we simply observe that a null check on an unloaded class
  1198         // will always be followed by a nonsense operation, so we
  1199         // can just issue the uncommon trap here.
  1200         // Our access to the unloaded class will only be correct
  1201         // after it has been loaded and initialized, which requires
  1202         // a trip through the interpreter.
  1203 #ifndef PRODUCT
  1204         if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
  1205 #endif
  1206         uncommon_trap(Deoptimization::Reason_unloaded,
  1207                       Deoptimization::Action_reinterpret,
  1208                       tp->klass(), "!loaded");
  1209         return top();
  1210       }
  1212       if (assert_null) {
  1213         // See if the type is contained in NULL_PTR.
  1214         // If so, then the value is already null.
  1215         if (t->higher_equal(TypePtr::NULL_PTR)) {
  1216           explicit_null_checks_elided++;
  1217           return value;           // Elided null assert quickly!
  1218         }
  1219       } else {
  1220         // See if mixing in the NULL pointer changes type.
  1221         // If so, then the NULL pointer was not allowed in the original
  1222         // type.  In other words, "value" was not-null.
  1223         if (t->meet(TypePtr::NULL_PTR) != t) {
  1224           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
  1225           explicit_null_checks_elided++;
  1226           return value;           // Elided null check quickly!
  1227         }
  1228       }
  1229       chk = new (C) CmpPNode( value, null() );
  1230       break;
  1231     }
  1233     default:
  1234       fatal(err_msg_res("unexpected type: %s", type2name(type)));
  1235   }
  1236   assert(chk != NULL, "sanity check");
  1237   chk = _gvn.transform(chk);
  1239   BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  1240   BoolNode *btst = new (C) BoolNode( chk, btest);
  1241   Node   *tst = _gvn.transform( btst );
  1243   //-----------
  1244   // if peephole optimizations occurred, a prior test existed.
  1245   // If a prior test existed, maybe it dominates as we can avoid this test.
  1246   if (tst != btst && type == T_OBJECT) {
  1247     // At this point we want to scan up the CFG to see if we can
  1248     // find an identical test (and so avoid this test altogether).
  1249     Node *cfg = control();
  1250     int depth = 0;
  1251     while( depth < 16 ) {       // Limit search depth for speed
  1252       if( cfg->Opcode() == Op_IfTrue &&
  1253           cfg->in(0)->in(1) == tst ) {
  1254         // Found prior test.  Use "cast_not_null" to construct an identical
  1255         // CastPP (and hence hash to) as already exists for the prior test.
  1256         // Return that casted value.
  1257         if (assert_null) {
  1258           replace_in_map(value, null());
  1259           return null();  // do not issue the redundant test
  1261         Node *oldcontrol = control();
  1262         set_control(cfg);
  1263         Node *res = cast_not_null(value);
  1264         set_control(oldcontrol);
  1265         explicit_null_checks_elided++;
  1266         return res;
  1268       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
  1269       if (cfg == NULL)  break;  // Quit at region nodes
  1270       depth++;
  1274   //-----------
  1275   // Branch to failure if null
  1276   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  1277   Deoptimization::DeoptReason reason;
  1278   if (assert_null)
  1279     reason = Deoptimization::Reason_null_assert;
  1280   else if (type == T_OBJECT)
  1281     reason = Deoptimization::Reason_null_check;
  1282   else
  1283     reason = Deoptimization::Reason_div0_check;
  1285   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  1286   // ciMethodData::has_trap_at will return a conservative -1 if any
  1287   // must-be-null assertion has failed.  This could cause performance
  1288   // problems for a method after its first do_null_assert failure.
  1289   // Consider using 'Reason_class_check' instead?
  1291   // To cause an implicit null check, we set the not-null probability
  1292   // to the maximum (PROB_MAX).  For an explicit check the probability
  1293   // is set to a smaller value.
  1294   if (null_control != NULL || too_many_traps(reason)) {
  1295     // probability is less likely
  1296     ok_prob =  PROB_LIKELY_MAG(3);
  1297   } else if (!assert_null &&
  1298              (ImplicitNullCheckThreshold > 0) &&
  1299              method() != NULL &&
  1300              (method()->method_data()->trap_count(reason)
  1301               >= (uint)ImplicitNullCheckThreshold)) {
  1302     ok_prob =  PROB_LIKELY_MAG(3);
  1305   if (null_control != NULL) {
  1306     IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
  1307     Node* null_true = _gvn.transform( new (C) IfFalseNode(iff));
  1308     set_control(      _gvn.transform( new (C) IfTrueNode(iff)));
  1309     if (null_true == top())
  1310       explicit_null_checks_elided++;
  1311     (*null_control) = null_true;
  1312   } else {
  1313     BuildCutout unless(this, tst, ok_prob);
  1314     // Check for optimizer eliding test at parse time
  1315     if (stopped()) {
  1316       // Failure not possible; do not bother making uncommon trap.
  1317       explicit_null_checks_elided++;
  1318     } else if (assert_null) {
  1319       uncommon_trap(reason,
  1320                     Deoptimization::Action_make_not_entrant,
  1321                     NULL, "assert_null");
  1322     } else {
  1323       replace_in_map(value, zerocon(type));
  1324       builtin_throw(reason);
  1328   // Must throw exception, fall-thru not possible?
  1329   if (stopped()) {
  1330     return top();               // No result
  1333   if (assert_null) {
  1334     // Cast obj to null on this path.
  1335     replace_in_map(value, zerocon(type));
  1336     return zerocon(type);
  1339   // Cast obj to not-null on this path, if there is no null_control.
  1340   // (If there is a null_control, a non-null value may come back to haunt us.)
  1341   if (type == T_OBJECT) {
  1342     Node* cast = cast_not_null(value, false);
  1343     if (null_control == NULL || (*null_control) == top())
  1344       replace_in_map(value, cast);
  1345     value = cast;
  1348   return value;
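       // Illustration (hedged): typical ways callers reach null_check_common().
       // The signature (value, type, assert_null, null_control) is paraphrased
       // from graphKit.hpp and should be treated as an assumption, not verbatim:
       //
       //   Node* not_null_obj = null_check_common(obj,     T_OBJECT);  // trap or throw on null
       //   Node* nonzero_div  = null_check_common(divisor, T_INT);     // uses Reason_div0_check
       //   Node* known_null   = null_check_common(obj,     T_OBJECT,
       //                                          /*assert_null=*/ true);   // prove obj IS null
       //   Node* null_ctl     = top();
       //   Node* casted       = null_check_common(obj, T_OBJECT, false, &null_ctl);  // branchy form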
  1352 //------------------------------cast_not_null----------------------------------
  1353 // Cast obj to not-null on this path
  1354 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  1355   const Type *t = _gvn.type(obj);
  1356   const Type *t_not_null = t->join(TypePtr::NOTNULL);
  1357   // Object is already not-null?
  1358   if( t == t_not_null ) return obj;
  1360   Node *cast = new (C) CastPPNode(obj,t_not_null);
  1361   cast->init_req(0, control());
  1362   cast = _gvn.transform( cast );
  1364   // Scan for instances of 'obj' in the current JVM mapping.
  1365   // These instances are known to be not-null after the test.
  1366   if (do_replace_in_map)
  1367     replace_in_map(obj, cast);
  1369   return cast;                  // Return casted value
  1373 //--------------------------replace_in_map-------------------------------------
  1374 void GraphKit::replace_in_map(Node* old, Node* neww) {
  1375   this->map()->replace_edge(old, neww);
  1377   // Note: This operation potentially replaces any edge
  1378   // on the map.  This includes locals, stack, and monitors
  1379   // of the current (innermost) JVM state.
  1381   // We can consider replacing in caller maps.
  1382   // The idea would be that an inlined function's null checks
  1383   // can be shared with the entire inlining tree.
  1384   // The expense of doing this is that the PreserveJVMState class
  1385   // would have to preserve caller states too, with a deep copy.
  1389 //=============================================================================
  1390 //--------------------------------memory---------------------------------------
  1391 Node* GraphKit::memory(uint alias_idx) {
  1392   MergeMemNode* mem = merged_memory();
  1393   Node* p = mem->memory_at(alias_idx);
  1394   _gvn.set_type(p, Type::MEMORY);  // must be mapped
  1395   return p;
  1398 //-----------------------------reset_memory------------------------------------
  1399 Node* GraphKit::reset_memory() {
  1400   Node* mem = map()->memory();
  1401   // do not use this node for any more parsing!
  1402   debug_only( map()->set_memory((Node*)NULL) );
  1403   return _gvn.transform( mem );
  1406 //------------------------------set_all_memory---------------------------------
  1407 void GraphKit::set_all_memory(Node* newmem) {
  1408   Node* mergemem = MergeMemNode::make(C, newmem);
  1409   gvn().set_type_bottom(mergemem);
  1410   map()->set_memory(mergemem);
  1413 //------------------------------set_all_memory_call----------------------------
  1414 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
  1415   Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
  1416   set_all_memory(newmem);
  1419 //=============================================================================
  1420 //
  1421 // parser factory methods for MemNodes
  1422 //
  1423 // These are layered on top of the factory methods in LoadNode and StoreNode,
  1424 // and integrate with the parser's memory state and _gvn engine.
  1425 //
  1427 // Factory methods keyed by an alias index ("int adr_idx") rather than a TypePtr.
  1428 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
  1429                           int adr_idx,
  1430                           bool require_atomic_access) {
  1431   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  1432   const TypePtr* adr_type = NULL; // debug-mode-only argument
  1433   debug_only(adr_type = C->get_adr_type(adr_idx));
  1434   Node* mem = memory(adr_idx);
  1435   Node* ld;
  1436   if (require_atomic_access && bt == T_LONG) {
  1437     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
  1438   } else {
  1439     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
  1441   return _gvn.transform(ld);
  1444 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
  1445                                 int adr_idx,
  1446                                 bool require_atomic_access) {
  1447   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  1448   const TypePtr* adr_type = NULL;
  1449   debug_only(adr_type = C->get_adr_type(adr_idx));
  1450   Node *mem = memory(adr_idx);
  1451   Node* st;
  1452   if (require_atomic_access && bt == T_LONG) {
  1453     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
  1454   } else {
  1455     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
  1457   st = _gvn.transform(st);
  1458   set_memory(st, adr_idx);
  1459   // Back-to-back stores can only eliminate the intermediate store with DU (def-use) info,
  1460   // so push this store on the worklist for the optimizer.
  1461   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
  1462     record_for_igvn(st);
  1464   return st;
  1468 void GraphKit::pre_barrier(bool do_load,
  1469                            Node* ctl,
  1470                            Node* obj,
  1471                            Node* adr,
  1472                            uint  adr_idx,
  1473                            Node* val,
  1474                            const TypeOopPtr* val_type,
  1475                            Node* pre_val,
  1476                            BasicType bt) {
  1478   BarrierSet* bs = Universe::heap()->barrier_set();
  1479   set_control(ctl);
  1480   switch (bs->kind()) {
  1481     case BarrierSet::G1SATBCT:
  1482     case BarrierSet::G1SATBCTLogging:
  1483       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
  1484       break;
  1486     case BarrierSet::CardTableModRef:
  1487     case BarrierSet::CardTableExtension:
  1488     case BarrierSet::ModRef:
  1489       break;
  1491     case BarrierSet::Other:
  1492     default      :
  1493       ShouldNotReachHere();
  1498 void GraphKit::post_barrier(Node* ctl,
  1499                             Node* store,
  1500                             Node* obj,
  1501                             Node* adr,
  1502                             uint  adr_idx,
  1503                             Node* val,
  1504                             BasicType bt,
  1505                             bool use_precise) {
  1506   BarrierSet* bs = Universe::heap()->barrier_set();
  1507   set_control(ctl);
  1508   switch (bs->kind()) {
  1509     case BarrierSet::G1SATBCT:
  1510     case BarrierSet::G1SATBCTLogging:
  1511       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
  1512       break;
  1514     case BarrierSet::CardTableModRef:
  1515     case BarrierSet::CardTableExtension:
  1516       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
  1517       break;
  1519     case BarrierSet::ModRef:
  1520       break;
  1522     case BarrierSet::Other:
  1523     default      :
  1524       ShouldNotReachHere();
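       // Illustration (hedged): conceptually, the CardTableModRef post barrier
       // emitted by write_barrier_post() just dirties the card covering the store
       // address.  Roughly, in plain C++ (byte_map_base / card_shift are the
       // CardTableModRefBS names, used here only for illustration):
       //
       //   jbyte* card = byte_map_base + ((uintptr_t)adr >> card_shift);
       //   *card = 0;   // dirty_card_val(), typically zero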
  1529 Node* GraphKit::store_oop(Node* ctl,
  1530                           Node* obj,
  1531                           Node* adr,
  1532                           const TypePtr* adr_type,
  1533                           Node* val,
  1534                           const TypeOopPtr* val_type,
  1535                           BasicType bt,
  1536                           bool use_precise) {
  1537   // Transformation of a value which could be NULL pointer (CastPP #NULL)
  1538   // could be delayed during Parse (for example, in adjust_map_after_if()).
  1539   // Execute transformation here to avoid barrier generation in such case.
  1540   if (_gvn.type(val) == TypePtr::NULL_PTR)
  1541     val = _gvn.makecon(TypePtr::NULL_PTR);
  1543   set_control(ctl);
  1544   if (stopped()) return top(); // Dead path ?
  1546   assert(bt == T_OBJECT, "sanity");
  1547   assert(val != NULL, "not dead path");
  1548   uint adr_idx = C->get_alias_index(adr_type);
  1549   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  1551   pre_barrier(true /* do_load */,
  1552               control(), obj, adr, adr_idx, val, val_type,
  1553               NULL /* pre_val */,
  1554               bt);
  1556   Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
  1557   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
  1558   return store;
  1561 // Could be an array or object we don't know at compile time (unsafe ref.)
  1562 Node* GraphKit::store_oop_to_unknown(Node* ctl,
  1563                              Node* obj,   // containing obj
  1564                              Node* adr,  // actual address to store val at
  1565                              const TypePtr* adr_type,
  1566                              Node* val,
  1567                              BasicType bt) {
  1568   Compile::AliasType* at = C->alias_type(adr_type);
  1569   const TypeOopPtr* val_type = NULL;
  1570   if (adr_type->isa_instptr()) {
  1571     if (at->field() != NULL) {
  1572       // known field.  This code is a copy of the do_put_xxx logic.
  1573       ciField* field = at->field();
  1574       if (!field->type()->is_loaded()) {
  1575         val_type = TypeInstPtr::BOTTOM;
  1576       } else {
  1577         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
  1580   } else if (adr_type->isa_aryptr()) {
  1581     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
  1583   if (val_type == NULL) {
  1584     val_type = TypeInstPtr::BOTTOM;
  1586   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
  1590 //-------------------------array_element_address-------------------------
  1591 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
  1592                                       const TypeInt* sizetype) {
  1593   uint shift  = exact_log2(type2aelembytes(elembt));
  1594   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  1596   // short-circuit a common case (saves lots of confusing waste motion)
  1597   jint idx_con = find_int_con(idx, -1);
  1598   if (idx_con >= 0) {
  1599     intptr_t offset = header + ((intptr_t)idx_con << shift);
  1600     return basic_plus_adr(ary, offset);
  1603   // must be correct type for alignment purposes
  1604   Node* base  = basic_plus_adr(ary, header);
  1605 #ifdef _LP64
  1606   // The scaled index operand to AddP must be a clean 64-bit value.
  1607   // Java allows a 32-bit int to be incremented to a negative
  1608   // value, which appears in a 64-bit register as a large
  1609   // positive number.  Using that large positive number as an
  1610   // operand in pointer arithmetic has bad consequences.
  1611   // On the other hand, 32-bit overflow is rare, and the possibility
  1612   // can often be excluded, if we annotate the ConvI2L node with
  1613   // a type assertion that its value is known to be a small positive
  1614   // number.  (The prior range check has ensured this.)
  1615   // This assertion is used by ConvI2LNode::Ideal.
  1616   int index_max = max_jint - 1;  // array size is max_jint, index is one less
  1617   if (sizetype != NULL)  index_max = sizetype->_hi - 1;
  1618   const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  1619   idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
  1620 #endif
  1621   Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
  1622   return basic_plus_adr(ary, base, scale);
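       // Illustration (hedged): what the address arithmetic above computes, and why
       // the int index must be widened with a type assertion on 64-bit.  Standalone
       // C++ sketch with a hypothetical 16-byte array header and T_INT (4-byte,
       // shift == 2) elements:
       //
       //   #include <cstdint>
       //   #include <cstdio>
       //
       //   int64_t element_offset(int64_t header, unsigned shift, int32_t idx) {
       //     // The prior range check guarantees 0 <= idx < length, so the
       //     // sign-extending widen (the ConvI2L above) is safe:
       //     return header + ((int64_t)idx << shift);
       //   }
       //
       //   int main() {
       //     printf("%lld\n", (long long)element_offset(16, 2, 5));     // 16 + 5*4 = 36
       //     // Without that guarantee, an int that wrapped to -1 and is then scaled
       //     // as an unsigned 64-bit register value yields a huge bogus offset:
       //     printf("%llu\n", ((unsigned long long)(uint32_t)-1) << 2); // 17179869180
       //     return 0;
       //   }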
  1625 //-------------------------load_array_element-------------------------
  1626 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
  1627   const Type* elemtype = arytype->elem();
  1628   BasicType elembt = elemtype->array_element_basic_type();
  1629   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  1630   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
  1631   return ld;
  1634 //-------------------------set_arguments_for_java_call-------------------------
  1635 // Arguments (pre-popped from the stack) are taken from the JVMS.
  1636 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
  1637   // Add the call arguments:
  1638   uint nargs = call->method()->arg_size();
  1639   for (uint i = 0; i < nargs; i++) {
  1640     Node* arg = argument(i);
  1641     call->init_req(i + TypeFunc::Parms, arg);
  1645 //---------------------------set_edges_for_java_call---------------------------
  1646 // Connect a newly created call into the current JVMS.
  1647 // A return value node (if any) is returned from set_edges_for_java_call.
  1648 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
  1650   // Add the predefined inputs:
  1651   call->init_req( TypeFunc::Control, control() );
  1652   call->init_req( TypeFunc::I_O    , i_o() );
  1653   call->init_req( TypeFunc::Memory , reset_memory() );
  1654   call->init_req( TypeFunc::FramePtr, frameptr() );
  1655   call->init_req( TypeFunc::ReturnAdr, top() );
  1657   add_safepoint_edges(call, must_throw);
  1659   Node* xcall = _gvn.transform(call);
  1661   if (xcall == top()) {
  1662     set_control(top());
  1663     return;
  1665   assert(xcall == call, "call identity is stable");
  1667   // Re-use the current map to produce the result.
  1669   set_control(_gvn.transform(new (C) ProjNode(call, TypeFunc::Control)));
  1670   set_i_o(    _gvn.transform(new (C) ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
  1671   set_all_memory_call(xcall, separate_io_proj);
  1673   //return xcall;   // no need, caller already has it
  1676 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
  1677   if (stopped())  return top();  // maybe the call folded up?
  1679   // Capture the return value, if any.
  1680   Node* ret;
  1681   if (call->method() == NULL ||
  1682       call->method()->return_type()->basic_type() == T_VOID)
  1683         ret = top();
  1684   else  ret = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  1686   // Note:  Since any out-of-line call can produce an exception,
  1687   // we always insert an I_O projection from the call into the result.
  1689   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
  1691   if (separate_io_proj) {
  1692     // The caller requested separate projections be used by the fall
  1693     // through and exceptional paths, so replace the projections for
  1694     // the fall through path.
  1695     set_i_o(_gvn.transform( new (C) ProjNode(call, TypeFunc::I_O) ));
  1696     set_all_memory(_gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) ));
  1698   return ret;
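       // Illustration (hedged): the three helpers above are used together when a
       // call is emitted.  Roughly, paraphrased from the CallGenerator code and not
       // to be read as verbatim API:
       //
       //   CallStaticJavaNode* call =
       //     new (C) CallStaticJavaNode(tf, target, method, kit.bci());
       //   kit.set_arguments_for_java_call(call);
       //   kit.set_edges_for_java_call(call, /*must_throw=*/ false, /*separate_io_proj=*/ false);
       //   Node* ret = kit.set_results_for_java_call(call, false);
       //   kit.push_node(method->return_type()->basic_type(), ret);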
  1701 //--------------------set_predefined_input_for_runtime_call--------------------
  1702 // Reading and setting the memory state is way conservative here.
  1703 // The real problem is that I am not doing real Type analysis on memory,
  1704 // so I cannot distinguish card mark stores from other stores.  Across a GC
  1705 // point the Store Barrier and the card mark memory have to agree.  I cannot
  1706 // have a card mark store and its barrier split across the GC point from
  1707 // either above or below.  Here I get that to happen by reading ALL of memory.
  1708 // A better answer would be to separate out card marks from other memory.
  1709 // For now, return the input memory state, so that it can be reused
  1710 // after the call, if this call has restricted memory effects.
  1711 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
  1712   // Set fixed predefined input arguments
  1713   Node* memory = reset_memory();
  1714   call->init_req( TypeFunc::Control,   control()  );
  1715   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
  1716   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
  1717   call->init_req( TypeFunc::FramePtr,  frameptr() );
  1718   call->init_req( TypeFunc::ReturnAdr, top()      );
  1719   return memory;
  1722 //-------------------set_predefined_output_for_runtime_call--------------------
  1723 // Set control and memory (not i_o) from the call.
  1724 // If keep_mem is not NULL, use it for the output state,
  1725 // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
  1726 // If hook_mem is NULL, this call produces no memory effects at all.
  1727 // If hook_mem is a Java-visible memory slice (such as arraycopy operands),
  1728 // then only that memory slice is taken from the call.
  1729 // In the last case, we must put an appropriate memory barrier before
  1730 // the call, so as to create the correct anti-dependencies on loads
  1731 // preceding the call.
  1732 void GraphKit::set_predefined_output_for_runtime_call(Node* call,
  1733                                                       Node* keep_mem,
  1734                                                       const TypePtr* hook_mem) {
  1735   // no i/o
  1736   set_control(_gvn.transform( new (C) ProjNode(call,TypeFunc::Control) ));
  1737   if (keep_mem) {
  1738     // First clone the existing memory state
  1739     set_all_memory(keep_mem);
  1740     if (hook_mem != NULL) {
  1741       // Make memory for the call
  1742       Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );
  1743       // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
  1744       // We also use hook_mem to extract specific effects from arraycopy stubs.
  1745       set_memory(mem, hook_mem);
  1747     // ...else the call has NO memory effects.
  1749     // Make sure the call advertises its memory effects precisely.
  1750     // This lets us build accurate anti-dependences in gcm.cpp.
  1751     assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
  1752            "call node must be constructed correctly");
  1753   } else {
  1754     assert(hook_mem == NULL, "");
  1755     // This is not a "slow path" call; all memory comes from the call.
  1756     set_all_memory_call(call);
  1761 // Replace the call with the current state of the kit.
  1762 void GraphKit::replace_call(CallNode* call, Node* result) {
  1763   JVMState* ejvms = NULL;
  1764   if (has_exceptions()) {
  1765     ejvms = transfer_exceptions_into_jvms();
  1768   SafePointNode* final_state = stop();
  1770   // Find all the needed outputs of this call
  1771   CallProjections callprojs;
  1772   call->extract_projections(&callprojs, true);
  1774   Node* init_mem = call->in(TypeFunc::Memory);
  1775   Node* final_mem = final_state->in(TypeFunc::Memory);
  1776   Node* final_ctl = final_state->in(TypeFunc::Control);
  1777   Node* final_io = final_state->in(TypeFunc::I_O);
  1779   // Replace all the old call edges with the edges from the inlining result
  1780   if (callprojs.fallthrough_catchproj != NULL) {
  1781     C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  1783   if (callprojs.fallthrough_memproj != NULL) {
  1784     C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
  1786   if (callprojs.fallthrough_ioproj != NULL) {
  1787     C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
  1790   // Replace the result with the new result if it exists and is used
  1791   if (callprojs.resproj != NULL && result != NULL) {
  1792     C->gvn_replace_by(callprojs.resproj, result);
  1795   if (ejvms == NULL) {
  1796     // No exception edges, so simply kill off those paths
  1797     if (callprojs.catchall_catchproj != NULL) {
  1798       C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
  1800     if (callprojs.catchall_memproj != NULL) {
  1801       C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
  1803     if (callprojs.catchall_ioproj != NULL) {
  1804       C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
  1806     // Replace the old exception object with top
  1807     if (callprojs.exobj != NULL) {
  1808       C->gvn_replace_by(callprojs.exobj, C->top());
  1810   } else {
  1811     GraphKit ekit(ejvms);
  1813     // Load my combined exception state into the kit, with all phis transformed:
  1814     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
  1816     Node* ex_oop = ekit.use_exception_state(ex_map);
  1817     if (callprojs.catchall_catchproj != NULL) {
  1818       C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
  1820     if (callprojs.catchall_memproj != NULL) {
  1821       C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
  1823     if (callprojs.catchall_ioproj != NULL) {
  1824       C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
  1827     // Replace the old exception object with the newly created one
  1828     if (callprojs.exobj != NULL) {
  1829       C->gvn_replace_by(callprojs.exobj, ex_oop);
  1833   // Disconnect the call from the graph
  1834   call->disconnect_inputs(NULL, C);
  1835   C->gvn_replace_by(call, C->top());
  1837   // Clean up any MergeMems that feed other MergeMems since the
  1838   // optimizer doesn't like that.
  1839   if (final_mem->is_MergeMem()) {
  1840     Node_List wl;
  1841     for (SimpleDUIterator i(final_mem); i.has_next(); i.next()) {
  1842       Node* m = i.get();
  1843       if (m->is_MergeMem() && !wl.contains(m)) {
  1844         wl.push(m);
  1847     while (wl.size()  > 0) {
  1848       _gvn.transform(wl.pop());
  1854 //------------------------------increment_counter------------------------------
  1855 // for statistics: increment a VM counter by 1
  1857 void GraphKit::increment_counter(address counter_addr) {
  1858   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
  1859   increment_counter(adr1);
  1862 void GraphKit::increment_counter(Node* counter_addr) {
  1863   int adr_type = Compile::AliasIdxRaw;
  1864   Node* ctrl = control();
  1865   Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
  1866   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  1867   store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
  1871 //------------------------------uncommon_trap----------------------------------
  1872 // Bail out to the interpreter in mid-method.  Implemented by calling the
  1873 // uncommon_trap blob.  This helper function inserts a runtime call with the
  1874 // right debug info.
  1875 void GraphKit::uncommon_trap(int trap_request,
  1876                              ciKlass* klass, const char* comment,
  1877                              bool must_throw,
  1878                              bool keep_exact_action) {
  1879   if (failing())  stop();
  1880   if (stopped())  return; // trap reachable?
  1882   // Note:  If ProfileTraps is true, and if a deopt. actually
  1883   // occurs here, the runtime will make sure an MDO exists.  There is
  1884   // no need to call method()->ensure_method_data() at this point.
  1886   // Set the stack pointer to the right value for reexecution:
  1887   set_sp(reexecute_sp());
  1889 #ifdef ASSERT
  1890   if (!must_throw) {
  1891     // Make sure the stack has at least enough depth to execute
  1892     // the current bytecode.
  1893     int inputs, ignored_depth;
  1894     if (compute_stack_effects(inputs, ignored_depth)) {
  1895       assert(sp() >= inputs, err_msg_res("must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
  1896              Bytecodes::name(java_bc()), sp(), inputs));
  1899 #endif
  1901   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  1902   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  1904   switch (action) {
  1905   case Deoptimization::Action_maybe_recompile:
  1906   case Deoptimization::Action_reinterpret:
  1907     // Temporary fix for 6529811 to allow virtual calls to be sure they
  1908     // get the chance to go from mono->bi->mega
  1909     if (!keep_exact_action &&
  1910         Deoptimization::trap_request_index(trap_request) < 0 &&
  1911         too_many_recompiles(reason)) {
  1912       // This BCI is causing too many recompilations.
  1913       action = Deoptimization::Action_none;
  1914       trap_request = Deoptimization::make_trap_request(reason, action);
  1915     } else {
  1916       C->set_trap_can_recompile(true);
  1918     break;
  1919   case Deoptimization::Action_make_not_entrant:
  1920     C->set_trap_can_recompile(true);
  1921     break;
  1922 #ifdef ASSERT
  1923   case Deoptimization::Action_none:
  1924   case Deoptimization::Action_make_not_compilable:
  1925     break;
  1926   default:
  1927     fatal(err_msg_res("unknown action %d: %s", action, Deoptimization::trap_action_name(action)));
  1928     break;
  1929 #endif
  1932   if (TraceOptoParse) {
  1933     char buf[100];
  1934     tty->print_cr("Uncommon trap %s at bci:%d",
  1935                   Deoptimization::format_trap_request(buf, sizeof(buf),
  1936                                                       trap_request), bci());
  1939   CompileLog* log = C->log();
  1940   if (log != NULL) {
  1941     int kid = (klass == NULL)? -1: log->identify(klass);
  1942     log->begin_elem("uncommon_trap bci='%d'", bci());
  1943     char buf[100];
  1944     log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
  1945                                                           trap_request));
  1946     if (kid >= 0)         log->print(" klass='%d'", kid);
  1947     if (comment != NULL)  log->print(" comment='%s'", comment);
  1948     log->end_elem();
  1951   // Make sure any guarding test views this path as very unlikely
  1952   Node *i0 = control()->in(0);
  1953   if (i0 != NULL && i0->is_If()) {        // Found a guarding if test?
  1954     IfNode *iff = i0->as_If();
  1955     float f = iff->_prob;   // Get prob
  1956     if (control()->Opcode() == Op_IfTrue) {
  1957       if (f > PROB_UNLIKELY_MAG(4))
  1958         iff->_prob = PROB_MIN;
  1959     } else {
  1960       if (f < PROB_LIKELY_MAG(4))
  1961         iff->_prob = PROB_MAX;
  1965   // Clear out dead values from the debug info.
  1966   kill_dead_locals();
  1968   // Now insert the uncommon trap subroutine call
  1969   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
  1970   const TypePtr* no_memory_effects = NULL;
  1971   // Pass the index of the class to be loaded
  1972   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
  1973                                  (must_throw ? RC_MUST_THROW : 0),
  1974                                  OptoRuntime::uncommon_trap_Type(),
  1975                                  call_addr, "uncommon_trap", no_memory_effects,
  1976                                  intcon(trap_request));
  1977   assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
  1978          "must extract request correctly from the graph");
  1979   assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
  1981   call->set_req(TypeFunc::ReturnAdr, returnadr());
  1982   // The debug info is the only real input to this call.
  1984   // Halt-and-catch fire here.  The above call should never return!
  1985   HaltNode* halt = new(C) HaltNode(control(), frameptr());
  1986   _gvn.set_type_bottom(halt);
  1987   root()->add_req(halt);
  1989   stop_and_kill_map();
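       // Illustration (hedged): trap_request packs the deopt reason, the recovery
       // action and an optional index into a single int, so the blob call above only
       // needs one constant argument.  The accessors used earlier in this function
       // invert the packing:
       //
       //   int req = Deoptimization::make_trap_request(Deoptimization::Reason_null_check,
       //                                               Deoptimization::Action_make_not_entrant);
       //   assert(Deoptimization::trap_request_reason(req) == Deoptimization::Reason_null_check,  "round-trips");
       //   assert(Deoptimization::trap_request_action(req) == Deoptimization::Action_make_not_entrant, "round-trips");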
  1993 //--------------------------just_allocated_object------------------------------
  1994 // Report the object that was just allocated.
  1995 // It must be the case that there are no intervening safepoints.
  1996 // We use this to determine if an object is so "fresh" that
  1997 // it does not require card marks.
  1998 Node* GraphKit::just_allocated_object(Node* current_control) {
  1999   if (C->recent_alloc_ctl() == current_control)
  2000     return C->recent_alloc_obj();
  2001   return NULL;
  2005 void GraphKit::round_double_arguments(ciMethod* dest_method) {
  2006   // (Note:  TypeFunc::make has a cache that makes this fast.)
  2007   const TypeFunc* tf    = TypeFunc::make(dest_method);
  2008   int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
  2009   for (int j = 0; j < nargs; j++) {
  2010     const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
  2011     if( targ->basic_type() == T_DOUBLE ) {
  2012       // If any parameters are doubles, they must be rounded before
  2013       // the call; dstore_rounding() does the gvn.transform.
  2014       Node *arg = argument(j);
  2015       arg = dstore_rounding(arg);
  2016       set_argument(j, arg);
  2021 void GraphKit::round_double_result(ciMethod* dest_method) {
  2022   // A non-strict method may return a double value which has an extended
  2023   // exponent, but this must not be visible in a caller which is 'strict'.
  2024   // If a strict caller invokes a non-strict callee, round the double result.
  2026   BasicType result_type = dest_method->return_type()->basic_type();
  2027   assert( method() != NULL, "must have caller context");
  2028   if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
  2029     // Destination method's return value is on top of stack
  2030     // dstore_rounding() does gvn.transform
  2031     Node *result = pop_pair();
  2032     result = dstore_rounding(result);
  2033     push_pair(result);
  2037 // rounding for strict float precision conformance
  2038 Node* GraphKit::precision_rounding(Node* n) {
  2039   return UseStrictFP && _method->flags().is_strict()
  2040     && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
  2041     ? _gvn.transform( new (C) RoundFloatNode(0, n) )
  2042     : n;
  2045 // rounding for strict double precision conformance
  2046 Node* GraphKit::dprecision_rounding(Node *n) {
  2047   return UseStrictFP && _method->flags().is_strict()
  2048     && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
  2049     ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
  2050     : n;
  2053 // rounding for non-strict double stores
  2054 Node* GraphKit::dstore_rounding(Node* n) {
  2055   return Matcher::strict_fp_requires_explicit_rounding
  2056     && UseSSE <= 1
  2057     ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
  2058     : n;
  2061 //=============================================================================
  2062 // Generate a fast path/slow path idiom.  Graph looks like:
  2063 // [foo] indicates that 'foo' is a parameter
  2064 //
  2065 //              [in]     NULL
  2066 //                 \    /
  2067 //                  CmpP
  2068 //                  Bool ne
  2069 //                   If
  2070 //                  /  \
  2071 //              True    False-<2>
  2072 //              / |
  2073 //             /  cast_not_null
  2074 //           Load  |    |   ^
  2075 //        [fast_test]   |   |
  2076 // gvn to   opt_test    |   |
  2077 //          /    \      |  <1>
  2078 //      True     False  |
  2079 //        |         \\  |
  2080 //   [slow_call]     \[fast_result]
  2081 //    Ctl   Val       \      \
  2082 //     |               \      \
  2083 //    Catch       <1>   \      \
  2084 //   /    \        ^     \      \
  2085 //  Ex    No_Ex    |      \      \
  2086 //  |       \   \  |       \ <2>  \
  2087 //  ...      \  [slow_res] |  |    \   [null_result]
  2088 //            \         \--+--+---  |  |
  2089 //             \           | /    \ | /
  2090 //              --------Region     Phi
  2091 //
  2092 //=============================================================================
  2093 // Code is structured as a series of driver functions all called 'do_XXX' that
  2094 // call a set of helper functions.  Helper functions first, then drivers.
  2096 //------------------------------null_check_oop---------------------------------
  2097 // Null check oop.  Set null-path control into Region in slot 3.
  2098 // Make a cast-not-null node use the other not-null control.  Return the cast.
  2099 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
  2100                                bool never_see_null) {
  2101   // Initial NULL check taken path
  2102   (*null_control) = top();
  2103   Node* cast = null_check_common(value, T_OBJECT, false, null_control);
  2105   // Generate uncommon_trap:
  2106   if (never_see_null && (*null_control) != top()) {
  2107     // If we see an unexpected null at a check-cast we record it and force a
  2108     // recompile; the offending check-cast will be compiled to handle NULLs.
  2109     // If we see more than one offending BCI, then all checkcasts in the
  2110     // method will be compiled to handle NULLs.
  2111     PreserveJVMState pjvms(this);
  2112     set_control(*null_control);
  2113     replace_in_map(value, null());
  2114     uncommon_trap(Deoptimization::Reason_null_check,
  2115                   Deoptimization::Action_make_not_entrant);
  2116     (*null_control) = top();    // NULL path is dead
  2119   // Cast away null-ness on the result
  2120   return cast;
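       // Illustration (hedged): a hypothetical caller wiring the null path into
       // "slot 3" as the header comment above prescribes; 'region' and 'phi' belong
       // to the caller:
       //
       //   Node* null_ctl     = top();
       //   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
       //   region->init_req(3, null_ctl);      // top() if the null path trapped away
       //   phi   ->init_req(3, intcon(0));     // e.g. instanceof of null is false
       //   // control() now continues on the not-null path with the casted oop.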
  2123 //------------------------------opt_iff----------------------------------------
  2124 // Optimize the fast-check IfNode.  Set the fast-path region slot 2.
  2125 // Return slow-path control.
  2126 Node* GraphKit::opt_iff(Node* region, Node* iff) {
  2127   IfNode *opt_iff = _gvn.transform(iff)->as_If();
  2129   // Fast path taken; set region slot 2
  2130   Node *fast_taken = _gvn.transform( new (C) IfFalseNode(opt_iff) );
  2131   region->init_req(2,fast_taken); // Capture fast-control
  2133   // Fast path not-taken, i.e. slow path
  2134   Node *slow_taken = _gvn.transform( new (C) IfTrueNode(opt_iff) );
  2135   return slow_taken;
  2138 //-----------------------------make_runtime_call-------------------------------
  2139 Node* GraphKit::make_runtime_call(int flags,
  2140                                   const TypeFunc* call_type, address call_addr,
  2141                                   const char* call_name,
  2142                                   const TypePtr* adr_type,
  2143                                   // The following parms are all optional.
  2144                                   // The first NULL ends the list.
  2145                                   Node* parm0, Node* parm1,
  2146                                   Node* parm2, Node* parm3,
  2147                                   Node* parm4, Node* parm5,
  2148                                   Node* parm6, Node* parm7) {
  2149   // Slow-path call
  2150   bool is_leaf = !(flags & RC_NO_LEAF);
  2151   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
  2152   if (call_name == NULL) {
  2153     assert(!is_leaf, "must supply name for leaf");
  2154     call_name = OptoRuntime::stub_name(call_addr);
  2156   CallNode* call;
  2157   if (!is_leaf) {
  2158     call = new(C) CallStaticJavaNode(call_type, call_addr, call_name,
  2159                                            bci(), adr_type);
  2160   } else if (flags & RC_NO_FP) {
  2161     call = new(C) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  2162   } else {
  2163     call = new(C) CallLeafNode(call_type, call_addr, call_name, adr_type);
  2166   // The following is similar to set_edges_for_java_call,
  2167   // except that the memory effects of the call are restricted to AliasIdxRaw.
  2169   // Slow path call has no side-effects, uses few values
  2170   bool wide_in  = !(flags & RC_NARROW_MEM);
  2171   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
  2173   Node* prev_mem = NULL;
  2174   if (wide_in) {
  2175     prev_mem = set_predefined_input_for_runtime_call(call);
  2176   } else {
  2177     assert(!wide_out, "narrow in => narrow out");
  2178     Node* narrow_mem = memory(adr_type);
  2179     prev_mem = reset_memory();
  2180     map()->set_memory(narrow_mem);
  2181     set_predefined_input_for_runtime_call(call);
  2184   // Hook each parm in order.  Stop looking at the first NULL.
  2185   if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
  2186   if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
  2187   if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
  2188   if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
  2189   if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
  2190   if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
  2191   if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
  2192   if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
  2193     /* close each nested if ===> */  } } } } } } } }
  2194   assert(call->in(call->req()-1) != NULL, "must initialize all parms");
  2196   if (!is_leaf) {
  2197     // Non-leaves can block and take safepoints:
  2198     add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
  2200   // Non-leaves can throw exceptions:
  2201   if (has_io) {
  2202     call->set_req(TypeFunc::I_O, i_o());
  2205   if (flags & RC_UNCOMMON) {
  2206     // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
  2207     // (An "if" probability corresponds roughly to an unconditional count.
  2208     // Sort of.)
  2209     call->set_cnt(PROB_UNLIKELY_MAG(4));
  2212   Node* c = _gvn.transform(call);
  2213   assert(c == call, "cannot disappear");
  2215   if (wide_out) {
  2216     // Slow path call has full side-effects.
  2217     set_predefined_output_for_runtime_call(call);
  2218   } else {
  2219     // Slow path call has few side-effects, and/or sets few values.
  2220     set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
  2223   if (has_io) {
  2224     set_i_o(_gvn.transform(new (C) ProjNode(call, TypeFunc::I_O)));
  2226   return call;
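       // Illustration (hedged): a typical leaf call that only touches raw memory,
       // so RC_NARROW_MEM lets every other memory slice flow around it untouched.
       // The stub name, its Type() function and its entry point are hypothetical:
       //
       //   Node* call = make_runtime_call(RC_LEAF | RC_NO_FP | RC_NARROW_MEM,
       //                                  OptoRuntime::example_leaf_Type(),          // hypothetical
       //                                  CAST_FROM_FN_PTR(address, example_leaf),   // hypothetical entry
       //                                  "example_leaf",
       //                                  TypeRawPtr::BOTTOM,   // adr_type: raw slice only
       //                                  parm0, parm1);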
  2230 //------------------------------merge_memory-----------------------------------
  2231 // Merge memory from one path into the current memory state.
  2232 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
  2233   for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
  2234     Node* old_slice = mms.force_memory();
  2235     Node* new_slice = mms.memory2();
  2236     if (old_slice != new_slice) {
  2237       PhiNode* phi;
  2238       if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) {
  2239         phi = new_slice->as_Phi();
  2240         #ifdef ASSERT
  2241         if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region)
  2242           old_slice = old_slice->in(new_path);
  2243         // Caller is responsible for ensuring that any pre-existing
  2244         // phis are already aware of old memory.
  2245         int old_path = (new_path > 1) ? 1 : 2;  // choose old_path != new_path
  2246         assert(phi->in(old_path) == old_slice, "pre-existing phis OK");
  2247         #endif
  2248         mms.set_memory(phi);
  2249       } else {
  2250         phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
  2251         _gvn.set_type(phi, Type::MEMORY);
  2252         phi->set_req(new_path, new_slice);
  2253         mms.set_memory(_gvn.transform(phi));  // assume it is complete
  2259 //------------------------------make_slow_call_ex------------------------------
  2260 // Make the exception handler hookups for the slow call
  2261 void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) {
  2262   if (stopped())  return;
  2264   // Make a catch node with just two handlers:  fall-through and catch-all
  2265   Node* i_o  = _gvn.transform( new (C) ProjNode(call, TypeFunc::I_O, separate_io_proj) );
  2266   Node* catc = _gvn.transform( new (C) CatchNode(control(), i_o, 2) );
  2267   Node* norm = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
  2268   Node* excp = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci) );
  2270   { PreserveJVMState pjvms(this);
  2271     set_control(excp);
  2272     set_i_o(i_o);
  2274     if (excp != top()) {
  2275       // Create an exception state also.
  2276       // Use an exact type if the caller has specified a specific exception.
  2277       const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
  2278       Node*       ex_oop  = new (C) CreateExNode(ex_type, control(), i_o);
  2279       add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
  2283   // Get the no-exception control from the CatchNode.
  2284   set_control(norm);
  2288 //-------------------------------gen_subtype_check-----------------------------
  2289 // Generate a subtyping check.  Takes as input the subtype and supertype.
  2290 // Returns 2 values: sets the default control() to the true path and returns
  2291 // the false path.  Only reads invariant memory; sets no (visible) memory.
  2292 // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
  2293 // but that's not exposed to the optimizer.  This call also doesn't take in an
  2294 // Object; if you wish to check an Object you need to load the Object's class
  2295 // prior to coming here.
  2296 Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
  2297   // Fast check for identical types, perhaps identical constants.
  2298   // The types can even be identical non-constants, in cases
  2299   // involving Array.newInstance, Object.clone, etc.
  2300   if (subklass == superklass)
  2301     return top();             // false path is dead; no test needed.
  2303   if (_gvn.type(superklass)->singleton()) {
  2304     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
  2305     ciKlass* subk   = _gvn.type(subklass)->is_klassptr()->klass();
  2307     // In the common case of an exact superklass, try to fold up the
  2308     // test before generating code.  You may ask, why not just generate
  2309     // the code and then let it fold up?  The answer is that the generated
  2310     // code will necessarily include null checks, which do not always
  2311     // completely fold away.  If they are also needless, then they turn
  2312     // into a performance loss.  Example:
  2313     //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
  2314     // Here, the type of 'fa' is often exact, so the store check
  2315     // of fa[1]=x will fold up, without testing the nullness of x.
  2316     switch (static_subtype_check(superk, subk)) {
  2317     case SSC_always_false:
  2319         Node* always_fail = control();
  2320         set_control(top());
  2321         return always_fail;
  2323     case SSC_always_true:
  2324       return top();
  2325     case SSC_easy_test:
  2327         // Just do a direct pointer compare and be done.
  2328         Node* cmp = _gvn.transform( new(C) CmpPNode(subklass, superklass) );
  2329         Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
  2330         IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
  2331         set_control( _gvn.transform( new(C) IfTrueNode (iff) ) );
  2332         return       _gvn.transform( new(C) IfFalseNode(iff) );
  2334     case SSC_full_test:
  2335       break;
  2336     default:
  2337       ShouldNotReachHere();
  2341   // %%% Possible further optimization:  Even if the superklass is not exact,
  2342   // if the subklass is the unique subtype of the superklass, the check
  2343   // will always succeed.  We could leave a dependency behind to ensure this.
  2345   // First load the super-klass's check-offset
  2346   Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
  2347   Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
  2348   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  2349   bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
  2351   // Load from the sub-klass's super-class display list, or a 1-word cache of
  2352   // the secondary superclass list, or a failing value with a sentinel offset
  2353   // if the super-klass is an interface or exceptionally deep in the Java
  2354   // hierarchy and we have to scan the secondary superclass list the hard way.
  2355   // Worst-case type is a little odd: NULL is allowed as a result (usually
  2356   // klass loads can never produce a NULL).
  2357   Node *chk_off_X = ConvI2X(chk_off);
  2358   Node *p2 = _gvn.transform( new (C) AddPNode(subklass,subklass,chk_off_X) );
  2359   // For some types like interfaces the following loadKlass is from a 1-word
  2360   // cache which is mutable so can't use immutable memory.  Other
  2361   // types load from the super-class display table which is immutable.
  2362   Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
  2363   Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
  2365   // Compile speed common case: ARE a subtype and we canNOT fail
  2366   if( superklass == nkls )
  2367     return top();             // false path is dead; no test needed.
  2369   // See if we get an immediate positive hit.  Happens roughly 83% of the
  2370   // time.  Test to see if the value loaded just previously from the subklass
  2371   // is exactly the superklass.
  2372   Node *cmp1 = _gvn.transform( new (C) CmpPNode( superklass, nkls ) );
  2373   Node *bol1 = _gvn.transform( new (C) BoolNode( cmp1, BoolTest::eq ) );
  2374   IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
  2375   Node *iftrue1 = _gvn.transform( new (C) IfTrueNode ( iff1 ) );
  2376   set_control(    _gvn.transform( new (C) IfFalseNode( iff1 ) ) );
  2378   // Compile speed common case: Check for being deterministic right now.  If
  2379   // chk_off is a constant and not equal to cacheoff then we are NOT a
  2380   // subklass.  In this case we need exactly the 1 test above and we can
  2381   // return those results immediately.
  2382   if (!might_be_cache) {
  2383     Node* not_subtype_ctrl = control();
  2384     set_control(iftrue1); // We need exactly the 1 test above
  2385     return not_subtype_ctrl;
  2388   // Gather the various success & failures here
  2389   RegionNode *r_ok_subtype = new (C) RegionNode(4);
  2390   record_for_igvn(r_ok_subtype);
  2391   RegionNode *r_not_subtype = new (C) RegionNode(3);
  2392   record_for_igvn(r_not_subtype);
  2394   r_ok_subtype->init_req(1, iftrue1);
  2396   // Check for immediate negative hit.  Happens roughly 11% of the time (which
  2397   // is roughly 63% of the remaining cases).  Test to see if the loaded
  2398   // check-offset points into the subklass display list or the 1-element
  2399   // cache.  If it points to the display (and NOT the cache) and the display
  2400   // missed then it's not a subtype.
  2401   Node *cacheoff = _gvn.intcon(cacheoff_con);
  2402   Node *cmp2 = _gvn.transform( new (C) CmpINode( chk_off, cacheoff ) );
  2403   Node *bol2 = _gvn.transform( new (C) BoolNode( cmp2, BoolTest::ne ) );
  2404   IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
  2405   r_not_subtype->init_req(1, _gvn.transform( new (C) IfTrueNode (iff2) ) );
  2406   set_control(                _gvn.transform( new (C) IfFalseNode(iff2) ) );
  2408   // Check for self.  Very rare to get here, but it is taken 1/3 the time.
  2409   // No performance impact (too rare) but allows sharing of secondary arrays
  2410   // which has some footprint reduction.
  2411   Node *cmp3 = _gvn.transform( new (C) CmpPNode( subklass, superklass ) );
  2412   Node *bol3 = _gvn.transform( new (C) BoolNode( cmp3, BoolTest::eq ) );
  2413   IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
  2414   r_ok_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode ( iff3 ) ) );
  2415   set_control(               _gvn.transform( new (C) IfFalseNode( iff3 ) ) );
  2417   // -- Roads not taken here: --
  2418   // We could also have chosen to perform the self-check at the beginning
  2419   // of this code sequence, as the assembler does.  This would not pay off
  2420   // the same way, since the optimizer, unlike the assembler, can perform
  2421   // static type analysis to fold away many successful self-checks.
  2422   // Non-foldable self checks work better here in second position, because
  2423   // the initial primary superclass check subsumes a self-check for most
  2424   // types.  An exception would be a secondary type like array-of-interface,
  2425   // which does not appear in its own primary supertype display.
  2426   // Finally, we could have chosen to move the self-check into the
  2427   // PartialSubtypeCheckNode, and from there out-of-line in a platform
  2428   // dependent manner.  But it is worthwhile to have the check here,
  2429 // where it can perhaps be optimized.  The cost in code space is
  2430   // small (register compare, branch).
  2432   // Now do a linear scan of the secondary super-klass array.  Again, no real
  2433   // performance impact (too rare) but it's gotta be done.
  2434   // Since the code is rarely used, there is no penalty for moving it
  2435   // out of line, and it can only improve I-cache density.
  2436   // The decision to inline or out-of-line this final check is platform
  2437   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  2438   Node* psc = _gvn.transform(
  2439     new (C) PartialSubtypeCheckNode(control(), subklass, superklass) );
  2441   Node *cmp4 = _gvn.transform( new (C) CmpPNode( psc, null() ) );
  2442   Node *bol4 = _gvn.transform( new (C) BoolNode( cmp4, BoolTest::ne ) );
  2443   IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
  2444   r_not_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode (iff4) ) );
  2445   r_ok_subtype ->init_req(3, _gvn.transform( new (C) IfFalseNode(iff4) ) );
  2447   // Return false path; set default control to true path.
  2448   set_control( _gvn.transform(r_ok_subtype) );
  2449   return _gvn.transform(r_not_subtype);
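       // Illustration (hedged): a condensed C++-style view of the cascade built
       // above.  Conceptual only; partial_subtype_check() stands in for the
       // PartialSubtypeCheckNode/stub and is a hypothetical name:
       //
       //   bool is_subtype(Klass* subklass, Klass* superklass, int chk_off, int cacheoff) {
       //     Klass* probe = *(Klass**)((char*)subklass + chk_off);        // display or cache word
       //     if (probe == superklass)    return true;                     // test 1: hit (~83%)
       //     if (chk_off != cacheoff)    return false;                    // test 2: display miss is decisive
       //     if (subklass == superklass) return true;                     // test 3: self check
       //     return partial_subtype_check(subklass, superklass) == NULL;  // test 4: scan secondaries
       //   }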
  2452 //----------------------------static_subtype_check-----------------------------
  2453 // Shortcut important common cases when superklass is exact:
  2454 // (0) superklass is java.lang.Object (can occur in reflective code)
  2455 // (1) subklass is already limited to a subtype of superklass => always ok
  2456 // (2) subklass does not overlap with superklass => always fail
  2457 // (3) superklass has NO subtypes and we can check with a simple compare.
  2458 int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
  2459   if (StressReflectiveCode) {
  2460     return SSC_full_test;       // Let caller generate the general case.
  2463   if (superk == env()->Object_klass()) {
  2464     return SSC_always_true;     // (0) this test cannot fail
  2467   ciType* superelem = superk;
  2468   if (superelem->is_array_klass())
  2469     superelem = superelem->as_array_klass()->base_element_type();
  2471   if (!subk->is_interface()) {  // cannot trust static interface types yet
  2472     if (subk->is_subtype_of(superk)) {
  2473       return SSC_always_true;   // (1) false path dead; no dynamic test needed
  2475     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
  2476         !superk->is_subtype_of(subk)) {
  2477       return SSC_always_false;
  2481   // If casting to an instance klass, it must have no subtypes
  2482   if (superk->is_interface()) {
  2483     // Cannot trust interfaces yet.
  2484     // %%% S.B. superk->nof_implementors() == 1
  2485   } else if (superelem->is_instance_klass()) {
  2486     ciInstanceKlass* ik = superelem->as_instance_klass();
  2487     if (!ik->has_subklass() && !ik->is_interface()) {
  2488       if (!ik->is_final()) {
  2489         // Add a dependency if there is a chance of a later subclass.
  2490         C->dependencies()->assert_leaf_type(ik);
  2492       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
  2494   } else {
  2495     // A primitive array type has no subtypes.
  2496     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
  2499   return SSC_full_test;
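       // Illustration (hedged): concrete outcomes, using Java types purely as
       // examples and assuming !StressReflectiveCode:
       //
       //   static_subtype_check(Object, anything) -> SSC_always_true   (case 0)
       //   static_subtype_check(Number, Integer)  -> SSC_always_true   (case 1: statically a subtype)
       //   static_subtype_check(String, Integer)  -> SSC_always_false  (case 2: disjoint types)
       //   static_subtype_check(String, Object)   -> SSC_easy_test     (case 3: String is a leaf type)
       //   static_subtype_check(List,   Object)   -> SSC_full_test     (interfaces are not trusted yet)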
  2502 // Profile-driven exact type check:
  2503 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
  2504                                     float prob,
  2505                                     Node* *casted_receiver) {
  2506   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
  2507   Node* recv_klass = load_object_klass(receiver);
  2508   Node* want_klass = makecon(tklass);
  2509   Node* cmp = _gvn.transform( new(C) CmpPNode(recv_klass, want_klass) );
  2510   Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
  2511   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
  2512   set_control( _gvn.transform( new(C) IfTrueNode (iff) ));
  2513   Node* fail = _gvn.transform( new(C) IfFalseNode(iff) );
  2515   const TypeOopPtr* recv_xtype = tklass->as_instance_type();
  2516   assert(recv_xtype->klass_is_exact(), "");
  2518   // Subsume downstream occurrences of receiver with a cast to
  2519   // recv_xtype, since now we know what the type will be.
  2520   Node* cast = new(C) CheckCastPPNode(control(), receiver, recv_xtype);
  2521   (*casted_receiver) = _gvn.transform(cast);
  2522   // (User must make the replace_in_map call.)
  2524   return fail;
  2528 //------------------------------seems_never_null-------------------------------
  2529 // Use null_seen information if it is available from the profile.
  2530 // If we see an unexpected null at a type check we record it and force a
  2531 // recompile; the offending check will be recompiled to handle NULLs.
  2532 // If we see several offending BCIs, then all checks in the
  2533 // method will be recompiled.
  2534 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
  2535   if (UncommonNullCast               // Cutout for this technique
  2536       && obj != null()               // And not the -Xcomp stupid case?
  2537       && !too_many_traps(Deoptimization::Reason_null_check)
  2538       ) {
  2539     if (data == NULL)
  2540       // Edge case:  no mature data.  Be optimistic here.
  2541       return true;
  2542     // If the profile has not seen a null, assume it won't happen.
  2543     assert(java_bc() == Bytecodes::_checkcast ||
  2544            java_bc() == Bytecodes::_instanceof ||
  2545            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
  2546     return !data->as_BitData()->null_seen();
  2548   return false;
  2551 //------------------------maybe_cast_profiled_receiver-------------------------
  2552 // If the profile has seen exactly one type, narrow to exactly that type.
  2553 // Subsequent type checks will always fold up.
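       // Hypothetical example: if the profile at this bytecode has only ever seen
       // java.util.ArrayList receivers, the object is narrowed to ArrayList here,
       // so a later checkcast to List folds away; the failing path is guarded by
       // an uncommon trap in case the profile turns out to be wrong.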
  2554 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
  2555                                              ciProfileData* data,
  2556                                              ciKlass* require_klass) {
  2557   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
  2558   if (data == NULL)  return NULL;
  2560   // Make sure we haven't already deoptimized from this tactic.
  2561   if (too_many_traps(Deoptimization::Reason_class_check))
  2562     return NULL;
  2564   // (No, this isn't a call, but it's enough like a virtual call
  2565   // to use the same ciMethod accessor to get the profile info...)
  2566   ciCallProfile profile = method()->call_profile_at_bci(bci());
  2567   if (profile.count() >= 0 &&         // no cast failures here
  2568       profile.has_receiver(0) &&
  2569       profile.morphism() == 1) {
  2570     ciKlass* exact_kls = profile.receiver(0);
  2571     if (require_klass == NULL ||
  2572         static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
  2573       // If we narrow the type to match what the type profile sees,
  2574       // we can then remove the rest of the cast.
  2575       // This is a win, even if the exact_kls is very specific,
  2576       // because downstream operations, such as method calls,
  2577       // will often benefit from the sharper type.
  2578       Node* exact_obj = not_null_obj; // will get updated in place...
  2579       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
  2580                                             &exact_obj);
  2581       { PreserveJVMState pjvms(this);
  2582         set_control(slow_ctl);
  2583         uncommon_trap(Deoptimization::Reason_class_check,
  2584                       Deoptimization::Action_maybe_recompile);
  2586       replace_in_map(not_null_obj, exact_obj);
  2587       return exact_obj;
  2589     // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
  2592   return NULL;
  2596 //-------------------------------gen_instanceof--------------------------------
  2597 // Generate an instance-of idiom.  Used by both the instance-of bytecode
  2598 // and the reflective instance-of call.
  2599 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
  2600   kill_dead_locals();           // Benefit all the uncommon traps
  2601   assert( !stopped(), "dead parse path should be checked in callers" );
  2602   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
  2603          "must check for not-null not-dead klass in callers");
  2605   // Make the merge point
  2606   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
  2607   RegionNode* region = new(C) RegionNode(PATH_LIMIT);
  2608   Node*       phi    = new(C) PhiNode(region, TypeInt::BOOL);
  2609   C->set_has_split_ifs(true); // Has chance for split-if optimization
  2611   ciProfileData* data = NULL;
  2612   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
  2613     data = method()->method_data()->bci_to_data(bci());
  2615   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
  2616                          && seems_never_null(obj, data));
  2618   // Null check; get casted pointer; set region slot 3
  2619   Node* null_ctl = top();
  2620   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
  2622   // If not_null_obj is dead, only null-path is taken
  2623   if (stopped()) {              // Doing instance-of on a NULL?
  2624     set_control(null_ctl);
  2625     return intcon(0);
  2627   region->init_req(_null_path, null_ctl);
  2628   phi   ->init_req(_null_path, intcon(0)); // Set null path value
  2629   if (null_ctl == top()) {
  2630     // Do this eagerly, so that pattern matches like is_diamond_phi
  2631     // will work even during parsing.
  2632     assert(_null_path == PATH_LIMIT-1, "delete last");
  2633     region->del_req(_null_path);
  2634     phi   ->del_req(_null_path);
  2637   if (ProfileDynamicTypes && data != NULL) {
  2638     Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL);
  2639     if (stopped()) {            // Profile disagrees with this path.
  2640       set_control(null_ctl);    // Null is the only remaining possibility.
  2641       return intcon(0);
  2643     if (cast_obj != NULL)
  2644       not_null_obj = cast_obj;
  2647   // Load the object's klass
  2648   Node* obj_klass = load_object_klass(not_null_obj);
  2650   // Generate the subtype check
  2651   Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
  2653   // Plug in the success path to the general merge in slot 1.
  2654   region->init_req(_obj_path, control());
  2655   phi   ->init_req(_obj_path, intcon(1));
  2657   // Plug in the failing path to the general merge in slot 2.
  2658   region->init_req(_fail_path, not_subtype_ctrl);
  2659   phi   ->init_req(_fail_path, intcon(0));
  2661   // Return final merged results
  2662   set_control( _gvn.transform(region) );
  2663   record_for_igvn(region);
  2664   return _gvn.transform(phi);
  2667 //-------------------------------gen_checkcast---------------------------------
  2668 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
  2669 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
  2670 // uncommon-trap paths work.  Adjust stack after this call.
  2671 // If failure_control is supplied and not null, it is filled in with
  2672 // the control edge for the cast failure.  Otherwise, an appropriate
  2673 // uncommon trap or exception is thrown.
  2674 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
  2675                               Node* *failure_control) {
  2676   kill_dead_locals();           // Benefit all the uncommon traps
  2677   const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
  2678   const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
  2680   // Fast cutout:  Check the case that the cast is vacuously true.
  2681   // This detects the common cases where the test will short-circuit
  2682   // away completely.  We do this before we perform the null check,
  2683   // because if the test is going to turn into zero code, we don't
  2684   // want a residual null check left around.  (Causes a slowdown,
  2685   // for example, in some objArray manipulations, such as a[i]=a[j].)
  2686   if (tk->singleton()) {
  2687     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
  2688     if (objtp != NULL && objtp->klass() != NULL) {
  2689       switch (static_subtype_check(tk->klass(), objtp->klass())) {
  2690       case SSC_always_true:
  2691         return obj;
  2692       case SSC_always_false:
  2693         // It needs a null check because a null will *pass* the cast check.
  2694         // A non-null value will always produce an exception.
  2695         return null_assert(obj);
  2700   ciProfileData* data = NULL;
  2701   if (failure_control == NULL) {        // use MDO in regular case only
  2702     assert(java_bc() == Bytecodes::_aastore ||
  2703            java_bc() == Bytecodes::_checkcast,
  2704            "interpreter profiles type checks only for these BCs");
  2705     data = method()->method_data()->bci_to_data(bci());
  2708   // Make the merge point
  2709   enum { _obj_path = 1, _null_path, PATH_LIMIT };
  2710   RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  2711   Node*       phi    = new (C) PhiNode(region, toop);
  2712   C->set_has_split_ifs(true); // Has chance for split-if optimization
  2714   // Use null-cast information if it is available
  2715   bool never_see_null = ((failure_control == NULL)  // regular case only
  2716                          && seems_never_null(obj, data));
  2718   // Null check; get casted pointer; set region slot 2
  2719   Node* null_ctl = top();
  2720   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
  2722   // If not_null_obj is dead, only null-path is taken
  2723   if (stopped()) {              // Doing checkcast on a NULL?
  2724     set_control(null_ctl);
  2725     return null();
  2727   region->init_req(_null_path, null_ctl);
  2728   phi   ->init_req(_null_path, null());  // Set null path value
  2729   if (null_ctl == top()) {
  2730     // Do this eagerly, so that pattern matches like is_diamond_phi
  2731     // will work even during parsing.
  2732     assert(_null_path == PATH_LIMIT-1, "delete last");
  2733     region->del_req(_null_path);
  2734     phi   ->del_req(_null_path);
  2737   Node* cast_obj = NULL;
  2738   if (data != NULL &&
  2739       // Counter has never been decremented (due to cast failure).
  2740       // ...This is a reasonable thing to expect.  It is true of
  2741       // all casts inserted by javac to implement generic types.
  2742       data->as_CounterData()->count() >= 0) {
  2743     cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
  2744     if (cast_obj != NULL) {
  2745       if (failure_control != NULL) // failure is now impossible
  2746         (*failure_control) = top();
  2747       // adjust the type of the phi to the exact klass:
  2748       phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
  2752   if (cast_obj == NULL) {
  2753     // Load the object's klass
  2754     Node* obj_klass = load_object_klass(not_null_obj);
  2756     // Generate the subtype check
  2757     Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );
  2759     // Plug in success path into the merge
  2760     cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(),
  2761                                                          not_null_obj, toop));
  2762     // Failure path ends in uncommon trap (or may be dead - failure impossible)
  2763     if (failure_control == NULL) {
  2764       if (not_subtype_ctrl != top()) { // If failure is possible
  2765         PreserveJVMState pjvms(this);
  2766         set_control(not_subtype_ctrl);
  2767         builtin_throw(Deoptimization::Reason_class_check, obj_klass);
  2769     } else {
  2770       (*failure_control) = not_subtype_ctrl;
  2774   region->init_req(_obj_path, control());
  2775   phi   ->init_req(_obj_path, cast_obj);
  2777   // A merge of NULL or Casted-NotNull obj
  2778   Node* res = _gvn.transform(phi);
  2780   // Note I do NOT always 'replace_in_map(obj,result)' here.
  2781   //  if( tk->klass()->can_be_primary_super()  )
  2782     // This means that if I successfully store an Object into an array-of-String
  2783     // I 'forget' that the Object is really now known to be a String.  I have to
  2784     // do this because we don't have true union types for interfaces - if I store
  2785     // a Baz into an array-of-Interface and then tell the optimizer it's an
  2786     // Interface, I forget that it's also a Baz and cannot do Baz-like field
  2787     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
  2788   //  replace_in_map( obj, res );
  2790   // Return final merged results
  2791   set_control( _gvn.transform(region) );
  2792   record_for_igvn(region);
  2793   return res;
  2796 //------------------------------next_monitor-----------------------------------
  2797 // What number should be given to the next monitor?
  2798 int GraphKit::next_monitor() {
  2799   int current = jvms()->monitor_depth()* C->sync_stack_slots();
  2800   int next = current + C->sync_stack_slots();
  2801   // Keep the toplevel high water mark current:
  2802   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
  2803   return current;
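         // Worked example (slot count is platform-dependent; two assumed here):
         // the first monitor is boxed at slot 0, a nested second monitor at
         // slot 2, and fixed_slots grows to 4.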
  2806 //------------------------------insert_mem_bar---------------------------------
  2807 // Memory barrier to avoid floating things around
  2808 // The membar serves as a pinch point for both control and all memory slices.
  2809 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
  2810   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  2811   mb->init_req(TypeFunc::Control, control());
  2812   mb->init_req(TypeFunc::Memory,  reset_memory());
  2813   Node* membar = _gvn.transform(mb);
  2814   set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
  2815   set_all_memory_call(membar);
  2816   return membar;
  2819 //-------------------------insert_mem_bar_volatile----------------------------
  2820 // Memory barrier to avoid floating things around
  2821 // The membar serves as a pinch point for both control and memory(alias_idx).
  2822 // If you want to make a pinch point on all memory slices, do not use this
  2823 // function (even with AliasIdxBot); use insert_mem_bar() instead.
  2824 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
  2825   // When Parse::do_put_xxx updates a volatile field, it appends a series
  2826   // of MemBarVolatile nodes, one for *each* volatile field alias category.
  2827   // The first membar is on the same memory slice as the field store opcode.
  2828   // This forces the membar to follow the store.  (Bug 6500685 broke this.)
  2829   // All the other membars (for other volatile slices, including AliasIdxBot,
  2830   // which stands for all unknown volatile slices) are control-dependent
  2831   // on the first membar.  This prevents later volatile loads or stores
  2832   // from sliding up past the just-emitted store.
  2834   MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
  2835   mb->set_req(TypeFunc::Control,control());
  2836   if (alias_idx == Compile::AliasIdxBot) {
  2837     mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
  2838   } else {
  2839     assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
  2840     mb->set_req(TypeFunc::Memory, memory(alias_idx));
  2842   Node* membar = _gvn.transform(mb);
  2843   set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
  2844   if (alias_idx == Compile::AliasIdxBot) {
  2845     merged_memory()->set_base_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)));
  2846   } else {
  2847     set_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)),alias_idx);
  2849   return membar;
  2852 //------------------------------shared_lock------------------------------------
  2853 // Emit locking code.
  2854 FastLockNode* GraphKit::shared_lock(Node* obj) {
  2855   // bci is either a monitorenter bc or InvocationEntryBci
  2856   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  2857   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  2859   if( !GenerateSynchronizationCode )
  2860     return NULL;                // Not locking things?
  2861   if (stopped())                // Dead monitor?
  2862     return NULL;
  2864   assert(dead_locals_are_killed(), "should kill locals before sync. point");
  2866   // Box the stack location
  2867   Node* box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
  2868   Node* mem = reset_memory();
  2870   FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
  2871   if (PrintPreciseBiasedLockingStatistics) {
  2872     // Create the counters for this fast lock.
  2873     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
  2875   // Add monitor to debug info for the slow path.  If we block inside the
  2876   // slow path and de-opt, we need the monitor hanging around
  2877   map()->push_monitor( flock );
  2879   const TypeFunc *tf = LockNode::lock_type();
  2880   LockNode *lock = new (C) LockNode(C, tf);
  2882   lock->init_req( TypeFunc::Control, control() );
  2883   lock->init_req( TypeFunc::Memory , mem );
  2884   lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  2885   lock->init_req( TypeFunc::FramePtr, frameptr() );
  2886   lock->init_req( TypeFunc::ReturnAdr, top() );
  2888   lock->init_req(TypeFunc::Parms + 0, obj);
  2889   lock->init_req(TypeFunc::Parms + 1, box);
  2890   lock->init_req(TypeFunc::Parms + 2, flock);
  2891   add_safepoint_edges(lock);
  2893   lock = _gvn.transform( lock )->as_Lock();
  2895   // lock has no side-effects, sets few values
  2896   set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
  2898   insert_mem_bar(Op_MemBarAcquireLock);
  2900   // Add this to the worklist so that the lock can be eliminated
  2901   record_for_igvn(lock);
  2903 #ifndef PRODUCT
  2904   if (PrintLockStatistics) {
  2905     // Update the counter for this lock.  Don't bother using an atomic
  2906     // operation since we don't require absolute accuracy.
  2907     lock->create_lock_counter(map()->jvms());
  2908     increment_counter(lock->counter()->addr());
  2910 #endif
  2912   return flock;
  2916 //------------------------------shared_unlock----------------------------------
  2917 // Emit unlocking code.
  2918 void GraphKit::shared_unlock(Node* box, Node* obj) {
  2919   // bci is either a monitorexit bc or InvocationEntryBci
  2920   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  2921   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  2923   if( !GenerateSynchronizationCode )
  2924     return;
  2925   if (stopped()) {               // Dead monitor?
  2926     map()->pop_monitor();        // Kill monitor from debug info
  2927     return;
  2930   // Memory barrier to avoid floating things down past the locked region
  2931   insert_mem_bar(Op_MemBarReleaseLock);
  2933   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
  2934   UnlockNode *unlock = new (C) UnlockNode(C, tf);
  2935   uint raw_idx = Compile::AliasIdxRaw;
  2936   unlock->init_req( TypeFunc::Control, control() );
  2937   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  2938   unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  2939   unlock->init_req( TypeFunc::FramePtr, frameptr() );
  2940   unlock->init_req( TypeFunc::ReturnAdr, top() );
  2942   unlock->init_req(TypeFunc::Parms + 0, obj);
  2943   unlock->init_req(TypeFunc::Parms + 1, box);
  2944   unlock = _gvn.transform(unlock)->as_Unlock();
  2946   Node* mem = reset_memory();
  2948   // unlock has no side-effects, sets few values
  2949   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
  2951   // Kill monitor from debug info
  2952   map()->pop_monitor( );
  2955 //-------------------------------get_layout_helper-----------------------------
  2956 // If the given klass is a constant or known to be an array,
  2957 // fetch the constant layout helper value into constant_value
  2958 // and return (Node*)NULL.  Otherwise, load the non-constant
  2959 // layout helper value, and return the node which represents it.
  2960 // This two-faced routine is useful because allocation sites
  2961 // almost always feature constant types.
  2962 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
  2963   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
  2964   if (!StressReflectiveCode && inst_klass != NULL) {
  2965     ciKlass* klass = inst_klass->klass();
  2966     bool    xklass = inst_klass->klass_is_exact();
  2967     if (xklass || klass->is_array_klass()) {
  2968       jint lhelper = klass->layout_helper();
  2969       if (lhelper != Klass::_lh_neutral_value) {
  2970         constant_value = lhelper;
  2971         return (Node*) NULL;
  2975   constant_value = Klass::_lh_neutral_value;  // put in a known value
  2976   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
  2977   return make_load(NULL, lhp, TypeInt::INT, T_INT);
  2980 // We just put in an allocate/initialize with a big raw-memory effect.
  2981 // Hook selected additional alias categories on the initialization.
  2982 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
  2983                                 MergeMemNode* init_in_merge,
  2984                                 Node* init_out_raw) {
  2985   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
  2986   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
  2988   Node* prevmem = kit.memory(alias_idx);
  2989   init_in_merge->set_memory_at(alias_idx, prevmem);
  2990   kit.set_memory(init_out_raw, alias_idx);
  2993 //---------------------------set_output_for_allocation-------------------------
  2994 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
  2995                                           const TypeOopPtr* oop_type) {
  2996   int rawidx = Compile::AliasIdxRaw;
  2997   alloc->set_req( TypeFunc::FramePtr, frameptr() );
  2998   add_safepoint_edges(alloc);
  2999   Node* allocx = _gvn.transform(alloc);
  3000   set_control( _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Control) ) );
  3001   // create memory projection for i_o
  3002   set_memory ( _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
  3003   make_slow_call_ex(allocx, env()->Throwable_klass(), true);
  3005   // create a memory projection as for the normal control path
  3006   Node* malloc = _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Memory));
  3007   set_memory(malloc, rawidx);
  3009   // a normal slow-call doesn't change i_o, but an allocation does
  3010   // we create a separate i_o projection for the normal control path
  3011   set_i_o(_gvn.transform( new (C) ProjNode(allocx, TypeFunc::I_O, false) ) );
  3012   Node* rawoop = _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Parms) );
  3014   // put in an initialization barrier
  3015   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
  3016                                                  rawoop)->as_Initialize();
  3017   assert(alloc->initialization() == init,  "2-way macro link must work");
  3018   assert(init ->allocation()     == alloc, "2-way macro link must work");
  3020     // Extract memory strands which may participate in the new object's
  3021     // initialization, and source them from the new InitializeNode.
  3022     // This will allow us to observe initializations when they occur,
  3023     // and link them properly (as a group) to the InitializeNode.
  3024     assert(init->in(InitializeNode::Memory) == malloc, "");
  3025     MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
  3026     init->set_req(InitializeNode::Memory, minit_in);
  3027     record_for_igvn(minit_in); // fold it up later, if possible
  3028     Node* minit_out = memory(rawidx);
  3029     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
  3030     if (oop_type->isa_aryptr()) {
  3031       const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
  3032       int            elemidx  = C->get_alias_index(telemref);
  3033       hook_memory_on_init(*this, elemidx, minit_in, minit_out);
  3034     } else if (oop_type->isa_instptr()) {
  3035       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
  3036       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
  3037         ciField* field = ik->nonstatic_field_at(i);
  3038         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
  3039           continue;  // do not bother to track really large numbers of fields
  3040         // Find (or create) the alias category for this field:
  3041         int fieldidx = C->alias_type(field)->index();
  3042         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
  3047   // Cast raw oop to the real thing...
  3048   Node* javaoop = new (C) CheckCastPPNode(control(), rawoop, oop_type);
  3049   javaoop = _gvn.transform(javaoop);
  3050   C->set_recent_alloc(control(), javaoop);
  3051   assert(just_allocated_object(control()) == javaoop, "just allocated");
  3053 #ifdef ASSERT
  3054   { // Verify that the AllocateNode::Ideal_allocation recognizers work:
  3055     assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
  3056            "Ideal_allocation works");
  3057     assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
  3058            "Ideal_allocation works");
  3059     if (alloc->is_AllocateArray()) {
  3060       assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
  3061              "Ideal_allocation works");
  3062       assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
  3063              "Ideal_allocation works");
  3064     } else {
  3065       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
  3068 #endif //ASSERT
  3070   return javaoop;
  3073 //---------------------------new_instance--------------------------------------
  3074 // This routine takes a klass_node which may be constant (for a static type)
  3075 // or may be non-constant (for reflective code).  It will work equally well
  3076 // for either, and the graph will fold nicely if the optimizer later reduces
  3077 // the type to a constant.
  3078 // The optional arguments are for specialized use by intrinsics:
  3079 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
  3080 //  - If 'return_size_val' is not null, report the total object size to the caller.
  3081 Node* GraphKit::new_instance(Node* klass_node,
  3082                              Node* extra_slow_test,
  3083                              Node* *return_size_val) {
  3084   // Compute size in doublewords
  3085   // The size is always an integral number of doublewords, represented
  3086   // as a positive bytewise size stored in the klass's layout_helper.
  3087   // The layout_helper also encodes (in a low bit) the need for a slow path.
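         // Sketch of the assumed encoding: for an ordinary instance klass the
         // layout_helper is simply the instance size in bytes with the slow-path
         // bit clear; klasses that must allocate in the VM (e.g. finalizable
         // classes) have the bit set, which feeds initial_slow_test below.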
  3088   jint  layout_con = Klass::_lh_neutral_value;
  3089   Node* layout_val = get_layout_helper(klass_node, layout_con);
  3090   int   layout_is_con = (layout_val == NULL);
  3092   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
  3093   // Generate the initial go-slow test.  It's either ALWAYS (return a
  3094   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
  3095   // case) a computed value derived from the layout_helper.
  3096   Node* initial_slow_test = NULL;
  3097   if (layout_is_con) {
  3098     assert(!StressReflectiveCode, "stress mode does not use these paths");
  3099     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
  3100     initial_slow_test = must_go_slow? intcon(1): extra_slow_test;
  3102   } else {   // reflective case
  3103     // This reflective path is used by Unsafe.allocateInstance.
  3104     // (It may be stress-tested by specifying StressReflectiveCode.)
  3105     // Basically, we want to get into the VM if there's an illegal argument.
  3106     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
  3107     initial_slow_test = _gvn.transform( new (C) AndINode(layout_val, bit) );
  3108     if (extra_slow_test != intcon(0)) {
  3109       initial_slow_test = _gvn.transform( new (C) OrINode(initial_slow_test, extra_slow_test) );
  3111     // (Macro-expander will further convert this to a Bool, if necessary.)
  3114   // Find the size in bytes.  This is easy; it's the layout_helper.
  3115   // The size value must be valid even if the slow path is taken.
  3116   Node* size = NULL;
  3117   if (layout_is_con) {
  3118     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
  3119   } else {   // reflective case
  3120     // This reflective path is used by clone and Unsafe.allocateInstance.
  3121     size = ConvI2X(layout_val);
  3123     // Clear the low bits to extract layout_helper_size_in_bytes:
  3124     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
  3125     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
  3126     size = _gvn.transform( new (C) AndXNode(size, mask) );
  3128   if (return_size_val != NULL) {
  3129     (*return_size_val) = size;
  3132   // This is a precise notnull oop of the klass.
  3133   // (Actually, it need not be precise if this is a reflective allocation.)
  3134   // It's what we cast the result to.
  3135   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
  3136   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
  3137   const TypeOopPtr* oop_type = tklass->as_instance_type();
  3139   // Now generate allocation code
  3141   // The entire memory state is needed for the slow path of the allocation
  3142   // since GC and deoptimization can happen.
  3143   Node *mem = reset_memory();
  3144   set_all_memory(mem); // Create new memory state
  3146   AllocateNode* alloc
  3147     = new (C) AllocateNode(C, AllocateNode::alloc_type(),
  3148                            control(), mem, i_o(),
  3149                            size, klass_node,
  3150                            initial_slow_test);
  3152   return set_output_for_allocation(alloc, oop_type);
  3155 //-------------------------------new_array-------------------------------------
  3156 // helper for both newarray and anewarray
  3157 // The 'length' parameter is (obviously) the length of the array.
  3158 // See comments on new_instance for the meaning of the other arguments.
  3159 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
  3160                           Node* length,         // number of array elements
  3161                           int   nargs,          // number of arguments to push back for uncommon trap
  3162                           Node* *return_size_val) {
  3163   jint  layout_con = Klass::_lh_neutral_value;
  3164   Node* layout_val = get_layout_helper(klass_node, layout_con);
  3165   int   layout_is_con = (layout_val == NULL);
  3167   if (!layout_is_con && !StressReflectiveCode &&
  3168       !too_many_traps(Deoptimization::Reason_class_check)) {
  3169     // This is a reflective array creation site.
  3170     // Optimistically assume that it is a subtype of Object[],
  3171     // so that we can fold up all the address arithmetic.
  3172     layout_con = Klass::array_layout_helper(T_OBJECT);
  3173     Node* cmp_lh = _gvn.transform( new(C) CmpINode(layout_val, intcon(layout_con)) );
  3174     Node* bol_lh = _gvn.transform( new(C) BoolNode(cmp_lh, BoolTest::eq) );
  3175     { BuildCutout unless(this, bol_lh, PROB_MAX);
  3176       inc_sp(nargs);
  3177       uncommon_trap(Deoptimization::Reason_class_check,
  3178                     Deoptimization::Action_maybe_recompile);
  3180     layout_val = NULL;
  3181     layout_is_con = true;
  3184   // Generate the initial go-slow test.  Make sure we do not overflow
  3185   // if length is huge (near 2Gig) or negative!  We do not need
  3186   // exact double-words here, just a close approximation of needed
  3187   // double-words.  We can't add any offset or rounding bits, lest we
  3188   // take a size of -1 bytes and make it positive.  Use an unsigned
  3189   // compare, so negative sizes look hugely positive.
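         // E.g. a length of -1 reinterpreted as unsigned is 0xFFFFFFFF, which is
         // above any fast_size_limit, so negative lengths always take the slow path.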
  3190   int fast_size_limit = FastAllocateSizeLimit;
  3191   if (layout_is_con) {
  3192     assert(!StressReflectiveCode, "stress mode does not use these paths");
  3193     // Increase the size limit if we have exact knowledge of array type.
  3194     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
  3195     fast_size_limit <<= (LogBytesPerLong - log2_esize);
  3198   Node* initial_slow_cmp  = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
  3199   Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );
  3200   if (initial_slow_test->is_Bool()) {
  3201     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
  3202     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  3205   // --- Size Computation ---
  3206   // array_size = round_to_heap(array_header + (length << elem_shift));
  3207   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
  3208   // and round_to(x, y) == ((x + y-1) & ~(y-1))
  3209   // The rounding mask is strength-reduced, if possible.
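         // Worked example (header size and alignment assumed): for a char[10] with
         // a 16-byte header and 8-byte heap alignment, header + (10 << 1) = 36
         // bytes, and round_to(36, 8) == (36 + 7) & ~7 == 40 bytes.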
  3210   int round_mask = MinObjAlignmentInBytes - 1;
  3211   Node* header_size = NULL;
  3212   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
  3213   // (T_BYTE has the weakest alignment and size restrictions...)
  3214   if (layout_is_con) {
  3215     int       hsize  = Klass::layout_helper_header_size(layout_con);
  3216     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
  3217     BasicType etype  = Klass::layout_helper_element_type(layout_con);
  3218     if ((round_mask & ~right_n_bits(eshift)) == 0)
  3219       round_mask = 0;  // strength-reduce it if it goes away completely
  3220     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
  3221     assert(header_size_min <= hsize, "generic minimum is smallest");
  3222     header_size_min = hsize;
  3223     header_size = intcon(hsize + round_mask);
  3224   } else {
  3225     Node* hss   = intcon(Klass::_lh_header_size_shift);
  3226     Node* hsm   = intcon(Klass::_lh_header_size_mask);
  3227     Node* hsize = _gvn.transform( new(C) URShiftINode(layout_val, hss) );
  3228     hsize       = _gvn.transform( new(C) AndINode(hsize, hsm) );
  3229     Node* mask  = intcon(round_mask);
  3230     header_size = _gvn.transform( new(C) AddINode(hsize, mask) );
  3233   Node* elem_shift = NULL;
  3234   if (layout_is_con) {
  3235     int eshift = Klass::layout_helper_log2_element_size(layout_con);
  3236     if (eshift != 0)
  3237       elem_shift = intcon(eshift);
  3238   } else {
  3239     // There is no need to mask or shift this value.
  3240     // The semantics of LShiftINode include an implicit mask to 0x1F.
  3241     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
  3242     elem_shift = layout_val;
  3245   // Transition to native address size for all offset calculations:
  3246   Node* lengthx = ConvI2X(length);
  3247   Node* headerx = ConvI2X(header_size);
  3248 #ifdef _LP64
  3249   { const TypeLong* tllen = _gvn.find_long_type(lengthx);
  3250     if (tllen != NULL && tllen->_lo < 0) {
  3251       // Add a manual constraint to a positive range.  Cf. array_element_address.
  3252       jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
  3253       if (size_max > tllen->_hi)  size_max = tllen->_hi;
  3254       const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
  3255       lengthx = _gvn.transform( new (C) ConvI2LNode(length, tlcon));
  3258 #endif
  3260   // Combine header size (plus rounding) and body size.  Then round down.
  3261   // This computation cannot overflow, because it is used only in two
  3262   // places, one where the length is sharply limited, and the other
  3263   // after a successful allocation.
  3264   Node* abody = lengthx;
  3265   if (elem_shift != NULL)
  3266     abody     = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) );
  3267   Node* size  = _gvn.transform( new(C) AddXNode(headerx, abody) );
  3268   if (round_mask != 0) {
  3269     Node* mask = MakeConX(~round_mask);
  3270     size       = _gvn.transform( new(C) AndXNode(size, mask) );
  3272   // else if round_mask == 0, the size computation is self-rounding
  3274   if (return_size_val != NULL) {
  3275     // This is the size
  3276     (*return_size_val) = size;
  3279   // Now generate allocation code
  3281   // The entire memory state is needed for the slow path of the allocation
  3282   // since GC and deoptimization can happen.
  3283   Node *mem = reset_memory();
  3284   set_all_memory(mem); // Create new memory state
  3286   // Create the AllocateArrayNode and its result projections
  3287   AllocateArrayNode* alloc
  3288     = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
  3289                                 control(), mem, i_o(),
  3290                                 size, klass_node,
  3291                                 initial_slow_test,
  3292                                 length);
  3294   // Cast to correct type.  Note that the klass_node may be constant or not,
  3295   // and in the latter case the actual array type will be inexact also.
  3296   // (This happens via a non-constant argument to inline_native_newArray.)
  3297   // In any case, the value of klass_node provides the desired array type.
  3298   const TypeInt* length_type = _gvn.find_int_type(length);
  3299   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
  3300   if (ary_type->isa_aryptr() && length_type != NULL) {
  3301     // Try to get a better type than POS for the size
  3302     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  3305   Node* javaoop = set_output_for_allocation(alloc, ary_type);
  3307   // Cast length on remaining path to be as narrow as possible
  3308   if (map()->find_edge(length) >= 0) {
  3309     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
  3310     if (ccast != length) {
  3311       _gvn.set_type_bottom(ccast);
  3312       record_for_igvn(ccast);
  3313       replace_in_map(length, ccast);
  3317   return javaoop;
  3320 // The following "Ideal_foo" functions are placed here because they recognize
  3321 // the graph shapes created by the functions immediately above.
  3323 //---------------------------Ideal_allocation----------------------------------
  3324 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
  3325 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  3326   if (ptr == NULL) {     // reduce dumb test in callers
  3327     return NULL;
  3329   if (ptr->is_CheckCastPP()) {  // strip a raw-to-oop cast
  3330     ptr = ptr->in(1);
  3331     if (ptr == NULL)  return NULL;
  3333   if (ptr->is_Proj()) {
  3334     Node* allo = ptr->in(0);
  3335     if (allo != NULL && allo->is_Allocate()) {
  3336       return allo->as_Allocate();
  3339   // Report failure to match.
  3340   return NULL;
  3343 // Fancy version which also strips off an offset (and reports it to caller).
  3344 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
  3345                                              intptr_t& offset) {
  3346   Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
  3347   if (base == NULL)  return NULL;
  3348   return Ideal_allocation(base, phase);
  3351 // Trace Initialize <- Proj[Parm] <- Allocate
  3352 AllocateNode* InitializeNode::allocation() {
  3353   Node* rawoop = in(InitializeNode::RawAddress);
  3354   if (rawoop->is_Proj()) {
  3355     Node* alloc = rawoop->in(0);
  3356     if (alloc->is_Allocate()) {
  3357       return alloc->as_Allocate();
  3360   return NULL;
  3363 // Trace Allocate -> Proj[Parm] -> Initialize
  3364 InitializeNode* AllocateNode::initialization() {
  3365   ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  3366   if (rawoop == NULL)  return NULL;
  3367   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
  3368     Node* init = rawoop->fast_out(i);
  3369     if (init->is_Initialize()) {
  3370       assert(init->as_Initialize()->allocation() == this, "2-way link");
  3371       return init->as_Initialize();
  3374   return NULL;
  3377 // Trace Allocate -> Proj[Parm] -> MemBarStoreStore
  3378 MemBarStoreStoreNode* AllocateNode::storestore() {
  3379   ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  3380   if (rawoop == NULL)  return NULL;
  3381   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
  3382     Node* storestore = rawoop->fast_out(i);
  3383     if (storestore->is_MemBarStoreStore()) {
  3384       return storestore->as_MemBarStoreStore();
  3387   return NULL;
  3390 //----------------------------- loop predicates ---------------------------
  3392 //------------------------------add_predicate_impl----------------------------
  3393 void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
  3394   // Too many traps seen?
  3395   if (too_many_traps(reason)) {
  3396 #ifdef ASSERT
  3397     if (TraceLoopPredicate) {
  3398       int tc = C->trap_count(reason);
  3399       tty->print("too many traps=%s tcount=%d in ",
  3400                     Deoptimization::trap_reason_name(reason), tc);
  3401       method()->print(); // which method has too many predicate traps
  3402       tty->cr();
  3404 #endif
  3405     // We cannot afford to take more traps here,
  3406     // do not generate predicate.
  3407     return;
  3410   Node *cont    = _gvn.intcon(1);
  3411   Node* opq     = _gvn.transform(new (C) Opaque1Node(C, cont));
  3412   Node *bol     = _gvn.transform(new (C) Conv2BNode(opq));
  3413   IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
  3414   Node* iffalse = _gvn.transform(new (C) IfFalseNode(iff));
  3415   C->add_predicate_opaq(opq);
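         // The Opaque1-wrapped constant keeps this guard always true at parse time,
         // so the trap path below starts out dead; loop predication (PhaseIdealLoop)
         // can later substitute a real range or limit check here, turning the
         // uncommon trap into the predicate's failure path.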
  3417     PreserveJVMState pjvms(this);
  3418     set_control(iffalse);
  3419     inc_sp(nargs);
  3420     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
  3422   Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff));
  3423   set_control(iftrue);
  3426 //------------------------------add_predicate---------------------------------
  3427 void GraphKit::add_predicate(int nargs) {
  3428   if (UseLoopPredicate) {
  3429     add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  3431   // loop's limit check predicate should be near the loop.
  3432   if (LoopLimitCheck) {
  3433     add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
  3437 //----------------------------- store barriers ----------------------------
  3438 #define __ ideal.
  3440 void GraphKit::sync_kit(IdealKit& ideal) {
  3441   set_all_memory(__ merged_memory());
  3442   set_i_o(__ i_o());
  3443   set_control(__ ctrl());
  3446 void GraphKit::final_sync(IdealKit& ideal) {
  3447   // Final sync IdealKit and graphKit.
  3448   sync_kit(ideal);
  3451 // vanilla/CMS post barrier
  3452 // Insert a write-barrier store.  This is to let generational GC work; we have
  3453 // to flag all oop-stores before the next GC point.
  3454 void GraphKit::write_barrier_post(Node* oop_store,
  3455                                   Node* obj,
  3456                                   Node* adr,
  3457                                   uint  adr_idx,
  3458                                   Node* val,
  3459                                   bool use_precise) {
  3460   // No store check needed if we're storing a NULL or an old object
  3461   // (latter case is probably a string constant). The concurrent
  3462   // mark sweep garbage collector, however, needs to have all nonNull
  3463   // oop updates flagged via card-marks.
  3464   if (val != NULL && val->is_Con()) {
  3465     // must be either an oop or NULL
  3466     const Type* t = val->bottom_type();
  3467     if (t == TypePtr::NULL_PTR || t == Type::TOP)
  3468       // stores of null never (?) need barriers
  3469       return;
  3472   if (use_ReduceInitialCardMarks()
  3473       && obj == just_allocated_object(control())) {
  3474     // We can skip marks on a freshly-allocated object in Eden.
  3475     // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
  3476     // That routine informs GC to take appropriate compensating steps,
  3477     // upon a slow-path allocation, so as to make this card-mark
  3478     // elision safe.
  3479     return;
  3482   if (!use_precise) {
  3483     // All card marks for a (non-array) instance are in one place:
  3484     adr = obj;
  3486   // (Else it's an array (or unknown), and we want more precise card marks.)
  3487   assert(adr != NULL, "");
  3489   IdealKit ideal(this, true);
  3491   // Convert the pointer to an int prior to doing math on it
  3492   Node* cast = __ CastPX(__ ctrl(), adr);
  3494   // Divide by card size
  3495   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
  3496          "Only one we handle so far.");
  3497   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
  3499   // Combine card table base and card offset
  3500   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
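         // Worked example (card size assumed): with 512-byte cards (card_shift == 9)
         // a store to address 0x1234 dirties the card byte at
         // byte_map_base + (0x1234 >> 9) == byte_map_base + 9.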
  3502   // Get the alias_index for raw card-mark memory
  3503   int adr_type = Compile::AliasIdxRaw;
  3504   Node*   zero = __ ConI(0); // Dirty card value
  3505   BasicType bt = T_BYTE;
  3507   if (UseCondCardMark) {
  3508     // The classic GC reference write barrier is typically implemented
  3509     // as a store into the global card mark table.  Unfortunately
  3510     // unconditional stores can result in false sharing and excessive
  3511     // coherence traffic as well as false transactional aborts.
  3512     // UseCondCardMark enables MP "polite" conditional card mark
  3513     // stores.  In theory we could relax the load from ctrl() to
  3514     // no_ctrl, but that doesn't buy much latitude.
  3515     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
  3516     __ if_then(card_val, BoolTest::ne, zero);
  3519   // Smash zero into card
  3520   if( !UseConcMarkSweepGC ) {
  3521     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
  3522   } else {
  3523     // Specialized path for CM store barrier
  3524     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  3527   if (UseCondCardMark) {
  3528     __ end_if();
  3531   // Final sync IdealKit and GraphKit.
  3532   final_sync(ideal);
  3535 // G1 pre/post barriers
  3536 void GraphKit::g1_write_barrier_pre(bool do_load,
  3537                                     Node* obj,
  3538                                     Node* adr,
  3539                                     uint alias_idx,
  3540                                     Node* val,
  3541                                     const TypeOopPtr* val_type,
  3542                                     Node* pre_val,
  3543                                     BasicType bt) {
  3545   // Some sanity checks
  3546   // Note: val is unused in this routine.
  3548   if (do_load) {
  3549     // We need to generate the load of the previous value
  3550     assert(obj != NULL, "must have a base");
  3551     assert(adr != NULL, "where are we loading from?");
  3552     assert(pre_val == NULL, "loaded already?");
  3553     assert(val_type != NULL, "need a type");
  3554   } else {
  3555     // In this case both val_type and alias_idx are unused.
  3556     assert(pre_val != NULL, "must be loaded already");
  3557     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  3559   assert(bt == T_OBJECT, "or we shouldn't be here");
  3561   IdealKit ideal(this, true);
  3563   Node* tls = __ thread(); // ThreadLocalStorage
  3565   Node* no_ctrl = NULL;
  3566   Node* no_base = __ top();
  3567   Node* zero  = __ ConI(0);
  3568   Node* zeroX = __ ConX(0);
  3570   float likely  = PROB_LIKELY(0.999);
  3571   float unlikely  = PROB_UNLIKELY(0.999);
  3573   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  3574   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
  3576   // Offsets into the thread
  3577   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
  3578                                           PtrQueue::byte_offset_of_active());
  3579   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
  3580                                           PtrQueue::byte_offset_of_index());
  3581   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
  3582                                           PtrQueue::byte_offset_of_buf());
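         // The SATB queue index below counts down in bytes: a non-zero _index means
         // the buffer still has room, and the enqueue stores the previous value at
         // buffer + (_index - sizeof(intptr_t)) before writing the new index back.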
  3584   // Now the actual pointers into the thread
  3585   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  3586   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  3587   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
  3589   // Now some of the values
  3590   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
  3592   // if (!marking)
  3593   __ if_then(marking, BoolTest::ne, zero); {
  3594     BasicType index_bt = TypeX_X->basic_type();
  3595     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
  3596     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
  3598     if (do_load) {
  3599       // load original value
  3600       // alias_idx correct??
  3601       pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
  3604     // if (pre_val != NULL)
  3605     __ if_then(pre_val, BoolTest::ne, null()); {
  3606       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
  3608       // is the queue for this thread full?
  3609       __ if_then(index, BoolTest::ne, zeroX, likely); {
  3611         // decrement the index
  3612         Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
  3614         // Now get the buffer location we will log the previous value into and store it
  3615         Node *log_addr = __ AddP(no_base, buffer, next_index);
  3616         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
  3617         // update the index
  3618         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
  3620       } __ else_(); {
  3622         // logging buffer is full, call the runtime
  3623         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
  3624         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
  3625       } __ end_if();  // (!index)
  3626     } __ end_if();  // (pre_val != NULL)
  3627   } __ end_if();  // (!marking)
  3629   // Final sync IdealKit and GraphKit.
  3630   final_sync(ideal);
  3633 //
  3634 // Update the card table and add card address to the queue
  3635 //
  3636 void GraphKit::g1_mark_card(IdealKit& ideal,
  3637                             Node* card_adr,
  3638                             Node* oop_store,
  3639                             uint oop_alias_idx,
  3640                             Node* index,
  3641                             Node* index_adr,
  3642                             Node* buffer,
  3643                             const TypeFunc* tf) {
  3645   Node* zero  = __ ConI(0);
  3646   Node* zeroX = __ ConX(0);
  3647   Node* no_base = __ top();
  3648   BasicType card_bt = T_BYTE;
  3649   // Smash zero into card. MUST BE ORDERED WRT TO STORE
  3650   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
  3652   //  Now do the queue work
  3653   __ if_then(index, BoolTest::ne, zeroX); {
  3655     Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
  3656     Node* log_addr = __ AddP(no_base, buffer, next_index);
  3658     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
  3659     __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  3661   } __ else_(); {
  3662     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  3663   } __ end_if();
  3667 void GraphKit::g1_write_barrier_post(Node* oop_store,
  3668                                      Node* obj,
  3669                                      Node* adr,
  3670                                      uint alias_idx,
  3671                                      Node* val,
  3672                                      BasicType bt,
  3673                                      bool use_precise) {
  3674   // If we are writing a NULL then we need no post barrier
  3676   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
  3677     // Must be NULL
  3678     const Type* t = val->bottom_type();
  3679     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
  3680     // No post barrier if writing NULL
  3681     return;
  3684   if (!use_precise) {
  3685     // All card marks for a (non-array) instance are in one place:
  3686     adr = obj;
  3688   // (Else it's an array (or unknown), and we want more precise card marks.)
  3689   assert(adr != NULL, "");
  3691   IdealKit ideal(this, true);
  3693   Node* tls = __ thread(); // ThreadLocalStorage
  3695   Node* no_base = __ top();
  3696   float likely  = PROB_LIKELY(0.999);
  3697   float unlikely  = PROB_UNLIKELY(0.999);
  3698   Node* zero = __ ConI(0);
  3699   Node* zeroX = __ ConX(0);
  3701   // Get the alias_index for raw card-mark memory
  3702   const TypePtr* card_type = TypeRawPtr::BOTTOM;
  3704   const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
  3706   // Offsets into the thread
  3707   const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
  3708                                      PtrQueue::byte_offset_of_index());
  3709   const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
  3710                                      PtrQueue::byte_offset_of_buf());
  3712   // Pointers into the thread
  3714   Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  3715   Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
  3717   // Now some values
  3718   // Use ctrl to avoid hoisting these values past a safepoint, which could
  3719   // potentially reset these fields in the JavaThread.
  3720   Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  3721   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
  3723   // Convert the store obj pointer to an int prior to doing math on it
  3724   // Must use ctrl to prevent "integerized oop" existing across safepoint
  3725   Node* cast =  __ CastPX(__ ctrl(), adr);
  3727   // Divide pointer by card size
  3728   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
  3730   // Combine card table base and card offset
  3731   Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
  3733   // If we know the value being stored, check whether the store crosses regions.
  3735   if (val != NULL) {
  3736     // Does the store cause us to cross regions?
  3738     // Should be able to do an unsigned compare of region_size instead of
  3739     // an extra shift. Do we have an unsigned compare??
  3740     // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
  3741     Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
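           // Worked example (region size assumed): with 1 MB regions
           // (LogOfHRGrainBytes == 20), 0x100000 ^ 0x1FFFFF == 0xFFFFF shifts to 0,
           // so both addresses are in the same region and the card mark is skipped.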
  3743     // if (xor_res == 0) same region so skip
  3744     __ if_then(xor_res, BoolTest::ne, zeroX); {
  3746       // No barrier if we are storing a NULL
  3747       __ if_then(val, BoolTest::ne, null(), unlikely); {
  3749         // Ok must mark the card if not already dirty
  3751         // load the original value of the card
  3752         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
  3754         __ if_then(card_val, BoolTest::ne, zero); {
  3755           g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
  3756         } __ end_if();
  3757       } __ end_if();
  3758     } __ end_if();
  3759   } else {
  3760     // Object.clone() intrinsic uses this path.
  3761     g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
  3762   }
  3764   // Final sync IdealKit and GraphKit.
  3765   final_sync(ideal);
  3766 }
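       // Taken together, the graph built above behaves roughly like the
       // following per-store pseudo-code (a sketch only; 0 is the dirty card
       // value and the constants are the usual G1 defaults):
       //
       //   if (((uintptr_t)adr ^ (uintptr_t)val) >> LogOfHRGrainBytes != 0) {  // crosses regions
       //     if (val != NULL) {
       //       jbyte* card = byte_map_base + ((uintptr_t)adr >> card_shift);
       //       if (*card != 0) {             // not already dirty
       //         *card = 0;                  // dirty it (ordered after the oop store)
       //         log the card in the thread's dirty card queue, or call
       //         SharedRuntime::g1_wb_post(card, thread) when the buffer is full
       //       }
       //     }
       //   }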
  3767 #undef __
  3771 Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
  3772   if (java_lang_String::has_offset_field()) {
  3773     int offset_offset = java_lang_String::offset_offset_in_bytes();
  3774     const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3775                                                        false, NULL, 0);
  3776     const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
  3777     int offset_field_idx = C->get_alias_index(offset_field_type);
  3778     return make_load(ctrl,
  3779                      basic_plus_adr(str, str, offset_offset),
  3780                      TypeInt::INT, T_INT, offset_field_idx);
  3781   } else {
  3782     return intcon(0);
  3783   }
  3784 }
  3786 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  3787   if (java_lang_String::has_count_field()) {
  3788     int count_offset = java_lang_String::count_offset_in_bytes();
  3789     const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3790                                                        false, NULL, 0);
  3791     const TypePtr* count_field_type = string_type->add_offset(count_offset);
  3792     int count_field_idx = C->get_alias_index(count_field_type);
  3793     return make_load(ctrl,
  3794                      basic_plus_adr(str, str, count_offset),
  3795                      TypeInt::INT, T_INT, count_field_idx);
  3796   } else {
  3797     return load_array_length(load_String_value(ctrl, str));
  3798   }
  3799 }
  3801 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  3802   int value_offset = java_lang_String::value_offset_in_bytes();
  3803   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3804                                                      false, NULL, 0);
  3805   const TypePtr* value_field_type = string_type->add_offset(value_offset);
  3806   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
  3807                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
  3808                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
  3809   int value_field_idx = C->get_alias_index(value_field_type);
  3810   return make_load(ctrl, basic_plus_adr(str, str, value_offset),
  3811                    value_type, T_OBJECT, value_field_idx);
  3812 }
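       // The three loaders above fetch String.value, plus String.offset and
       // String.count when the JDK being compiled against still has those
       // fields. For illustration only, an intrinsic reading character i of a
       // String could combine them roughly like this (i is an int-typed index
       // node; the make_load overload shown is a simplification):
       //
       //   Node* value  = load_String_value(ctrl, str);             // char[] payload
       //   Node* offset = load_String_offset(ctrl, str);            // 0 if no offset field
       //   Node* index  = _gvn.transform(new (C) AddINode(offset, i));
       //   Node* adr    = array_element_address(value, index, T_CHAR);
       //   Node* ch     = make_load(ctrl, adr, TypeInt::CHAR, T_CHAR);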
  3814 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
  3815   int offset_offset = java_lang_String::offset_offset_in_bytes();
  3816   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3817                                                      false, NULL, 0);
  3818   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
  3819   int offset_field_idx = C->get_alias_index(offset_field_type);
  3820   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
  3821                   value, T_INT, offset_field_idx);
  3822 }
  3824 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  3825   int value_offset = java_lang_String::value_offset_in_bytes();
  3826   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3827                                                      false, NULL, 0);
  3828   const TypePtr* value_field_type = string_type->add_offset(value_offset);
  3829   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
  3830                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
  3831                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
  3832   int value_field_idx = C->get_alias_index(value_field_type);
  3833   store_to_memory(ctrl, basic_plus_adr(str, value_offset),
  3834                   value, T_OBJECT, value_field_idx);
  3835 }
  3837 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
  3838   int count_offset = java_lang_String::count_offset_in_bytes();
  3839   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
  3840                                                      false, NULL, 0);
  3841   const TypePtr* count_field_type = string_type->add_offset(count_offset);
  3842   int count_field_idx = C->get_alias_index(count_field_type);
  3843   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
  3844                   value, T_INT, count_field_idx);
  3845 }
