src/share/vm/opto/graphKit.hpp


/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class FastLockNode;
class FastUnlockNode;
class Parse;
class RootNode;
//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _sp;        // JVM Expression Stack Pointer
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  SafePointNode*     map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse* is_Parse() const { return NULL; }

  ciEnv*        env()           const { return _env; }
  PhaseGVN&     gvn()           const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()          const { return zerocon(T_OBJECT); }
  Node*         top()           const { return C->top(); }
  RootNode*     root()          const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low (fixed) slots are accessed via the StartNode enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // then come the JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.
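  //
  // Illustrative layout (a sketch derived from the comments above, not a
  // normative specification): for a method with max_locals() == 2 and two
  // values on the operand stack, the map's inputs look like
  //   [0 .. Parms-1]   fixed slots (control, i_o, memory, frameptr, returnadr)
  //   [Parms + 0]      local 0
  //   [Parms + 1]      local 1
  //   [Parms + 2]      stack slot 0   (reached via stack(0))
  //   [Parms + 3]      stack slot 1
  //   [Parms + 4 ...]  monitor boxes/objects, if any
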
  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }

  void set_jvms(JVMState* jvms)       { set_map(jvms->map());
                                        assert(jvms == this->jvms(), "sanity");
                                        _sp = jvms->sp();
                                        _bci = jvms->bci();
                                        _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
  void set_sp(int i)                  { assert(i >= 0, "must be non-negative"); _sp = i; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)                  { set_sp(sp() + i); }
  void set_bci(int bci)               { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms()     const;
#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether there is a saved exception in the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);
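
  // For example (illustrative, not part of the original header): for iadd,
  // inputs == 2 and depth == -1 (two ints are popped, one is pushed); for a
  // getfield of a long field, inputs == 1 and depth == +1 (the objectref is
  // replaced by a two-slot long).
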
  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
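
  // Illustrative use (a sketch, not from the original header; this is
  // roughly what load_array_length() does internally):
  //   Node* len_adr = basic_plus_adr(ary, arrayOopDesc::length_offset_in_bytes());
  //   Node* len     = make_load(NULL, len_adr, TypeInt::POS, T_INT);
  // computes the address of the array's length field and loads it.
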
  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Helper function to do a NULL pointer check or ZERO check based on type.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null, Node* *null_control);
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* do_null_check(Node* value, BasicType type) {
    return null_check_common(value, type, false, NULL);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* do_null_assert(Node* value, BasicType type) {
    return null_check_common(value, type, true, NULL);
  }
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void push(Node* n)    { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); }
  Node* pop()           { map_not_null(); return _map->stack(_map->_jvms,--_sp); }
  Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
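
  // Illustrative sketch (not from the original header): a jlong occupies
  // two stack slots, the second being the top() placeholder:
  //   push_pair(longcon(42));   // sp advances by 2
  //   Node* l = pop_pair();     // sp drops by 2; l is the longcon node
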
  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it.
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version, which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version, which is given an alias index.
  // It returns the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);

  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise for object.  We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value. QQQ

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const Type* val_type,
                            BasicType bt);

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const Type* val_type,
                           BasicType bt);

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             const Type* val_type,
                             BasicType bt);
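
  // Illustrative use (a sketch, not from the original header; field_offset,
  // adr_type, and val are hypothetical placeholders):
  //   Node* adr = basic_plus_adr(obj, obj, field_offset);
  //   store_oop_to_object(control(), obj, adr, adr_type, val,
  //                       TypeInstPtr::BOTTOM, T_OBJECT);
  // emits the pre-barrier, the store itself, and an imprecise card mark.
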
  // For the few cases where the barriers need special help
  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
                   Node* val, const Type* val_type, BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
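
  // Illustrative use (a sketch; load_array_element wraps roughly this
  // pattern):
  //   Node* elem_adr = array_element_address(ary, idx, T_INT);
  //   Node* elem     = make_load(control(), elem_adr, TypeInt::INT, T_INT);
  // computes &ary[idx] for an int[] and loads the element under the
  // current control.
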
  // CMS card-marks have an input from the corresponding oop_store
  void  cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver, which is in argument(0).
  Node* null_check_receiver(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    int nargs = 1 + callee->signature()->size();
    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when the primitive is inlined into a method
    // which handles NullPointerExceptions.
    Node* receiver = argument(0);
    _sp += nargs;
    receiver = do_null_check(receiver, T_OBJECT);
    _sp -= nargs;
    return receiver;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void  set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now.
  // The optional klass is the one causing the trap.
  // The optional reason string is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }
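
  // For example (illustrative; the reason/action pair is one real choice
  // among several):
  //   uncommon_trap(Deoptimization::Reason_unreached,
  //                 Deoptimization::Action_reinterpret);
  // bails out to the interpreter on a path the profile claimed was never
  // taken.
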
  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Helper functions to round double arguments/results before/after a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };
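
  // Illustrative use (a sketch, not from the original header; the TypeFunc,
  // entry address, and name below are hypothetical placeholders):
  //   Node* res = make_runtime_call(RC_LEAF | RC_NO_FP,
  //                                 some_leaf_Type(),    // hypothetical TypeFunc
  //                                 some_leaf_entry,     // hypothetical address
  //                                 "some_leaf",
  //                                 TypeRawPtr::BOTTOM,  // memory slice touched
  //                                 parm0);
  // emits a CallLeafNoFPNode; adding RC_NO_IO would skip hooking the I/O
  // edges, and RC_NARROW_MEM would thread only the given memory slice.
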
  // Merge in all memory slices from new_mem, along the given path.
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // Helper function for the fast path/slow path idiom
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc *slow_call_type,
                      Node* slow_arg, klassOop ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof( Node *subobj, Node* superkls );

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Sets the default control() to the true path and returns the false path.
  // Only reads from constant memory taken from the default memory; does not
  // write anything.  It also does not take in an Object; if you wish to
  // check an Object you need to load the Object's class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);
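
  // For example (illustrative): if superk is java.lang.Object, every subk
  // answers SSC_always_true and no runtime check is needed; if superk has a
  // single concrete implementation (e.g. a final class), SSC_easy_test lets
  // the caller use one klass-pointer compare; SSC_full_test falls back to
  // the full gen_subtype_check idiom.
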
  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be cast to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool raw_mem_only);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     bool raw_mem_only = false,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val,
                  bool raw_mem_only = false, Node* *return_size_val = NULL);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);  // new IfNode
    _gvn.set_type(iff, iff->Value(&_gvn));  // Value may be known at parse time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // range-check and null-check removal happen later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);  // new IfNode
    _gvn.transform(iff);  // Value may be known at parse time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // range-check and null-check removal happen later
    return iff;
  }
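
  // Illustrative use (a sketch of the common pattern; value is a
  // hypothetical int node):
  //   Node* cmp = _gvn.transform(new (C, 3) CmpINode(value, intcon(0)));
  //   Node* bol = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq));
  //   IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
  //   set_control(_gvn.transform(new (C, 1) IfTrueNode(iff)));   // taken path
  //   Node* not_taken = _gvn.transform(new (C, 1) IfFalseNode(iff));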
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
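
// Illustrative use (a sketch of the common pattern; bol is a hypothetical
// BoolNode that is almost always true):
// {
//   BuildCutout unless(this, bol, PROB_MAX);
//   // the cutout body must not fall through:
//   uncommon_trap(Deoptimization::Reason_range_check,
//                 Deoptimization::Action_make_not_entrant);
// }
// // control here continues along the "then" (likely) path of bol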
