8026796: Make replace_in_map() on parent maps generic

author      roland
date        Wed, 13 Aug 2014 11:00:22 +0200
changeset   7041:411e30e5fbb8
parent      7040:da00a41842a5
child       7042:92baebeb744b

8026796: Make replace_in_map() on parent maps generic
Summary: propagate node replacements along control flow edges to callers
Reviewed-by: kvn, vlivanov
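
The change replaces the old walk of parent parsers with per-safepoint bookkeeping: every SafePointNode now carries a list of (initial, improved) node pairs recorded by GraphKit::replace_in_map(); that list is merged at control-flow joins and applied to caller maps when parsing returns to the caller or when a late-inlined call is replaced. Below is a minimal standalone C++ sketch of that bookkeeping; the types and names are simplified stand-ins for illustration only, not the HotSpot classes touched by the diff that follows.

    #include <algorithm>
    #include <vector>

    // Stand-in for one (initial, improved) pair recorded by replace_in_map().
    struct ReplacedPair {
        int initial;   // node id before improvement
        int improved;  // node id after improvement
        bool operator==(const ReplacedPair& o) const {
            return initial == o.initial && improved == o.improved;
        }
    };

    // Stand-in for the ReplacedNodes list carried by every safepoint/map.
    struct ReplacedNodesModel {
        std::vector<ReplacedPair> pairs;

        // record(): remember a replacement, ignoring duplicates.
        void record(int initial, int improved) {
            ReplacedPair p{initial, improved};
            if (std::find(pairs.begin(), pairs.end(), p) == pairs.end()) {
                pairs.push_back(p);
            }
        }

        // merge_with(): at a control-flow merge, keep only the pairs valid on both paths.
        void merge_with(const ReplacedNodesModel& other) {
            std::vector<ReplacedPair> kept;
            for (const ReplacedPair& p : pairs) {
                if (std::find(other.pairs.begin(), other.pairs.end(), p) != other.pairs.end()) {
                    kept.push_back(p);
                }
            }
            pairs = kept;
        }
    };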

src/share/vm/opto/c2_globals.hpp
src/share/vm/opto/callGenerator.cpp
src/share/vm/opto/callGenerator.hpp
src/share/vm/opto/callnode.cpp
src/share/vm/opto/callnode.hpp
src/share/vm/opto/compile.cpp
src/share/vm/opto/compile.hpp
src/share/vm/opto/doCall.cpp
src/share/vm/opto/graphKit.cpp
src/share/vm/opto/graphKit.hpp
src/share/vm/opto/library_call.cpp
src/share/vm/opto/node.cpp
src/share/vm/opto/parse.hpp
src/share/vm/opto/parse1.cpp
src/share/vm/opto/replacednodes.cpp
src/share/vm/opto/replacednodes.hpp
src/share/vm/runtime/arguments.cpp
src/share/vm/utilities/growableArray.hpp
     1.1 --- a/src/share/vm/opto/c2_globals.hpp	Wed Apr 23 12:37:36 2014 +0200
     1.2 +++ b/src/share/vm/opto/c2_globals.hpp	Wed Aug 13 11:00:22 2014 +0200
     1.3 @@ -653,9 +653,6 @@
     1.4    product(bool, UseMathExactIntrinsics, true,                               \
     1.5            "Enables intrinsification of various java.lang.Math functions")   \
     1.6                                                                              \
     1.7 -  experimental(bool, ReplaceInParentMaps, false,                            \
     1.8 -          "Propagate type improvements in callers of inlinee if possible")  \
     1.9 -                                                                            \
    1.10    product(bool, UseTypeSpeculation, true,                                   \
    1.11            "Speculatively propagate types from profiles")                    \
    1.12                                                                              \
     2.1 --- a/src/share/vm/opto/callGenerator.cpp	Wed Apr 23 12:37:36 2014 +0200
     2.2 +++ b/src/share/vm/opto/callGenerator.cpp	Wed Aug 13 11:00:22 2014 +0200
     2.3 @@ -63,12 +63,12 @@
     2.4    }
     2.5  
     2.6    virtual bool      is_parse() const           { return true; }
     2.7 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
     2.8 +  virtual JVMState* generate(JVMState* jvms);
     2.9    int is_osr() { return _is_osr; }
    2.10  
    2.11  };
    2.12  
    2.13 -JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    2.14 +JVMState* ParseGenerator::generate(JVMState* jvms) {
    2.15    Compile* C = Compile::current();
    2.16  
    2.17    if (is_osr()) {
    2.18 @@ -80,7 +80,7 @@
    2.19      return NULL;  // bailing out of the compile; do not try to parse
    2.20    }
    2.21  
    2.22 -  Parse parser(jvms, method(), _expected_uses, parent_parser);
    2.23 +  Parse parser(jvms, method(), _expected_uses);
    2.24    // Grab signature for matching/allocation
    2.25  #ifdef ASSERT
    2.26    if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    2.27 @@ -119,12 +119,12 @@
    2.28        _separate_io_proj(separate_io_proj)
    2.29    {
    2.30    }
    2.31 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
    2.32 +  virtual JVMState* generate(JVMState* jvms);
    2.33  
    2.34    CallStaticJavaNode* call_node() const { return _call_node; }
    2.35  };
    2.36  
    2.37 -JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    2.38 +JVMState* DirectCallGenerator::generate(JVMState* jvms) {
    2.39    GraphKit kit(jvms);
    2.40    bool is_static = method()->is_static();
    2.41    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
    2.42 @@ -171,10 +171,10 @@
    2.43             vtable_index >= 0, "either invalid or usable");
    2.44    }
    2.45    virtual bool      is_virtual() const          { return true; }
    2.46 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
    2.47 +  virtual JVMState* generate(JVMState* jvms);
    2.48  };
    2.49  
    2.50 -JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    2.51 +JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
    2.52    GraphKit kit(jvms);
    2.53    Node* receiver = kit.argument(0);
    2.54  
    2.55 @@ -276,7 +276,7 @@
    2.56    // Convert the CallStaticJava into an inline
    2.57    virtual void do_late_inline();
    2.58  
    2.59 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    2.60 +  virtual JVMState* generate(JVMState* jvms) {
    2.61      Compile *C = Compile::current();
    2.62      C->print_inlining_skip(this);
    2.63  
    2.64 @@ -290,7 +290,7 @@
    2.65      // that the late inlining logic can distinguish between fall
    2.66      // through and exceptional uses of the memory and io projections
    2.67      // as is done for allocations and macro expansion.
    2.68 -    return DirectCallGenerator::generate(jvms, parent_parser);
    2.69 +    return DirectCallGenerator::generate(jvms);
    2.70    }
    2.71  
    2.72    virtual void print_inlining_late(const char* msg) {
    2.73 @@ -389,7 +389,7 @@
    2.74    }
    2.75  
    2.76    // Now perform the inlining using the synthesized JVMState
    2.77 -  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
    2.78 +  JVMState* new_jvms = _inline_cg->generate(jvms);
    2.79    if (new_jvms == NULL)  return;  // no change
    2.80    if (C->failing())      return;
    2.81  
    2.82 @@ -407,7 +407,7 @@
    2.83    C->env()->notice_inlined_method(_inline_cg->method());
    2.84    C->set_inlining_progress(true);
    2.85  
    2.86 -  kit.replace_call(call, result);
    2.87 +  kit.replace_call(call, result, true);
    2.88  }
    2.89  
    2.90  
    2.91 @@ -429,8 +429,8 @@
    2.92  
    2.93    virtual bool is_mh_late_inline() const { return true; }
    2.94  
    2.95 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    2.96 -    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
    2.97 +  virtual JVMState* generate(JVMState* jvms) {
    2.98 +    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    2.99      if (_input_not_const) {
   2.100        // inlining won't be possible so no need to enqueue right now.
   2.101        call_node()->set_generator(this);
   2.102 @@ -477,13 +477,13 @@
   2.103    LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
   2.104      LateInlineCallGenerator(method, inline_cg) {}
   2.105  
   2.106 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
   2.107 +  virtual JVMState* generate(JVMState* jvms) {
   2.108      Compile *C = Compile::current();
   2.109      C->print_inlining_skip(this);
   2.110  
   2.111      C->add_string_late_inline(this);
   2.112  
   2.113 -    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
   2.114 +    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
   2.115      return new_jvms;
   2.116    }
   2.117  
   2.118 @@ -500,13 +500,13 @@
   2.119    LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
   2.120      LateInlineCallGenerator(method, inline_cg) {}
   2.121  
   2.122 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
   2.123 +  virtual JVMState* generate(JVMState* jvms) {
   2.124      Compile *C = Compile::current();
   2.125      C->print_inlining_skip(this);
   2.126  
   2.127      C->add_boxing_late_inline(this);
   2.128  
   2.129 -    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
   2.130 +    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
   2.131      return new_jvms;
   2.132    }
   2.133  };
   2.134 @@ -542,7 +542,7 @@
   2.135    virtual bool      is_virtual() const          { return _is_virtual; }
   2.136    virtual bool      is_deferred() const         { return true; }
   2.137  
   2.138 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   2.139 +  virtual JVMState* generate(JVMState* jvms);
   2.140  };
   2.141  
   2.142  
   2.143 @@ -552,12 +552,12 @@
   2.144    return new WarmCallGenerator(ci, if_cold, if_hot);
   2.145  }
   2.146  
   2.147 -JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   2.148 +JVMState* WarmCallGenerator::generate(JVMState* jvms) {
   2.149    Compile* C = Compile::current();
   2.150    if (C->log() != NULL) {
   2.151      C->log()->elem("warm_call bci='%d'", jvms->bci());
   2.152    }
   2.153 -  jvms = _if_cold->generate(jvms, parent_parser);
   2.154 +  jvms = _if_cold->generate(jvms);
   2.155    if (jvms != NULL) {
   2.156      Node* m = jvms->map()->control();
   2.157      if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
   2.158 @@ -618,7 +618,7 @@
   2.159    virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
   2.160    virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
   2.161  
   2.162 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   2.163 +  virtual JVMState* generate(JVMState* jvms);
   2.164  };
   2.165  
   2.166  
   2.167 @@ -630,7 +630,7 @@
   2.168  }
   2.169  
   2.170  
   2.171 -JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   2.172 +JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
   2.173    GraphKit kit(jvms);
   2.174    PhaseGVN& gvn = kit.gvn();
   2.175    // We need an explicit receiver null_check before checking its type.
   2.176 @@ -648,6 +648,10 @@
   2.177      return kit.transfer_exceptions_into_jvms();
   2.178    }
   2.179  
   2.180 +  // Make a copy of the replaced nodes in case we need to restore them
   2.181 +  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
   2.182 +  replaced_nodes.clone();
   2.183 +
   2.184    Node* exact_receiver = receiver;  // will get updated in place...
   2.185    Node* slow_ctl = kit.type_check_receiver(receiver,
   2.186                                             _predicted_receiver, _hit_prob,
   2.187 @@ -658,7 +662,7 @@
   2.188    { PreserveJVMState pjvms(&kit);
   2.189      kit.set_control(slow_ctl);
   2.190      if (!kit.stopped()) {
   2.191 -      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
   2.192 +      slow_jvms = _if_missed->generate(kit.sync_jvms());
   2.193        if (kit.failing())
   2.194          return NULL;  // might happen because of NodeCountInliningCutoff
   2.195        assert(slow_jvms != NULL, "must be");
   2.196 @@ -679,12 +683,12 @@
   2.197    kit.replace_in_map(receiver, exact_receiver);
   2.198  
   2.199    // Make the hot call:
   2.200 -  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
   2.201 +  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
   2.202    if (new_jvms == NULL) {
   2.203      // Inline failed, so make a direct call.
   2.204      assert(_if_hit->is_inline(), "must have been a failed inline");
   2.205      CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
   2.206 -    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
   2.207 +    new_jvms = cg->generate(kit.sync_jvms());
   2.208    }
   2.209    kit.add_exception_states_from(new_jvms);
   2.210    kit.set_jvms(new_jvms);
   2.211 @@ -701,6 +705,11 @@
   2.212      return kit.transfer_exceptions_into_jvms();
   2.213    }
   2.214  
   2.215 +  // There are 2 branches and the replaced nodes are only valid on
   2.216 +  // one: restore the replaced nodes to what they were before the
   2.217 +  // branch.
   2.218 +  kit.map()->set_replaced_nodes(replaced_nodes);
   2.219 +
   2.220    // Finish the diamond.
   2.221    kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
   2.222    RegionNode* region = new (kit.C) RegionNode(3);
   2.223 @@ -891,7 +900,7 @@
   2.224    virtual bool      is_inlined()   const    { return true; }
   2.225    virtual bool      is_intrinsic() const    { return true; }
   2.226  
   2.227 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   2.228 +  virtual JVMState* generate(JVMState* jvms);
   2.229  };
   2.230  
   2.231  
   2.232 @@ -901,7 +910,7 @@
   2.233  }
   2.234  
   2.235  
   2.236 -JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   2.237 +JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
   2.238    // The code we want to generate here is:
   2.239    //    if (receiver == NULL)
   2.240    //        uncommon_Trap
   2.241 @@ -961,7 +970,7 @@
   2.242      if (!kit.stopped()) {
   2.243        PreserveJVMState pjvms(&kit);
   2.244        // Generate intrinsic code:
   2.245 -      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
   2.246 +      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
   2.247        if (new_jvms == NULL) {
   2.248          // Intrinsic failed, use normal compilation path for this predicate.
   2.249          slow_region->add_req(kit.control());
   2.250 @@ -986,7 +995,7 @@
   2.251      PreserveJVMState pjvms(&kit);
   2.252      // Generate normal compilation code:
   2.253      kit.set_control(gvn.transform(slow_region));
   2.254 -    JVMState* new_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
   2.255 +    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
   2.256      if (kit.failing())
   2.257        return NULL;  // might happen because of NodeCountInliningCutoff
   2.258      assert(new_jvms != NULL, "must be");
   2.259 @@ -1093,7 +1102,7 @@
   2.260    virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
   2.261    virtual bool      is_trap() const             { return true; }
   2.262  
   2.263 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   2.264 +  virtual JVMState* generate(JVMState* jvms);
   2.265  };
   2.266  
   2.267  
   2.268 @@ -1105,7 +1114,7 @@
   2.269  }
   2.270  
   2.271  
   2.272 -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   2.273 +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
   2.274    GraphKit kit(jvms);
   2.275    // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   2.276    int nargs = method()->arg_size();
     3.1 --- a/src/share/vm/opto/callGenerator.hpp	Wed Apr 23 12:37:36 2014 +0200
     3.2 +++ b/src/share/vm/opto/callGenerator.hpp	Wed Aug 13 11:00:22 2014 +0200
     3.3 @@ -31,8 +31,6 @@
     3.4  #include "opto/type.hpp"
     3.5  #include "runtime/deoptimization.hpp"
     3.6  
     3.7 -class Parse;
     3.8 -
     3.9  //---------------------------CallGenerator-------------------------------------
    3.10  // The subclasses of this class handle generation of ideal nodes for
    3.11  // call sites and method entry points.
    3.12 @@ -112,7 +110,7 @@
    3.13    //
    3.14    // If the result is NULL, it means that this CallGenerator was unable
    3.15    // to handle the given call, and another CallGenerator should be consulted.
    3.16 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
    3.17 +  virtual JVMState* generate(JVMState* jvms) = 0;
    3.18  
    3.19    // How to generate a call site that is inlined:
    3.20    static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
     4.1 --- a/src/share/vm/opto/callnode.cpp	Wed Apr 23 12:37:36 2014 +0200
     4.2 +++ b/src/share/vm/opto/callnode.cpp	Wed Aug 13 11:00:22 2014 +0200
     4.3 @@ -1089,6 +1089,7 @@
     4.4  #ifndef PRODUCT
     4.5  void SafePointNode::dump_spec(outputStream *st) const {
     4.6    st->print(" SafePoint ");
     4.7 +  _replaced_nodes.dump(st);
     4.8  }
     4.9  #endif
    4.10  
     5.1 --- a/src/share/vm/opto/callnode.hpp	Wed Apr 23 12:37:36 2014 +0200
     5.2 +++ b/src/share/vm/opto/callnode.hpp	Wed Aug 13 11:00:22 2014 +0200
     5.3 @@ -30,6 +30,7 @@
     5.4  #include "opto/multnode.hpp"
     5.5  #include "opto/opcodes.hpp"
     5.6  #include "opto/phaseX.hpp"
     5.7 +#include "opto/replacednodes.hpp"
     5.8  #include "opto/type.hpp"
     5.9  
    5.10  // Portions of code courtesy of Clifford Click
    5.11 @@ -335,6 +336,7 @@
    5.12    OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
    5.13    JVMState* const _jvms;      // Pointer to list of JVM State objects
    5.14    const TypePtr*  _adr_type;  // What type of memory does this node produce?
     5.15 +  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
    5.16  
    5.17    // Many calls take *all* of memory as input,
    5.18    // but some produce a limited subset of that memory as output.
    5.19 @@ -426,6 +428,37 @@
    5.20    void               set_next_exception(SafePointNode* n);
    5.21    bool                   has_exceptions() const { return next_exception() != NULL; }
    5.22  
    5.23 +  // Helper methods to operate on replaced nodes
    5.24 +  ReplacedNodes replaced_nodes() const {
    5.25 +    return _replaced_nodes;
    5.26 +  }
    5.27 +
    5.28 +  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    5.29 +    _replaced_nodes = replaced_nodes;
    5.30 +  }
    5.31 +
    5.32 +  void clone_replaced_nodes() {
    5.33 +    _replaced_nodes.clone();
    5.34 +  }
    5.35 +  void record_replaced_node(Node* initial, Node* improved) {
    5.36 +    _replaced_nodes.record(initial, improved);
    5.37 +  }
    5.38 +  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    5.39 +    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
    5.40 +  }
    5.41 +  void delete_replaced_nodes() {
    5.42 +    _replaced_nodes.reset();
    5.43 +  }
    5.44 +  void apply_replaced_nodes() {
    5.45 +    _replaced_nodes.apply(this);
    5.46 +  }
    5.47 +  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    5.48 +    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
    5.49 +  }
    5.50 +  bool has_replaced_nodes() const {
    5.51 +    return !_replaced_nodes.is_empty();
    5.52 +  }
    5.53 +
    5.54    // Standard Node stuff
    5.55    virtual int            Opcode() const;
    5.56    virtual bool           pinned() const { return true; }
     6.1 --- a/src/share/vm/opto/compile.cpp	Wed Apr 23 12:37:36 2014 +0200
     6.2 +++ b/src/share/vm/opto/compile.cpp	Wed Aug 13 11:00:22 2014 +0200
     6.3 @@ -391,6 +391,11 @@
     6.4    uint next = 0;
     6.5    while (next < useful.size()) {
     6.6      Node *n = useful.at(next++);
     6.7 +    if (n->is_SafePoint()) {
     6.8 +      // We're done with a parsing phase. Replaced nodes are not valid
     6.9 +      // beyond that point.
    6.10 +      n->as_SafePoint()->delete_replaced_nodes();
    6.11 +    }
    6.12      // Use raw traversal of out edges since this code removes out edges
    6.13      int max = n->outcnt();
    6.14      for (int j = 0; j < max; ++j) {
    6.15 @@ -670,7 +675,6 @@
    6.16                    _inlining_incrementally(false),
    6.17                    _print_inlining_list(NULL),
    6.18                    _print_inlining_idx(0),
    6.19 -                  _preserve_jvm_state(0),
    6.20                    _interpreter_frame_size(0) {
    6.21    C = this;
    6.22  
    6.23 @@ -782,7 +786,7 @@
    6.24        return;
    6.25      }
    6.26      JVMState* jvms = build_start_state(start(), tf());
    6.27 -    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
    6.28 +    if ((jvms = cg->generate(jvms)) == NULL) {
    6.29        record_method_not_compilable("method parse failed");
    6.30        return;
    6.31      }
    6.32 @@ -977,7 +981,6 @@
    6.33      _inlining_incrementally(false),
    6.34      _print_inlining_list(NULL),
    6.35      _print_inlining_idx(0),
    6.36 -    _preserve_jvm_state(0),
    6.37      _allowed_reasons(0),
    6.38      _interpreter_frame_size(0) {
    6.39    C = this;
    6.40 @@ -1910,6 +1913,8 @@
    6.41      for_igvn()->clear();
    6.42      gvn->replace_with(&igvn);
    6.43  
    6.44 +    _late_inlines_pos = _late_inlines.length();
    6.45 +
    6.46      while (_boxing_late_inlines.length() > 0) {
    6.47        CallGenerator* cg = _boxing_late_inlines.pop();
    6.48        cg->do_late_inline();
    6.49 @@ -1973,8 +1978,8 @@
    6.50      if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
    6.51        if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
    6.52          // PhaseIdealLoop is expensive so we only try it once we are
    6.53 -        // out of loop and we only try it again if the previous helped
    6.54 -        // got the number of nodes down significantly
    6.55 +        // out of live nodes and we only try it again if the previous
     6.56 +        // helped get the number of nodes down significantly
    6.57          PhaseIdealLoop ideal_loop( igvn, false, true );
    6.58          if (failing())  return;
    6.59          low_live_nodes = live_nodes();
    6.60 @@ -2066,6 +2071,10 @@
    6.61      // Inline valueOf() methods now.
    6.62      inline_boxing_calls(igvn);
    6.63  
    6.64 +    if (AlwaysIncrementalInline) {
    6.65 +      inline_incrementally(igvn);
    6.66 +    }
    6.67 +
    6.68      print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
    6.69  
    6.70      if (failing())  return;
     7.1 --- a/src/share/vm/opto/compile.hpp	Wed Apr 23 12:37:36 2014 +0200
     7.2 +++ b/src/share/vm/opto/compile.hpp	Wed Aug 13 11:00:22 2014 +0200
     7.3 @@ -429,9 +429,6 @@
     7.4    // Remove the speculative part of types and clean up the graph
     7.5    void remove_speculative_types(PhaseIterGVN &igvn);
     7.6  
     7.7 -  // Are we within a PreserveJVMState block?
     7.8 -  int _preserve_jvm_state;
     7.9 -
    7.10    void* _replay_inline_data; // Pointer to data loaded from file
    7.11  
    7.12   public:
    7.13 @@ -1196,21 +1193,6 @@
    7.14  
    7.15    // Auxiliary method for randomized fuzzing/stressing
    7.16    static bool randomized_select(int count);
    7.17 -
    7.18 -  // enter a PreserveJVMState block
    7.19 -  void inc_preserve_jvm_state() {
    7.20 -    _preserve_jvm_state++;
    7.21 -  }
    7.22 -
    7.23 -  // exit a PreserveJVMState block
    7.24 -  void dec_preserve_jvm_state() {
    7.25 -    _preserve_jvm_state--;
    7.26 -    assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
    7.27 -  }
    7.28 -
    7.29 -  bool has_preserve_jvm_state() const {
    7.30 -    return _preserve_jvm_state > 0;
    7.31 -  }
    7.32  };
    7.33  
    7.34  #endif // SHARE_VM_OPTO_COMPILE_HPP
     8.1 --- a/src/share/vm/opto/doCall.cpp	Wed Apr 23 12:37:36 2014 +0200
     8.2 +++ b/src/share/vm/opto/doCall.cpp	Wed Aug 13 11:00:22 2014 +0200
     8.3 @@ -523,7 +523,7 @@
     8.4    // because exceptions don't return to the call site.)
     8.5    profile_call(receiver);
     8.6  
     8.7 -  JVMState* new_jvms = cg->generate(jvms, this);
     8.8 +  JVMState* new_jvms = cg->generate(jvms);
     8.9    if (new_jvms == NULL) {
    8.10      // When inlining attempt fails (e.g., too many arguments),
    8.11      // it may contaminate the current compile state, making it
    8.12 @@ -537,7 +537,7 @@
    8.13      // intrinsic was expecting to optimize. Should always be possible to
    8.14      // get a normal java call that may inline in that case
    8.15      cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    8.16 -    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
    8.17 +    if ((new_jvms = cg->generate(jvms)) == NULL) {
    8.18        guarantee(failing(), "call failed to generate:  calls should work");
    8.19        return;
    8.20      }
     9.1 --- a/src/share/vm/opto/graphKit.cpp	Wed Apr 23 12:37:36 2014 +0200
     9.2 +++ b/src/share/vm/opto/graphKit.cpp	Wed Aug 13 11:00:22 2014 +0200
     9.3 @@ -428,6 +428,7 @@
     9.4        }
     9.5      }
     9.6    }
     9.7 +  phi_map->merge_replaced_nodes_with(ex_map);
     9.8  }
     9.9  
    9.10  //--------------------------use_exception_state--------------------------------
    9.11 @@ -641,7 +642,6 @@
    9.12    _map    = kit->map();   // preserve the map
    9.13    _sp     = kit->sp();
    9.14    kit->set_map(clone_map ? kit->clone_map() : NULL);
    9.15 -  Compile::current()->inc_preserve_jvm_state();
    9.16  #ifdef ASSERT
    9.17    _bci    = kit->bci();
    9.18    Parse* parser = kit->is_Parse();
    9.19 @@ -659,7 +659,6 @@
    9.20  #endif
    9.21    kit->set_map(_map);
    9.22    kit->set_sp(_sp);
    9.23 -  Compile::current()->dec_preserve_jvm_state();
    9.24  }
    9.25  
    9.26  
    9.27 @@ -1398,60 +1397,17 @@
    9.28    // on the map.  This includes locals, stack, and monitors
    9.29    // of the current (innermost) JVM state.
    9.30  
    9.31 -  if (!ReplaceInParentMaps) {
    9.32 +  // don't let inconsistent types from profiling escape this
    9.33 +  // method
    9.34 +
    9.35 +  const Type* told = _gvn.type(old);
    9.36 +  const Type* tnew = _gvn.type(neww);
    9.37 +
    9.38 +  if (!tnew->higher_equal(told)) {
    9.39      return;
    9.40    }
    9.41  
    9.42 -  // PreserveJVMState doesn't do a deep copy so we can't modify
    9.43 -  // parents
    9.44 -  if (Compile::current()->has_preserve_jvm_state()) {
    9.45 -    return;
    9.46 -  }
    9.47 -
    9.48 -  Parse* parser = is_Parse();
    9.49 -  bool progress = true;
    9.50 -  Node* ctrl = map()->in(0);
    9.51 -  // Follow the chain of parsers and see whether the update can be
    9.52 -  // done in the map of callers. We can do the replace for a caller if
    9.53 -  // the current control post dominates the control of a caller.
    9.54 -  while (parser != NULL && parser->caller() != NULL && progress) {
    9.55 -    progress = false;
    9.56 -    Node* parent_map = parser->caller()->map();
    9.57 -    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
    9.58 -
    9.59 -    Node* parent_ctrl = parent_map->in(0);
    9.60 -
    9.61 -    while (parent_ctrl->is_Region()) {
    9.62 -      Node* n = parent_ctrl->as_Region()->is_copy();
    9.63 -      if (n == NULL) {
    9.64 -        break;
    9.65 -      }
    9.66 -      parent_ctrl = n;
    9.67 -    }
    9.68 -
    9.69 -    for (;;) {
    9.70 -      if (ctrl == parent_ctrl) {
    9.71 -        // update the map of the exits which is the one that will be
    9.72 -        // used when compilation resume after inlining
    9.73 -        parser->exits().map()->replace_edge(old, neww);
    9.74 -        progress = true;
    9.75 -        break;
    9.76 -      }
    9.77 -      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
    9.78 -        ctrl = ctrl->in(0)->in(0);
    9.79 -      } else if (ctrl->is_Region()) {
    9.80 -        Node* n = ctrl->as_Region()->is_copy();
    9.81 -        if (n == NULL) {
    9.82 -          break;
    9.83 -        }
    9.84 -        ctrl = n;
    9.85 -      } else {
    9.86 -        break;
    9.87 -      }
    9.88 -    }
    9.89 -
    9.90 -    parser = parser->parent_parser();
    9.91 -  }
    9.92 +  map()->record_replaced_node(old, neww);
    9.93  }
    9.94  
    9.95  
    9.96 @@ -1855,12 +1811,16 @@
    9.97  
    9.98  
    9.99  // Replace the call with the current state of the kit.
   9.100 -void GraphKit::replace_call(CallNode* call, Node* result) {
   9.101 +void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
   9.102    JVMState* ejvms = NULL;
   9.103    if (has_exceptions()) {
   9.104      ejvms = transfer_exceptions_into_jvms();
   9.105    }
   9.106  
   9.107 +  ReplacedNodes replaced_nodes = map()->replaced_nodes();
   9.108 +  ReplacedNodes replaced_nodes_exception;
   9.109 +  Node* ex_ctl = top();
   9.110 +
   9.111    SafePointNode* final_state = stop();
   9.112  
   9.113    // Find all the needed outputs of this call
   9.114 @@ -1877,6 +1837,10 @@
   9.115      C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
   9.116    }
   9.117    if (callprojs.fallthrough_memproj != NULL) {
   9.118 +    if (final_mem->is_MergeMem()) {
   9.119 +      // Parser's exits MergeMem was not transformed but may be optimized
   9.120 +      final_mem = _gvn.transform(final_mem);
   9.121 +    }
   9.122      C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
   9.123    }
   9.124    if (callprojs.fallthrough_ioproj != NULL) {
   9.125 @@ -1908,10 +1872,13 @@
   9.126  
   9.127      // Load my combined exception state into the kit, with all phis transformed:
   9.128      SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
   9.129 +    replaced_nodes_exception = ex_map->replaced_nodes();
   9.130  
   9.131      Node* ex_oop = ekit.use_exception_state(ex_map);
   9.132 +
   9.133      if (callprojs.catchall_catchproj != NULL) {
   9.134        C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
   9.135 +      ex_ctl = ekit.control();
   9.136      }
   9.137      if (callprojs.catchall_memproj != NULL) {
   9.138        C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
   9.139 @@ -1944,6 +1911,13 @@
   9.140        _gvn.transform(wl.pop());
   9.141      }
   9.142    }
   9.143 +
   9.144 +  if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
   9.145 +    replaced_nodes.apply(C, final_ctl);
   9.146 +  }
   9.147 +  if (!ex_ctl->is_top() && do_replaced_nodes) {
   9.148 +    replaced_nodes_exception.apply(C, ex_ctl);
   9.149 +  }
   9.150  }
   9.151  
   9.152  
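With the parent-map walk gone, GraphKit::replace_in_map() above now does two things: it refuses replacements whose new type is not at least as precise as the old one (so inconsistent profile-derived types cannot escape the method), and it simply records the pair on the current map for later propagation. A small self-contained sketch of the precision guard, using a toy type lattice rather than C2's Type::higher_equal():

    #include <cassert>

    // Toy stand-in for a type lattice: a smaller rank means a more precise type.
    struct ToyType {
        int rank;  // e.g. 0 = exact class, 1 = interface, 2 = Object
        bool at_least_as_precise_as(const ToyType& other) const { return rank <= other.rank; }
    };

    // Guard modelled on the new replace_in_map(): only record the replacement
    // when the improved node's type does not lose precision.
    bool should_record(const ToyType& told, const ToyType& tnew) {
        return tnew.at_least_as_precise_as(told);
    }

    int main() {
        assert( should_record(ToyType{2}, ToyType{0}));  // Object -> exact class: record
        assert(!should_record(ToyType{0}, ToyType{2}));  // would widen the type: skip
        return 0;
    }
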
    10.1 --- a/src/share/vm/opto/graphKit.hpp	Wed Apr 23 12:37:36 2014 +0200
    10.2 +++ b/src/share/vm/opto/graphKit.hpp	Wed Aug 13 11:00:22 2014 +0200
    10.3 @@ -685,7 +685,7 @@
    10.4    // Replace the call with the current state of the kit.  Requires
    10.5    // that the call was generated with separate io_projs so that
    10.6    // exceptional control flow can be handled properly.
    10.7 -  void replace_call(CallNode* call, Node* result);
    10.8 +  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);
    10.9  
   10.10    // helper functions for statistics
   10.11    void increment_counter(address counter_addr);   // increment a debug counter
    11.1 --- a/src/share/vm/opto/library_call.cpp	Wed Apr 23 12:37:36 2014 +0200
    11.2 +++ b/src/share/vm/opto/library_call.cpp	Wed Aug 13 11:00:22 2014 +0200
    11.3 @@ -66,7 +66,7 @@
    11.4    virtual bool is_predicated() const { return _predicates_count > 0; }
    11.5    virtual int  predicates_count() const { return _predicates_count; }
    11.6    virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
    11.7 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
    11.8 +  virtual JVMState* generate(JVMState* jvms);
    11.9    virtual Node* generate_predicate(JVMState* jvms, int predicate);
   11.10    vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
   11.11  };
   11.12 @@ -614,7 +614,7 @@
   11.13    // Nothing to do here.
   11.14  }
   11.15  
   11.16 -JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
   11.17 +JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   11.18    LibraryCallKit kit(jvms, this);
   11.19    Compile* C = kit.C;
   11.20    int nodes = C->unique();
    12.1 --- a/src/share/vm/opto/node.cpp	Wed Apr 23 12:37:36 2014 +0200
    12.2 +++ b/src/share/vm/opto/node.cpp	Wed Aug 13 11:00:22 2014 +0200
    12.3 @@ -527,6 +527,9 @@
    12.4    if (n->is_Call()) {
    12.5      n->as_Call()->clone_jvms(C);
    12.6    }
    12.7 +  if (n->is_SafePoint()) {
    12.8 +    n->as_SafePoint()->clone_replaced_nodes();
    12.9 +  }
   12.10    return n;                     // Return the clone
   12.11  }
   12.12  
   12.13 @@ -622,6 +625,9 @@
   12.14    if (is_expensive()) {
   12.15      compile->remove_expensive_node(this);
   12.16    }
   12.17 +  if (is_SafePoint()) {
   12.18 +    as_SafePoint()->delete_replaced_nodes();
   12.19 +  }
   12.20  #ifdef ASSERT
   12.21    // We will not actually delete the storage, but we'll make the node unusable.
   12.22    *(address*)this = badAddress;  // smash the C++ vtbl, probably
    13.1 --- a/src/share/vm/opto/parse.hpp	Wed Apr 23 12:37:36 2014 +0200
    13.2 +++ b/src/share/vm/opto/parse.hpp	Wed Aug 13 11:00:22 2014 +0200
    13.3 @@ -357,12 +357,13 @@
    13.4    int _est_switch_depth;        // Debugging SwitchRanges.
    13.5  #endif
    13.6  
    13.7 -  // parser for the caller of the method of this object
    13.8 -  Parse* const _parent;
    13.9 +  bool         _first_return;                  // true if return is the first to be parsed
   13.10 +  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
    13.11 +  uint         _new_idx;                       // any node with an _idx at or above this was new during this parsing. Used to trim the replaced nodes list.
   13.12  
   13.13   public:
   13.14    // Constructor
   13.15 -  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);
   13.16 +  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
   13.17  
   13.18    virtual Parse* is_Parse() const { return (Parse*)this; }
   13.19  
   13.20 @@ -419,8 +420,6 @@
   13.21      return block()->successor_for_bci(bci);
   13.22    }
   13.23  
   13.24 -  Parse* parent_parser() const { return _parent; }
   13.25 -
   13.26   private:
   13.27    // Create a JVMS & map for the initial state of this method.
   13.28    SafePointNode* create_entry_map();
    14.1 --- a/src/share/vm/opto/parse1.cpp	Wed Apr 23 12:37:36 2014 +0200
    14.2 +++ b/src/share/vm/opto/parse1.cpp	Wed Aug 13 11:00:22 2014 +0200
    14.3 @@ -381,8 +381,8 @@
    14.4  
    14.5  //------------------------------Parse------------------------------------------
    14.6  // Main parser constructor.
    14.7 -Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
    14.8 -  : _exits(caller), _parent(parent)
    14.9 +Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
   14.10 +  : _exits(caller)
   14.11  {
   14.12    // Init some variables
   14.13    _caller = caller;
   14.14 @@ -395,6 +395,9 @@
   14.15    _entry_bci = InvocationEntryBci;
   14.16    _tf = NULL;
   14.17    _block = NULL;
   14.18 +  _first_return = true;
   14.19 +  _replaced_nodes_for_exceptions = false;
   14.20 +  _new_idx = C->unique();
   14.21    debug_only(_block_count = -1);
   14.22    debug_only(_blocks = (Block*)-1);
   14.23  #ifndef PRODUCT
   14.24 @@ -895,6 +898,10 @@
   14.25    for (uint i = 0; i < TypeFunc::Parms; i++) {
   14.26      caller.map()->set_req(i, ex_map->in(i));
   14.27    }
   14.28 +  if (ex_map->has_replaced_nodes()) {
   14.29 +    _replaced_nodes_for_exceptions = true;
   14.30 +  }
   14.31 +  caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx);
   14.32    // ...and the exception:
   14.33    Node*          ex_oop        = saved_ex_oop(ex_map);
   14.34    SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
   14.35 @@ -963,7 +970,7 @@
   14.36    bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
   14.37  
   14.38    // record exit from a method if compiled while Dtrace is turned on.
   14.39 -  if (do_synch || C->env()->dtrace_method_probes()) {
   14.40 +  if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
   14.41      // First move the exception list out of _exits:
   14.42      GraphKit kit(_exits.transfer_exceptions_into_jvms());
   14.43      SafePointNode* normal_map = kit.map();  // keep this guy safe
   14.44 @@ -988,6 +995,9 @@
   14.45        if (C->env()->dtrace_method_probes()) {
   14.46          kit.make_dtrace_method_exit(method());
   14.47        }
   14.48 +      if (_replaced_nodes_for_exceptions) {
   14.49 +        kit.map()->apply_replaced_nodes();
   14.50 +      }
   14.51        // Done with exception-path processing.
   14.52        ex_map = kit.make_exception_state(ex_oop);
   14.53        assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
   14.54 @@ -1007,6 +1017,7 @@
   14.55        _exits.add_exception_state(ex_map);
   14.56      }
   14.57    }
   14.58 +  _exits.map()->apply_replaced_nodes();
   14.59  }
   14.60  
   14.61  //-----------------------------create_entry_map-------------------------------
   14.62 @@ -1021,6 +1032,9 @@
   14.63      return NULL;
   14.64    }
   14.65  
   14.66 +  // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
   14.67 +  _caller->map()->delete_replaced_nodes();
   14.68 +
   14.69    // If this is an inlined method, we may have to do a receiver null check.
   14.70    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
   14.71      GraphKit kit(_caller);
   14.72 @@ -1044,6 +1058,8 @@
   14.73  
   14.74    SafePointNode* inmap = _caller->map();
   14.75    assert(inmap != NULL, "must have inmap");
   14.76 +  // In case of null check on receiver above
   14.77 +  map()->transfer_replaced_nodes_from(inmap, _new_idx);
   14.78  
   14.79    uint i;
   14.80  
   14.81 @@ -1673,6 +1689,8 @@
   14.82        set_control(r->nonnull_req());
   14.83      }
   14.84  
   14.85 +    map()->merge_replaced_nodes_with(newin);
   14.86 +
   14.87      // newin has been subsumed into the lazy merge, and is now dead.
   14.88      set_block(save_block);
   14.89  
   14.90 @@ -2077,6 +2095,13 @@
   14.91      phi->add_req(value);
   14.92    }
   14.93  
   14.94 +  if (_first_return) {
   14.95 +    _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
   14.96 +    _first_return = false;
   14.97 +  } else {
   14.98 +    _exits.map()->merge_replaced_nodes_with(map());
   14.99 +  }
  14.100 +
  14.101    stop_and_kill_map();          // This CFG path dies here
  14.102  }
  14.103  
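In parse1.cpp's return_current() above, the first return seeds the exit map's replaced-nodes list and every later return intersects with it, so only replacements that hold on all return paths are propagated to the caller. Roughly, with the simplified model from the first sketch (names hypothetical):

    // Exit-map accumulation across several return paths (simplified model).
    ReplacedNodesModel exit_state;
    bool first_return = true;

    void on_return(const ReplacedNodesModel& path_state) {
        if (first_return) {
            exit_state = path_state;            // transfer_replaced_nodes_from()
            first_return = false;
        } else {
            exit_state.merge_with(path_state);  // keep the intersection only
        }
    }
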
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/src/share/vm/opto/replacednodes.cpp	Wed Aug 13 11:00:22 2014 +0200
    15.3 @@ -0,0 +1,219 @@
    15.4 +/*
    15.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    15.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.7 + *
    15.8 + * This code is free software; you can redistribute it and/or modify it
    15.9 + * under the terms of the GNU General Public License version 2 only, as
   15.10 + * published by the Free Software Foundation.
   15.11 + *
   15.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   15.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   15.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   15.15 + * version 2 for more details (a copy is included in the LICENSE file that
   15.16 + * accompanied this code).
   15.17 + *
   15.18 + * You should have received a copy of the GNU General Public License version
   15.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   15.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   15.21 + *
   15.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   15.23 + * or visit www.oracle.com if you need additional information or have any
   15.24 + * questions.
   15.25 + *
   15.26 + */
   15.27 +
   15.28 +#include "precompiled.hpp"
   15.29 +#include "opto/cfgnode.hpp"
   15.30 +#include "opto/phaseX.hpp"
   15.31 +#include "opto/replacednodes.hpp"
   15.32 +
   15.33 +void ReplacedNodes::allocate_if_necessary() {
   15.34 +  if (_replaced_nodes == NULL) {
   15.35 +    _replaced_nodes = new GrowableArray<ReplacedNode>();
   15.36 +  }
   15.37 +}
   15.38 +
   15.39 +bool ReplacedNodes::is_empty() const {
   15.40 +  return _replaced_nodes == NULL || _replaced_nodes->length() == 0;
   15.41 +}
   15.42 +
   15.43 +bool ReplacedNodes::has_node(const ReplacedNode& r) const {
   15.44 +  return _replaced_nodes->find(r) != -1;
   15.45 +}
   15.46 +
   15.47 +bool ReplacedNodes::has_target_node(Node* n) const {
   15.48 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
   15.49 +    if (_replaced_nodes->at(i).improved() == n) {
   15.50 +      return true;
   15.51 +    }
   15.52 +  }
   15.53 +  return false;
   15.54 +}
   15.55 +
   15.56 +// Record replaced node if not seen before
   15.57 +void ReplacedNodes::record(Node* initial, Node* improved) {
   15.58 +  allocate_if_necessary();
   15.59 +  ReplacedNode r(initial, improved);
   15.60 +  if (!has_node(r)) {
   15.61 +    _replaced_nodes->push(r);
   15.62 +  }
   15.63 +}
   15.64 +
   15.65 +// Copy replaced nodes from one map to another. idx is used to
   15.66 +// identify nodes that are too new to be of interest in the target
   15.67 +// node list.
   15.68 +void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) {
   15.69 +  if (other.is_empty()) {
   15.70 +    return;
   15.71 +  }
   15.72 +  allocate_if_necessary();
   15.73 +  for (int i = 0; i < other._replaced_nodes->length(); i++) {
   15.74 +    ReplacedNode replaced = other._replaced_nodes->at(i);
   15.75 +    // Only transfer the nodes that can actually be useful
   15.76 +    if (!has_node(replaced) && (replaced.initial()->_idx < idx || has_target_node(replaced.initial()))) {
   15.77 +      _replaced_nodes->push(replaced);
   15.78 +    }
   15.79 +  }
   15.80 +}
   15.81 +
   15.82 +void ReplacedNodes::clone() {
   15.83 +  if (_replaced_nodes != NULL) {
   15.84 +    GrowableArray<ReplacedNode>* replaced_nodes_clone = new GrowableArray<ReplacedNode>();
   15.85 +    replaced_nodes_clone->appendAll(_replaced_nodes);
   15.86 +    _replaced_nodes = replaced_nodes_clone;
   15.87 +  }
   15.88 +}
   15.89 +
   15.90 +void ReplacedNodes::reset() {
   15.91 +  if (_replaced_nodes != NULL) {
   15.92 +    _replaced_nodes->clear();
   15.93 +  }
   15.94 +}
   15.95 +
    15.96 +// Perform node replacement (used when returning to caller)
   15.97 +void ReplacedNodes::apply(Node* n) {
   15.98 +  if (is_empty()) {
   15.99 +    return;
  15.100 +  }
  15.101 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
  15.102 +    ReplacedNode replaced = _replaced_nodes->at(i);
  15.103 +    n->replace_edge(replaced.initial(), replaced.improved());
  15.104 +  }
  15.105 +}
  15.106 +
  15.107 +static void enqueue_use(Node* n, Node* use, Unique_Node_List& work) {
  15.108 +  if (use->is_Phi()) {
  15.109 +    Node* r = use->in(0);
  15.110 +    assert(r->is_Region(), "Phi should have Region");
  15.111 +    for (uint i = 1; i < use->req(); i++) {
  15.112 +      if (use->in(i) == n) {
  15.113 +        work.push(r->in(i));
  15.114 +      }
  15.115 +    }
  15.116 +  } else {
  15.117 +    work.push(use);
  15.118 +  }
  15.119 +}
  15.120 +
   15.121 +// Perform node replacement following late inlining
  15.122 +void ReplacedNodes::apply(Compile* C, Node* ctl) {
  15.123 +  // ctl is the control on exit of the method that was late inlined
  15.124 +  if (is_empty()) {
  15.125 +    return;
  15.126 +  }
  15.127 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
  15.128 +    ReplacedNode replaced = _replaced_nodes->at(i);
  15.129 +    Node* initial = replaced.initial();
  15.130 +    Node* improved = replaced.improved();
  15.131 +    assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control");
  15.132 +
  15.133 +    ResourceMark rm;
  15.134 +    Unique_Node_List work;
  15.135 +    // Go over all the uses of the node that is considered for replacement...
  15.136 +    for (DUIterator j = initial->outs(); initial->has_out(j); j++) {
  15.137 +      Node* use = initial->out(j);
  15.138 +
  15.139 +      if (use == improved || use->outcnt() == 0) {
  15.140 +        continue;
  15.141 +      }
  15.142 +      work.clear();
  15.143 +      enqueue_use(initial, use, work);
  15.144 +      bool replace = true;
  15.145 +      // Check that this use is dominated by ctl. Go ahead with the
  15.146 +      // replacement if it is.
  15.147 +      while (work.size() != 0 && replace) {
  15.148 +        Node* n = work.pop();
  15.149 +        if (use->outcnt() == 0) {
  15.150 +          continue;
  15.151 +        }
  15.152 +        if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) {
  15.153 +          int depth = 0;
  15.154 +          Node *m = n;
  15.155 +          if (!n->is_CFG()) {
  15.156 +            n = n->in(0);
  15.157 +          }
  15.158 +          assert(n->is_CFG(), "should be CFG now");
  15.159 +          while(n != ctl) {
  15.160 +            n = IfNode::up_one_dom(n);
  15.161 +            depth++;
  15.162 +            // limit search depth
  15.163 +            if (depth >= 100 || n == NULL) {
  15.164 +              replace = false;
  15.165 +              break;
  15.166 +            }
  15.167 +          }
  15.168 +        } else {
  15.169 +          for (DUIterator k = n->outs(); n->has_out(k); k++) {
  15.170 +            enqueue_use(n, n->out(k), work);
  15.171 +          }
  15.172 +        }
  15.173 +      }
  15.174 +      if (replace) {
  15.175 +        bool is_in_table = C->initial_gvn()->hash_delete(use);
  15.176 +        int replaced = use->replace_edge(initial, improved);
  15.177 +        if (is_in_table) {
  15.178 +          C->initial_gvn()->hash_find_insert(use);
  15.179 +        }
  15.180 +        C->record_for_igvn(use);
  15.181 +
  15.182 +        assert(replaced > 0, "inconsistent");
  15.183 +        --j;
  15.184 +      }
  15.185 +    }
  15.186 +  }
  15.187 +}
  15.188 +
  15.189 +void ReplacedNodes::dump(outputStream *st) const {
  15.190 +  if (!is_empty()) {
   15.191 +    st->print("replaced nodes: ");
   15.192 +    for (int i = 0; i < _replaced_nodes->length(); i++) {
   15.193 +      st->print("%d->%d", _replaced_nodes->at(i).initial()->_idx, _replaced_nodes->at(i).improved()->_idx);
   15.194 +      if (i < _replaced_nodes->length()-1) {
   15.195 +        st->print(",");
  15.196 +      }
  15.197 +    }
  15.198 +  }
  15.199 +}
  15.200 +
   15.201 +// Merge two lists of replaced nodes at a point where control flow paths merge
  15.202 +void ReplacedNodes::merge_with(const ReplacedNodes& other) {
  15.203 +  if (is_empty()) {
  15.204 +    return;
  15.205 +  }
  15.206 +  if (other.is_empty()) {
  15.207 +    reset();
  15.208 +    return;
  15.209 +  }
  15.210 +  int shift = 0;
  15.211 +  int len = _replaced_nodes->length();
  15.212 +  for (int i = 0; i < len; i++) {
  15.213 +    if (!other.has_node(_replaced_nodes->at(i))) {
  15.214 +      shift++;
  15.215 +    } else if (shift > 0) {
  15.216 +      _replaced_nodes->at_put(i-shift, _replaced_nodes->at(i));
  15.217 +    }
  15.218 +  }
  15.219 +  if (shift > 0) {
  15.220 +    _replaced_nodes->trunc_to(len - shift);
  15.221 +  }
  15.222 +}
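Two details of the new replacednodes.cpp are worth spelling out. ReplacedNodes::merge_with() keeps only the pairs present in both lists and compacts the survivors in place, i.e. it computes an intersection; and the late-inlining ReplacedNodes::apply(Compile*, Node*) only rewires a use when it can walk the dominator chain from that use back to the inlinee's exit control within 100 steps, otherwise it leaves the use alone. A tiny worked example of the merge, again using the simplified model from the first sketch:

    // Path A recorded {5->12, 7->9}; path B recorded only {5->12}.
    // After the join, only the pair valid on both paths survives.
    void merge_example() {
        ReplacedNodesModel a, b;
        a.record(5, 12);
        a.record(7, 9);
        b.record(5, 12);
        a.merge_with(b);   // a.pairs now holds just {5 -> 12}
    }
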
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/src/share/vm/opto/replacednodes.hpp	Wed Aug 13 11:00:22 2014 +0200
    16.3 @@ -0,0 +1,81 @@
    16.4 +/*
    16.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    16.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.7 + *
    16.8 + * This code is free software; you can redistribute it and/or modify it
    16.9 + * under the terms of the GNU General Public License version 2 only, as
   16.10 + * published by the Free Software Foundation.
   16.11 + *
   16.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   16.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   16.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   16.15 + * version 2 for more details (a copy is included in the LICENSE file that
   16.16 + * accompanied this code).
   16.17 + *
   16.18 + * You should have received a copy of the GNU General Public License version
   16.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   16.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   16.21 + *
   16.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   16.23 + * or visit www.oracle.com if you need additional information or have any
   16.24 + * questions.
   16.25 + *
   16.26 + */
   16.27 +
   16.28 +#ifndef SHARE_VM_OPTO_REPLACEDNODES_HPP
   16.29 +#define SHARE_VM_OPTO_REPLACEDNODES_HPP
   16.30 +
   16.31 +#include "opto/connode.hpp"
   16.32 +
   16.33 +// During parsing, when a node is "improved",
   16.34 +// GraphKit::replace_in_map() is called to update the current map so
   16.35 +// that the improved node is used from that point
   16.36 +// on. GraphKit::replace_in_map() doesn't operate on the callers maps
   16.37 +// and so some optimization opportunities may be lost. The
   16.38 +// ReplacedNodes class addresses that problem.
   16.39 +//
    16.40 +// A ReplacedNodes object is a list of pairs of nodes. Every
   16.41 +// SafePointNode carries a ReplacedNodes object. Every time
   16.42 +// GraphKit::replace_in_map() is called, a new pair of nodes is pushed
   16.43 +// on the list of replaced nodes. When control flow paths merge, their
   16.44 +// replaced nodes are also merged. When parsing exits a method to
   16.45 +// return to a caller, the replaced nodes on the exit path are used to
   16.46 +// update the caller's map.
   16.47 +class ReplacedNodes VALUE_OBJ_CLASS_SPEC {
   16.48 + private:
   16.49 +  class ReplacedNode VALUE_OBJ_CLASS_SPEC {
   16.50 +  private:
   16.51 +    Node* _initial;
   16.52 +    Node* _improved;
   16.53 +  public:
   16.54 +    ReplacedNode() : _initial(NULL), _improved(NULL) {}
   16.55 +    ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {}
   16.56 +    Node* initial() const  { return _initial; }
   16.57 +    Node* improved() const { return _improved; }
   16.58 +
   16.59 +    bool operator==(const ReplacedNode& other) {
   16.60 +      return _initial == other._initial && _improved == other._improved;
   16.61 +    }
   16.62 +  };
   16.63 +  GrowableArray<ReplacedNode>* _replaced_nodes;
   16.64 +
   16.65 +  void allocate_if_necessary();
   16.66 +  bool has_node(const ReplacedNode& r) const;
   16.67 +  bool has_target_node(Node* n) const;
   16.68 +
   16.69 + public:
   16.70 +  ReplacedNodes()
   16.71 +    : _replaced_nodes(NULL) {}
   16.72 +
   16.73 +  void clone();
   16.74 +  void record(Node* initial, Node* improved);
   16.75 +  void transfer_from(const ReplacedNodes& other, uint idx);
   16.76 +  void reset();
   16.77 +  void apply(Node* n);
   16.78 +  void merge_with(const ReplacedNodes& other);
   16.79 +  bool is_empty() const;
   16.80 +  void dump(outputStream *st) const;
   16.81 +  void apply(Compile* C, Node* ctl);
   16.82 +};
   16.83 +
   16.84 +#endif // SHARE_VM_OPTO_REPLACEDNODES_HPP
    17.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Apr 23 12:37:36 2014 +0200
    17.2 +++ b/src/share/vm/runtime/arguments.cpp	Wed Aug 13 11:00:22 2014 +0200
    17.3 @@ -3792,10 +3792,6 @@
    17.4      // nothing to use the profiling, turn if off
    17.5      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
    17.6    }
    17.7 -  if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) {
    17.8 -    // Doing the replace in parent maps helps speculation
    17.9 -    FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
   17.10 -  }
   17.11  #endif
   17.12  
   17.13    if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
    18.1 --- a/src/share/vm/utilities/growableArray.hpp	Wed Apr 23 12:37:36 2014 +0200
    18.2 +++ b/src/share/vm/utilities/growableArray.hpp	Wed Aug 13 11:00:22 2014 +0200
    18.3 @@ -349,6 +349,7 @@
    18.4  
    18.5    // inserts the given element before the element at index i
    18.6    void insert_before(const int idx, const E& elem) {
    18.7 +    assert(0 <= idx && idx <= _len, "illegal index");
    18.8      check_nesting();
    18.9      if (_len == _max) grow(_len);
   18.10      for (int j = _len - 1; j >= idx; j--) {
   18.11 @@ -360,7 +361,7 @@
   18.12  
   18.13    void appendAll(const GrowableArray<E>* l) {
   18.14      for (int i = 0; i < l->_len; i++) {
   18.15 -      raw_at_put_grow(_len, l->_data[i], 0);
   18.16 +      raw_at_put_grow(_len, l->_data[i], E());
   18.17      }
   18.18    }
   18.19  
