src/share/vm/opto/doCall.cpp

changeset 4414:5698813d45eb
parent    4409:d092d1b31229
child     4447:f1de9dbc914e
--- a/src/share/vm/opto/doCall.cpp	Tue Jan 08 11:30:51 2013 -0800
+++ b/src/share/vm/opto/doCall.cpp	Wed Jan 09 15:37:23 2013 -0800
@@ -61,7 +61,7 @@
   }
 }
 
-CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual,
+CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                        JVMState* jvms, bool allow_inline,
                                        float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
   ciMethod*       caller   = jvms->method();
@@ -82,7 +82,7 @@
   // See how many times this site has been invoked.
   int site_count = profile.count();
   int receiver_count = -1;
-  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
+  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
     // Receivers in the profile structure are ordered by call counts
     // so that the most called (major) receiver is profile.receiver(0).
     receiver_count = profile.receiver_count(0);
@@ -94,7 +94,7 @@
     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                     log->identify(callee), site_count, prof_factor);
-    if (call_is_virtual)  log->print(" virtual='1'");
+    if (call_does_dispatch)  log->print(" virtual='1'");
     if (allow_inline)     log->print(" inline='1'");
     if (receiver_count >= 0) {
       log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
@@ -111,12 +111,12 @@
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
   if (allow_inline && allow_intrinsics) {
-    CallGenerator* cg = find_intrinsic(callee, call_is_virtual);
+    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
       if (cg->is_predicted()) {
         // Code without intrinsic but, hopefully, inlined.
         CallGenerator* inline_cg = this->call_generator(callee,
-              vtable_index, call_is_virtual, jvms, allow_inline, prof_factor, false);
+              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
         if (inline_cg != NULL) {
           cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
         }
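A note on the is_predicted() branch above: a predicted intrinsic is emitted behind a runtime check, and CallGenerator::for_predicted_intrinsic stitches it together with inline_cg, the ordinary inlined body taken when the check fails. A minimal standalone sketch of that shape (plain C++; the guard and all names are illustrative, not the HotSpot CallGenerator API):

    #include <cstdio>

    // Hypothetical guarded fast path standing in for an intrinsic.
    static bool guard_holds(int n) { return n >= 0 && n < 31; }
    static long long intrinsic_pow2(int n) { return 1LL << n; }  // fast, valid only under the guard
    static long long inlined_pow2(int n) {                       // general fallback body
      long long r = 1;
      while (n-- > 0) r *= 2;
      return r;
    }

    // The "predicated" combination: a runtime check picks the path.
    static long long pow2(int n) {
      return guard_holds(n) ? intrinsic_pow2(n) : inlined_pow2(n);
    }

    int main() {
      printf("%lld %lld\n", pow2(10), pow2(40));  // guarded fast path, then fallback
      return 0;
    }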
@@ -131,7 +131,7 @@
   // have bytecodes and so normal inlining fails.
   if (callee->is_method_handle_intrinsic()) {
     CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
-    assert (cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
+    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
     return cg;
   }
 
@@ -149,7 +149,7 @@
     float expected_uses = past_uses;
 
     // Try inlining a bytecoded method:
-    if (!call_is_virtual) {
+    if (!call_does_dispatch) {
       InlineTree* ilt;
       if (UseOldInlining) {
         ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
@@ -188,14 +188,14 @@
         } else if (require_inline || !InlineWarmCalls) {
           return cg;
         } else {
-          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor);
+          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
           return CallGenerator::for_warm_call(ci, cold_cg, cg);
         }
       }
     }
 
     // Try using the type profile.
-    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
+    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
       // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
       bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
       ciMethod* receiver_method = NULL;
@@ -209,7 +209,7 @@
       if (receiver_method != NULL) {
         // The single majority receiver sufficiently outweighs the minority.
         CallGenerator* hit_cg = this->call_generator(receiver_method,
-              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
+              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
         if (hit_cg != NULL) {
           // Look up second receiver.
           CallGenerator* next_hit_cg = NULL;
@@ -219,7 +219,7 @@
                                                                profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
-                                  vtable_index, !call_is_virtual, jvms,
+                                  vtable_index, !call_does_dispatch, jvms,
                                   allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
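The profile-driven logic above inlines for one receiver type when the major receiver clears TypeProfileMajorReceiverPercent, and for two when a second profiled receiver also yields an inlineable target (subject to UseOnlyInlinedBimorphic). A simplified standalone model of that decision, keeping only the counts (plain C++; names and thresholds are illustrative, and the real code also consults CHA and the generated CallGenerators):

    #include <cstdio>

    enum Decision { VIRTUAL_CALL, MONOMORPHIC, BIMORPHIC };

    // Toy model: pick a devirtualization strategy from receiver-profile counts.
    // count0/count1 are the call counts of the two hottest receiver types.
    static Decision decide(long site_count, long count0, long count1,
                           int major_receiver_percent /* cf. TypeProfileMajorReceiverPercent */) {
      if (site_count <= 0 || count0 <= 0)
        return VIRTUAL_CALL;                       // no usable profile: keep the v-call
      bool have_major = 100.0 * count0 / site_count >= major_receiver_percent;
      if (count1 > 0)
        return BIMORPHIC;                          // two hot receivers: guard both
      return have_major ? MONOMORPHIC : VIRTUAL_CALL;
    }

    int main() {
      printf("%d\n", decide(1000, 950,   0, 90));  // 1: monomorphic
      printf("%d\n", decide(1000, 600, 350, 90));  // 2: bimorphic
      printf("%d\n", decide(1000, 400,   0, 90));  // 0: profile too flat
      return 0;
    }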
@@ -265,7 +265,7 @@
 
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
-  if (call_is_virtual) {
+  if (call_does_dispatch) {
     return CallGenerator::for_virtual_call(callee, vtable_index);
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
@@ -397,6 +397,7 @@
   // orig_callee is the resolved callee which's signature includes the
   // appendix argument.
   const int nargs = orig_callee->arg_size();
+  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
   if (iter().has_appendix()) {
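The new flag records whether the callee is one of the JSR 292 signature-polymorphic methods (e.g. MethodHandle.invokeExact), whose descriptor is re-typed at each call site; it is consumed in the return-type hunk further below, since for such calls the callee's declared return type may legitimately differ from the call site's. The real check is MethodHandles::is_signature_polymorphic over the intrinsic id; a name-based toy approximation for orientation only (plain C++, purely illustrative):

    #include <cstring>

    // Rough, name-based stand-in for the intrinsic-id test in HotSpot.
    static bool looks_signature_polymorphic(const char* holder, const char* name) {
      if (strcmp(holder, "java/lang/invoke/MethodHandle") != 0) return false;
      static const char* const kNames[] = {
        "invoke", "invokeExact",                        // user-visible (JLS 15.12)
        "invokeBasic", "linkToVirtual", "linkToStatic",
        "linkToSpecial", "linkToInterface"              // JVM-internal intrinsics
      };
      for (const char* const n : kNames)
        if (strcmp(name, n) == 0) return true;
      return false;
    }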
@@ -413,25 +414,18 @@
   // Then we may introduce a run-time check and inline on the path where it succeeds.
   // The other path may uncommon_trap, check for another receiver, or do a v-call.
 
-  // Choose call strategy.
-  bool call_is_virtual = is_virtual_or_interface;
-  int vtable_index = Method::invalid_vtable_index;
-  ciMethod* callee = orig_callee;
+  // Try to get the most accurate receiver type
+  ciMethod* callee             = orig_callee;
+  int       vtable_index       = Method::invalid_vtable_index;
+  bool      call_does_dispatch = false;
 
-  // Try to get the most accurate receiver type
   if (is_virtual_or_interface) {
     Node*             receiver_node = stack(sp() - nargs);
     const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
-    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, orig_callee, receiver_type);
-
-    // Have the call been sufficiently improved such that it is no longer a virtual?
-    if (optimized_virtual_method != NULL) {
-      callee          = optimized_virtual_method;
-      call_is_virtual = false;
-    } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
-      // We can make a vtable call at this site
-      vtable_index = callee->resolve_vtable_index(method()->holder(), klass);
-    }
+    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
+    callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
+                                      is_virtual,
+                                      call_does_dispatch, vtable_index);  // out-parameters
   }
 
   // Note:  It's OK to try to inline a virtual call.
@@ -447,7 +441,7 @@
   // Decide call tactic.
   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
   // It decides whether inlining is desirable or not.
-  CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());
 
   // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
   orig_callee = callee = NULL;
@@ -487,7 +481,7 @@
     // the call site, perhaps because it did not match a pattern the
     // intrinsic was expecting to optimize. Should always be possible to
     // get a normal java call that may inline in that case
-    cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
+    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
     if ((new_jvms = cg->generate(jvms)) == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
@@ -522,55 +516,44 @@
     round_double_result(cg->method());
 
     ciType* rtype = cg->method()->return_type();
-    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw())) {
+    ciType* ctype = declared_signature->return_type();
+
+    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
       // Be careful here with return types.
-      ciType* ctype = declared_signature->return_type();
       if (ctype != rtype) {
         BasicType rt = rtype->basic_type();
         BasicType ct = ctype->basic_type();
-        Node* retnode = peek();
         if (ct == T_VOID) {
           // It's OK for a method  to return a value that is discarded.
           // The discarding does not require any special action from the caller.
           // The Java code knows this, at VerifyType.isNullConversion.
           pop_node(rt);  // whatever it was, pop it
-          retnode = top();
         } else if (rt == T_INT || is_subword_type(rt)) {
-          // FIXME: This logic should be factored out.
-          if (ct == T_BOOLEAN) {
-            retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0x1)) );
-          } else if (ct == T_CHAR) {
-            retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
-          } else if (ct == T_BYTE) {
-            retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
-            retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
-          } else if (ct == T_SHORT) {
-            retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
-            retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
-          } else {
-            assert(ct == T_INT, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
-          }
+          // Nothing.  These cases are handled in lambda form bytecode.
+          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
         } else if (rt == T_OBJECT || rt == T_ARRAY) {
           assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
           if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
             const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
             if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
+              Node* retnode = pop();
               Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
-              pop();
               push(cast_obj);
             }
           }
         } else {
-          assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct));
+          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
           // push a zero; it's better than getting an oop/int mismatch
-          retnode = pop_node(rt);
-          retnode = zerocon(ct);
+          pop_node(rt);
+          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
         }
         // Now that the value is well-behaved, continue with the call-site type.
         rtype = ctype;
       }
+    } else {
+      assert(rtype == ctype, "mismatched return types");  // symbolic resolution enforces this
     }
 
     // If the return type of the method is not loaded, assert that the
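The bulk of this hunk removes the FIXME block that manually re-normalized subword return values with AndI/LShiftI/RShiftI node chains; with JSR 292 call sites now compiled from lambda form bytecode, the value already arrives in canonical int form, so only the assert remains. For reference, what the deleted chains computed, written out as plain C++ on 32-bit ints (the unsigned casts merely sidestep signed-shift pitfalls that the IR nodes do not have):

    #include <cassert>

    static int norm_boolean(int x) { return x & 0x1; }     // AndI 0x1: keep the low bit
    static int norm_char(int x)    { return x & 0xFFFF; }  // AndI 0xFFFF: zero-extend 16 bits
    static int norm_byte(int x)    {                       // LShiftI 24 then RShiftI 24:
      return (int)((unsigned)x << 24) >> 24;               // sign-extend the low 8 bits
    }
    static int norm_short(int x)   {                       // LShiftI 16 then RShiftI 16:
      return (int)((unsigned)x << 16) >> 16;               // sign-extend the low 16 bits
    }

    int main() {
      assert(norm_byte(0xFF)     == -1);      // 0xFF as a signed byte is -1
      assert(norm_short(0x12345) == 0x2345);  // high bits dropped, then sign-extended
      assert(norm_char(-1)       == 0xFFFF);  // char is an unsigned 16-bit value
      assert(norm_boolean(2)     == 0);       // only bit 0 survives
      return 0;
    }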
@@ -888,17 +871,39 @@
 #endif //PRODUCT
 
 
+ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
+                                         ciMethod* callee, const TypeOopPtr* receiver_type,
+                                         bool is_virtual,
+                                         bool& call_does_dispatch, int& vtable_index) {
+  // Set default values for out-parameters.
+  call_does_dispatch = true;
+  vtable_index       = Method::invalid_vtable_index;
+
+  // Choose call strategy.
+  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type);
+
+  // Has the call been sufficiently improved such that it is no longer virtual?
+  if (optimized_virtual_method != NULL) {
+    callee             = optimized_virtual_method;
+    call_does_dispatch = false;
+  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
+    // We can make a vtable call at this site
+    vtable_index = callee->resolve_vtable_index(caller->holder(), klass);
+  }
+  return callee;
+}
+
 // Identify possible target method and inlining style
-ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
-                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
+ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
+                                     ciMethod* callee, const TypeOopPtr* receiver_type) {
   // only use for virtual or interface calls
 
   // If it is obviously final, do not bother to call find_monomorphic_target,
   // because the class hierarchy checks are not needed, and may fail due to
   // incompletely loaded classes.  Since we do our own class loading checks
   // in this module, we may confidently bind to any method.
-  if (dest_method->can_be_statically_bound()) {
-    return dest_method;
+  if (callee->can_be_statically_bound()) {
+    return callee;
   }
 
   // Attempt to improve the receiver
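The new Compile::optimize_virtual_call above centralizes what Parse::do_call previously did inline: it returns the (possibly improved) callee and unconditionally reassigns both reference out-parameters, so callers may pass them in with any initial values. The calling contract, reduced to a standalone shape (illustrative types and names, not the HotSpot signatures):

    struct Method;  // opaque stand-in for ciMethod

    // Sketch of the contract: the return value is the method to call, and both
    // out-parameters are always (re)written, so the caller's initial values
    // do not matter -- exactly how Parse::do_call now uses the real function.
    static Method* choose_call_strategy(Method* callee, Method* improved_target,
                                        bool& call_does_dispatch, int& vtable_index) {
      call_does_dispatch = true;         // default: the call really dispatches
      vtable_index       = -1;           // default: no vtable slot chosen
      if (improved_target != nullptr) {
        call_does_dispatch = false;      // devirtualized: statically bound
        return improved_target;
      }
      return callee;                     // unchanged: stays a dispatched call
    }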
@@ -907,8 +912,8 @@
   if (receiver_type != NULL) {
     // Array methods are all inherited from Object, and are monomorphic.
     if (receiver_type->isa_aryptr() &&
-        dest_method->holder() == env()->Object_klass()) {
-      return dest_method;
+        callee->holder() == env()->Object_klass()) {
+      return callee;
     }
 
     // All other interesting cases are instance klasses.
@@ -928,7 +933,7 @@
   }
 
   ciInstanceKlass*   calling_klass = caller->holder();
-  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
+  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
   if (cha_monomorphic_target != NULL) {
     assert(!cha_monomorphic_target->is_abstract(), "");
     // Look at the method-receiver type.  Does it add "too much information"?
@@ -946,10 +951,10 @@
         cha_monomorphic_target->print();
         tty->cr();
       }
-      if (C->log() != NULL) {
-        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
-                       C->log()->identify(klass),
-                       C->log()->identify(cha_monomorphic_target));
+      if (log() != NULL) {
+        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
+                       log()->identify(klass),
+                       log()->identify(cha_monomorphic_target));
       }
       cha_monomorphic_target = NULL;
     }
@@ -961,7 +966,7 @@
     // by dynamic class loading.  Be sure to test the "static" receiver
     // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
-    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
+    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
     return cha_monomorphic_target;
   }
 
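assert_unique_concrete_method above records a CHA dependency rather than a fact: the generated code is only valid while cha_monomorphic_target stays the sole concrete implementation reachable from the receiver class, and class loading that breaks the assumption must deoptimize dependent code. A toy model of such a dependency registry (plain C++; HotSpot's Dependencies machinery operates on ci objects and nmethods instead):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy registry: compiled code records assumptions; class loading rechecks them.
    struct Dependency { std::string receiver_klass; std::string unique_method; };
    static std::vector<Dependency> g_deps;

    static void assert_unique_concrete_method(const std::string& klass,
                                              const std::string& method) {
      g_deps.push_back({klass, method});   // recorded at compile time
    }

    static void on_subclass_loaded(const std::string& of_klass) {
      for (const Dependency& d : g_deps)
        if (d.receiver_klass == of_klass)  // assumption may now be violated
          printf("deoptimize: %s::%s no longer unique\n",
                 d.receiver_klass.c_str(), d.unique_method.c_str());
    }

    int main() {
      assert_unique_concrete_method("AbstractList", "indexOf");
      on_subclass_loaded("AbstractList");  // triggers the deopt message
      return 0;
    }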
@@ -970,7 +975,7 @@
   if (actual_receiver_is_exact) {
     // In case of evolution, there is a dependence on every inlined method, since each
     // such method can be changed when its class is redefined.
-    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
+    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
     if (exact_method != NULL) {
 #ifndef PRODUCT
       if (PrintOpto) {
