Merge

author:      iveresov
date:        Thu, 17 Oct 2013 10:58:45 -0700
changeset:   5929:8f4bb1773fd9
parent:      5899:e8703d708e6e
parent:      5928:90abdd727e64
child:       5930:7114c4597ae3
files:       src/share/vm/interpreter/linkResolver.cpp
             test/compiler/8013496/Test8013496.sh
     1.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Oct 16 11:48:03 2013 -0700
     1.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Oct 17 10:58:45 2013 -0700
     1.3 @@ -3100,6 +3100,10 @@
     1.4    }
     1.5  }
     1.6  
     1.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
     1.8 +  fatal("Type profiling not implemented on this platform");
     1.9 +}
    1.10 +
    1.11  void LIR_Assembler::align_backward_branch_target() {
    1.12    __ align(OptoLoopAlignment);
    1.13  }
     2.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Oct 16 11:48:03 2013 -0700
     2.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Oct 17 10:58:45 2013 -0700
     2.3 @@ -1076,6 +1076,25 @@
     2.4  
     2.5    __ verify_not_null_oop(Oexception);
     2.6  
     2.7 +#ifdef ASSERT
     2.8 +  // check that fields in JavaThread for exception oop and issuing pc are
     2.9 +  // empty before writing to them
    2.10 +  Label oop_empty;
    2.11 +  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
    2.12 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
    2.13 +  __ br_null(scratch, false, Assembler::pt, oop_empty);
    2.14 +  __ delayed()->nop();
    2.15 +  __ stop("exception oop already set");
    2.16 +  __ bind(oop_empty);
    2.17 +
    2.18 +  Label pc_empty;
    2.19 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
    2.20 +  __ br_null(scratch, false, Assembler::pt, pc_empty);
    2.21 +  __ delayed()->nop();
    2.22 +  __ stop("exception pc already set");
    2.23 +  __ bind(pc_empty);
    2.24 +#endif
    2.25 +
    2.26    // save the exception and issuing pc in the thread
    2.27    __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
    2.28    __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
     3.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp	Wed Oct 16 11:48:03 2013 -0700
     3.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 17 10:58:45 2013 -0700
     3.3 @@ -76,6 +76,8 @@
     3.4  // GC Ergo Flags
     3.5  define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
     3.6  
     3.7 +define_pd_global(uintx, TypeProfileLevel, 0);
     3.8 +
     3.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
    3.10                                                                              \
    3.11    product(intx, UseVIS, 99,                                                 \
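
Note: TypeProfileLevel is pinned to 0 on SPARC because LIR_Assembler::emit_profile_type() (first hunk above) is a fatal stub on this platform; with the flag at 0, C1 never emits the profiling LIR, so the stub stays unreachable. The digit encoding of the flag is sketched after the x86 counterpart below.
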
     4.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Oct 16 11:48:03 2013 -0700
     4.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Oct 17 10:58:45 2013 -0700
     4.3 @@ -3581,6 +3581,7 @@
     4.4    // the pending exception will be picked up the interpreter.
     4.5    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
     4.6    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
     4.7 +  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
     4.8    __ bind(noException);
     4.9  
    4.10    // deallocate the deoptimization frame taking care to preserve the return values
     5.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Oct 16 11:48:03 2013 -0700
     5.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 17 10:58:45 2013 -0700
     5.3 @@ -3632,6 +3632,161 @@
     5.4    }
     5.5  }
     5.6  
     5.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
     5.8 +  Register obj = op->obj()->as_register();
     5.9 +  Register tmp = op->tmp()->as_pointer_register();
    5.10 +  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
    5.11 +  ciKlass* exact_klass = op->exact_klass();
    5.12 +  intptr_t current_klass = op->current_klass();
    5.13 +  bool not_null = op->not_null();
    5.14 +  bool no_conflict = op->no_conflict();
    5.15 +
    5.16 +  Label update, next, none;
    5.17 +
    5.18 +  bool do_null = !not_null;
    5.19 +  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
    5.20 +  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
    5.21 +
    5.22 +  assert(do_null || do_update, "why are we here?");
    5.23 +  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
    5.24 +
    5.25 +  __ verify_oop(obj);
    5.26 +
    5.27 +  if (tmp != obj) {
    5.28 +    __ mov(tmp, obj);
    5.29 +  }
    5.30 +  if (do_null) {
    5.31 +    __ testptr(tmp, tmp);
    5.32 +    __ jccb(Assembler::notZero, update);
    5.33 +    if (!TypeEntries::was_null_seen(current_klass)) {
    5.34 +      __ orptr(mdo_addr, TypeEntries::null_seen);
    5.35 +    }
    5.36 +    if (do_update) {
    5.37 +#ifndef ASSERT
    5.38 +      __ jmpb(next);
    5.39 +    }
    5.40 +#else
    5.41 +      __ jmp(next);
    5.42 +    }
    5.43 +  } else {
    5.44 +    __ testptr(tmp, tmp);
    5.45 +    __ jccb(Assembler::notZero, update);
     5.46 +    __ stop("unexpected null obj");
    5.47 +#endif
    5.48 +  }
    5.49 +
    5.50 +  __ bind(update);
    5.51 +
    5.52 +  if (do_update) {
    5.53 +#ifdef ASSERT
    5.54 +    if (exact_klass != NULL) {
    5.55 +      Label ok;
    5.56 +      __ load_klass(tmp, tmp);
    5.57 +      __ push(tmp);
    5.58 +      __ mov_metadata(tmp, exact_klass->constant_encoding());
    5.59 +      __ cmpptr(tmp, Address(rsp, 0));
    5.60 +      __ jccb(Assembler::equal, ok);
    5.61 +      __ stop("exact klass and actual klass differ");
    5.62 +      __ bind(ok);
    5.63 +      __ pop(tmp);
    5.64 +    }
    5.65 +#endif
    5.66 +    if (!no_conflict) {
    5.67 +      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
    5.68 +        if (exact_klass != NULL) {
    5.69 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
    5.70 +        } else {
    5.71 +          __ load_klass(tmp, tmp);
    5.72 +        }
    5.73 +
    5.74 +        __ xorptr(tmp, mdo_addr);
    5.75 +        __ testptr(tmp, TypeEntries::type_klass_mask);
    5.76 +        // klass seen before, nothing to do. The unknown bit may have been
    5.77 +        // set already but no need to check.
    5.78 +        __ jccb(Assembler::zero, next);
    5.79 +
    5.80 +        __ testptr(tmp, TypeEntries::type_unknown);
    5.81 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
    5.82 +
    5.83 +        if (TypeEntries::is_type_none(current_klass)) {
    5.84 +          __ cmpptr(mdo_addr, 0);
    5.85 +          __ jccb(Assembler::equal, none);
    5.86 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
    5.87 +          __ jccb(Assembler::equal, none);
    5.88 +          // There is a chance that the checks above (re-reading profiling
    5.89 +          // data from memory) fail if another thread has just set the
    5.90 +          // profiling to this obj's klass
    5.91 +          __ xorptr(tmp, mdo_addr);
    5.92 +          __ testptr(tmp, TypeEntries::type_klass_mask);
    5.93 +          __ jccb(Assembler::zero, next);
    5.94 +        }
    5.95 +      } else {
    5.96 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
    5.97 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
    5.98 +
    5.99 +        __ movptr(tmp, mdo_addr);
   5.100 +        __ testptr(tmp, TypeEntries::type_unknown);
   5.101 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   5.102 +      }
   5.103 +
   5.104 +      // different than before. Cannot keep accurate profile.
   5.105 +      __ orptr(mdo_addr, TypeEntries::type_unknown);
   5.106 +
   5.107 +      if (TypeEntries::is_type_none(current_klass)) {
   5.108 +        __ jmpb(next);
   5.109 +
   5.110 +        __ bind(none);
   5.111 +        // first time here. Set profile type.
   5.112 +        __ movptr(mdo_addr, tmp);
   5.113 +      }
   5.114 +    } else {
   5.115 +      // There's a single possible klass at this profile point
   5.116 +      assert(exact_klass != NULL, "should be");
   5.117 +      if (TypeEntries::is_type_none(current_klass)) {
   5.118 +        __ mov_metadata(tmp, exact_klass->constant_encoding());
   5.119 +        __ xorptr(tmp, mdo_addr);
   5.120 +        __ testptr(tmp, TypeEntries::type_klass_mask);
   5.121 +#ifdef ASSERT
   5.122 +        __ jcc(Assembler::zero, next);
   5.123 +
   5.124 +        {
   5.125 +          Label ok;
   5.126 +          __ push(tmp);
   5.127 +          __ cmpptr(mdo_addr, 0);
   5.128 +          __ jcc(Assembler::equal, ok);
   5.129 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
   5.130 +          __ jcc(Assembler::equal, ok);
   5.131 +          // may have been set by another thread
   5.132 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
   5.133 +          __ xorptr(tmp, mdo_addr);
   5.134 +          __ testptr(tmp, TypeEntries::type_mask);
   5.135 +          __ jcc(Assembler::zero, ok);
   5.136 +
   5.137 +          __ stop("unexpected profiling mismatch");
   5.138 +          __ bind(ok);
   5.139 +          __ pop(tmp);
   5.140 +        }
   5.141 +#else
   5.142 +        __ jccb(Assembler::zero, next);
   5.143 +#endif
   5.144 +        // first time here. Set profile type.
   5.145 +        __ movptr(mdo_addr, tmp);
   5.146 +      } else {
   5.147 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
   5.148 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
   5.149 +
   5.150 +        __ movptr(tmp, mdo_addr);
   5.151 +        __ testptr(tmp, TypeEntries::type_unknown);
   5.152 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   5.153 +
   5.154 +        __ orptr(mdo_addr, TypeEntries::type_unknown);
   5.155 +      }
   5.156 +    }
   5.157 +
   5.158 +    __ bind(next);
   5.159 +  }
   5.160 +}
   5.161 +
   5.162  void LIR_Assembler::emit_delay(LIR_OpDelay*) {
   5.163    Unimplemented();
   5.164  }
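
Note on the XOR sequence in emit_profile_type(): a type-profile cell is one word holding the last seen klass pointer with its low bits reserved for the null_seen and type_unknown flags, so new_klass ^ cell is zero under type_klass_mask exactly when the same klass was already recorded. A minimal C++ sketch of one cell update, assuming flag values null_seen = 1 and type_unknown = 2 (assumptions, not quoted from methodData.hpp):

    #include <stdint.h>
    // Sketch of a single TypeEntries cell update; flag values are assumed.
    typedef intptr_t cell_t;
    const cell_t null_seen       = 1;
    const cell_t type_unknown    = 2;
    const cell_t type_klass_mask = ~(null_seen | type_unknown);

    void update_type_cell(cell_t* cell, cell_t new_klass) {
      cell_t diff = new_klass ^ *cell;             // low bits of new_klass are 0
      if ((diff & type_klass_mask) == 0) return;   // same klass seen before
      if (*cell & type_unknown) return;            // profile already degraded
      if ((*cell & type_klass_mask) == 0) {        // no klass yet: record it,
        *cell = new_klass | (*cell & null_seen);   // preserving the null_seen bit
        return;
      }
      *cell |= type_unknown;                       // conflicting klass: give up
    }

The re-reads of mdo_addr before giving up mirror the comments above: another thread may have just stored this very klass, and without the second compare the cell would be marked unknown erroneously.
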
     6.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Wed Oct 16 11:48:03 2013 -0700
     6.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 17 10:58:45 2013 -0700
     6.3 @@ -79,6 +79,8 @@
     6.4  // GC Ergo Flags
     6.5  define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
     6.6  
     6.7 +define_pd_global(uintx, TypeProfileLevel, 11);
     6.8 +
     6.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
    6.10                                                                              \
    6.11    develop(bool, IEEEPrecision, true,                                        \
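
Note: TypeProfileLevel packs two independent switches into decimal digits, so the x86 default of 11 enables both profiling points at their jsr292-only setting. A sketch of the decoding as inferred from this changeset (helper names and digit assignment are assumptions, not quoted from methodData.cpp):

    // Assumed decoding of TypeProfileLevel = XY:
    //   Y (units) - type profiling of arguments at calls
    //   X (tens)  - type profiling of return values at calls
    //   each digit: 0 = off, 1 = jsr292 call sites only, 2 = all call sites
    int profile_arguments_flag() { return TypeProfileLevel % 10;         }
    int profile_return_flag()    { return (TypeProfileLevel % 100) / 10; }
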
     7.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Oct 16 11:48:03 2013 -0700
     7.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Oct 17 10:58:45 2013 -0700
     7.3 @@ -1046,6 +1046,158 @@
     7.4    }
     7.5  }
     7.6  
     7.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
     7.8 +  Label update, next, none;
     7.9 +
    7.10 +  verify_oop(obj);
    7.11 +
    7.12 +  testptr(obj, obj);
    7.13 +  jccb(Assembler::notZero, update);
    7.14 +  orptr(mdo_addr, TypeEntries::null_seen);
    7.15 +  jmpb(next);
    7.16 +
    7.17 +  bind(update);
    7.18 +  load_klass(obj, obj);
    7.19 +
    7.20 +  xorptr(obj, mdo_addr);
    7.21 +  testptr(obj, TypeEntries::type_klass_mask);
    7.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
    7.23 +                               // do. The unknown bit may have been
    7.24 +                               // set already but no need to check.
    7.25 +
    7.26 +  testptr(obj, TypeEntries::type_unknown);
    7.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
    7.28 +
    7.29 +  cmpptr(mdo_addr, 0);
    7.30 +  jccb(Assembler::equal, none);
    7.31 +  cmpptr(mdo_addr, TypeEntries::null_seen);
    7.32 +  jccb(Assembler::equal, none);
    7.33 +  // There is a chance that the checks above (re-reading profiling
    7.34 +  // data from memory) fail if another thread has just set the
    7.35 +  // profiling to this obj's klass
    7.36 +  xorptr(obj, mdo_addr);
    7.37 +  testptr(obj, TypeEntries::type_klass_mask);
    7.38 +  jccb(Assembler::zero, next);
    7.39 +
    7.40 +  // different than before. Cannot keep accurate profile.
    7.41 +  orptr(mdo_addr, TypeEntries::type_unknown);
    7.42 +  jmpb(next);
    7.43 +
    7.44 +  bind(none);
    7.45 +  // first time here. Set profile type.
    7.46 +  movptr(mdo_addr, obj);
    7.47 +
    7.48 +  bind(next);
    7.49 +}
    7.50 +
    7.51 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
    7.52 +  if (!ProfileInterpreter) {
    7.53 +    return;
    7.54 +  }
    7.55 +
    7.56 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    7.57 +    Label profile_continue;
    7.58 +
    7.59 +    test_method_data_pointer(mdp, profile_continue);
    7.60 +
    7.61 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
    7.62 +
    7.63 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    7.64 +    jcc(Assembler::notEqual, profile_continue);
    7.65 +
    7.66 +    if (MethodData::profile_arguments()) {
    7.67 +      Label done;
    7.68 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
    7.69 +      addptr(mdp, off_to_args);
    7.70 +
    7.71 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
    7.72 +        if (i > 0 || MethodData::profile_return()) {
    7.73 +          // If return value type is profiled we may have no argument to profile
    7.74 +          movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
    7.75 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
    7.76 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
    7.77 +          jcc(Assembler::less, done);
    7.78 +        }
    7.79 +        movptr(tmp, Address(callee, Method::const_offset()));
    7.80 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
    7.81 +        // stack offset o (zero based) from the start of the argument
    7.82 +        // list, for n arguments translates into offset n - o - 1 from
    7.83 +        // the end of the argument list
    7.84 +        subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
    7.85 +        subl(tmp, 1);
    7.86 +        Address arg_addr = argument_address(tmp);
    7.87 +        movptr(tmp, arg_addr);
    7.88 +
    7.89 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
    7.90 +        profile_obj_type(tmp, mdo_arg_addr);
    7.91 +
    7.92 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
    7.93 +        addptr(mdp, to_add);
    7.94 +        off_to_args += to_add;
    7.95 +      }
    7.96 +
    7.97 +      if (MethodData::profile_return()) {
    7.98 +        movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
    7.99 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
   7.100 +      }
   7.101 +
   7.102 +      bind(done);
   7.103 +
   7.104 +      if (MethodData::profile_return()) {
   7.105 +        // We're right after the type profile for the last
    7.106 +        // argument. tmp is the number of cells left in the
    7.107 +        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
    7.108 +        // if there's a return to profile.
   7.109 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
   7.110 +        shll(tmp, exact_log2(DataLayout::cell_size));
   7.111 +        addptr(mdp, tmp);
   7.112 +      }
   7.113 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
   7.114 +    } else {
   7.115 +      assert(MethodData::profile_return(), "either profile call args or call ret");
   7.116 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
   7.117 +    }
   7.118 +
   7.119 +    // mdp points right after the end of the
   7.120 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
   7.121 +    // return value type if there's one
   7.122 +
   7.123 +    bind(profile_continue);
   7.124 +  }
   7.125 +}
   7.126 +
   7.127 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
   7.128 +  assert_different_registers(mdp, ret, tmp, rsi);
   7.129 +  if (ProfileInterpreter && MethodData::profile_return()) {
   7.130 +    Label profile_continue, done;
   7.131 +
   7.132 +    test_method_data_pointer(mdp, profile_continue);
   7.133 +
   7.134 +    if (MethodData::profile_return_jsr292_only()) {
   7.135 +      // If we don't profile all invoke bytecodes we must make sure
   7.136 +      // it's a bytecode we indeed profile. We can't go back to the
    7.137 +      // beginning of the ProfileData we intend to update to check its
    7.138 +      // type because we're right after it and we don't know its
    7.139 +      // length.
   7.140 +      Label do_profile;
   7.141 +      cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
   7.142 +      jcc(Assembler::equal, do_profile);
   7.143 +      cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
   7.144 +      jcc(Assembler::equal, do_profile);
   7.145 +      get_method(tmp);
   7.146 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
   7.147 +      jcc(Assembler::notEqual, profile_continue);
   7.148 +
   7.149 +      bind(do_profile);
   7.150 +    }
   7.151 +
   7.152 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
   7.153 +    mov(tmp, ret);
   7.154 +    profile_obj_type(tmp, mdo_ret_addr);
   7.155 +
   7.156 +    bind(profile_continue);
   7.157 +  }
   7.158 +}
   7.159  
   7.160  void InterpreterMacroAssembler::profile_call(Register mdp) {
   7.161    if (ProfileInterpreter) {
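
Note on the slot arithmetic in profile_arguments_type(): the MDO records each profiled argument's stack slot o counted from the start of the argument list, while argument_address() indexes from the end of the list, hence the subtract-and-decrement pair. As a worked model:

    // For a callee with n parameter slots, MDO slot o (zero-based from the
    // start of the argument list) maps to interpreter index n - o - 1.
    int interpreter_arg_index(int n, int o) { return n - o - 1; }
    // e.g. n = 3, o = 0 (the receiver) -> index 2, deepest on the stack
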
     8.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Oct 16 11:48:03 2013 -0700
     8.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Oct 17 10:58:45 2013 -0700
     8.3 @@ -215,6 +215,9 @@
     8.4  
     8.5    void profile_taken_branch(Register mdp, Register bumped_count);
     8.6    void profile_not_taken_branch(Register mdp);
     8.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
     8.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
     8.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
    8.10    void profile_call(Register mdp);
    8.11    void profile_final_call(Register mdp);
    8.12    void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
     9.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Oct 16 11:48:03 2013 -0700
     9.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Oct 17 10:58:45 2013 -0700
     9.3 @@ -1067,6 +1067,159 @@
     9.4    }
     9.5  }
     9.6  
     9.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
     9.8 +  Label update, next, none;
     9.9 +
    9.10 +  verify_oop(obj);
    9.11 +
    9.12 +  testptr(obj, obj);
    9.13 +  jccb(Assembler::notZero, update);
    9.14 +  orptr(mdo_addr, TypeEntries::null_seen);
    9.15 +  jmpb(next);
    9.16 +
    9.17 +  bind(update);
    9.18 +  load_klass(obj, obj);
    9.19 +
    9.20 +  xorptr(obj, mdo_addr);
    9.21 +  testptr(obj, TypeEntries::type_klass_mask);
    9.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
    9.23 +                               // do. The unknown bit may have been
    9.24 +                               // set already but no need to check.
    9.25 +
    9.26 +  testptr(obj, TypeEntries::type_unknown);
    9.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
    9.28 +
    9.29 +  // There is a chance that by the time we do these checks (re-reading
     9.30 +  // profiling data from memory) another thread has set the profiling
     9.31 +  // to this obj's klass and we set the profiling as unknown
     9.32 +  // erroneously.
    9.33 +  cmpptr(mdo_addr, 0);
    9.34 +  jccb(Assembler::equal, none);
    9.35 +  cmpptr(mdo_addr, TypeEntries::null_seen);
    9.36 +  jccb(Assembler::equal, none);
    9.37 +  // There is a chance that the checks above (re-reading profiling
    9.38 +  // data from memory) fail if another thread has just set the
    9.39 +  // profiling to this obj's klass
    9.40 +  xorptr(obj, mdo_addr);
    9.41 +  testptr(obj, TypeEntries::type_klass_mask);
    9.42 +  jccb(Assembler::zero, next);
    9.43 +
    9.44 +  // different than before. Cannot keep accurate profile.
    9.45 +  orptr(mdo_addr, TypeEntries::type_unknown);
    9.46 +  jmpb(next);
    9.47 +
    9.48 +  bind(none);
    9.49 +  // first time here. Set profile type.
    9.50 +  movptr(mdo_addr, obj);
    9.51 +
    9.52 +  bind(next);
    9.53 +}
    9.54 +
    9.55 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
    9.56 +  if (!ProfileInterpreter) {
    9.57 +    return;
    9.58 +  }
    9.59 +
    9.60 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    9.61 +    Label profile_continue;
    9.62 +
    9.63 +    test_method_data_pointer(mdp, profile_continue);
    9.64 +
    9.65 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
    9.66 +
    9.67 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    9.68 +    jcc(Assembler::notEqual, profile_continue);
    9.69 +
    9.70 +    if (MethodData::profile_arguments()) {
    9.71 +      Label done;
    9.72 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
    9.73 +      addptr(mdp, off_to_args);
    9.74 +
    9.75 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
    9.76 +        if (i > 0 || MethodData::profile_return()) {
    9.77 +          // If return value type is profiled we may have no argument to profile
    9.78 +          movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
    9.79 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
    9.80 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
    9.81 +          jcc(Assembler::less, done);
    9.82 +        }
    9.83 +        movptr(tmp, Address(callee, Method::const_offset()));
    9.84 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
    9.85 +        subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
    9.86 +        subl(tmp, 1);
    9.87 +        Address arg_addr = argument_address(tmp);
    9.88 +        movptr(tmp, arg_addr);
    9.89 +
    9.90 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
    9.91 +        profile_obj_type(tmp, mdo_arg_addr);
    9.92 +
    9.93 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
    9.94 +        addptr(mdp, to_add);
    9.95 +        off_to_args += to_add;
    9.96 +      }
    9.97 +
    9.98 +      if (MethodData::profile_return()) {
    9.99 +        movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   9.100 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
   9.101 +      }
   9.102 +
   9.103 +      bind(done);
   9.104 +
   9.105 +      if (MethodData::profile_return()) {
   9.106 +        // We're right after the type profile for the last
   9.107 +        // argument. tmp is the number of cell left in the
   9.108 +        // CallTypeData/VirtualCallTypeData to reach its end. Non null
   9.109 +        // if there's a return to profile.
   9.110 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
   9.111 +        shll(tmp, exact_log2(DataLayout::cell_size));
   9.112 +        addptr(mdp, tmp);
   9.113 +      }
   9.114 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
   9.115 +    } else {
   9.116 +      assert(MethodData::profile_return(), "either profile call args or call ret");
   9.117 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
   9.118 +    }
   9.119 +
   9.120 +    // mdp points right after the end of the
   9.121 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
   9.122 +    // return value type if there's one
   9.123 +
   9.124 +    bind(profile_continue);
   9.125 +  }
   9.126 +}
   9.127 +
   9.128 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
   9.129 +  assert_different_registers(mdp, ret, tmp, r13);
   9.130 +  if (ProfileInterpreter && MethodData::profile_return()) {
   9.131 +    Label profile_continue, done;
   9.132 +
   9.133 +    test_method_data_pointer(mdp, profile_continue);
   9.134 +
   9.135 +    if (MethodData::profile_return_jsr292_only()) {
   9.136 +      // If we don't profile all invoke bytecodes we must make sure
   9.137 +      // it's a bytecode we indeed profile. We can't go back to the
    9.138 +      // beginning of the ProfileData we intend to update to check its
    9.139 +      // type because we're right after it and we don't know its
    9.140 +      // length.
   9.141 +      Label do_profile;
   9.142 +      cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
   9.143 +      jcc(Assembler::equal, do_profile);
   9.144 +      cmpb(Address(r13, 0), Bytecodes::_invokehandle);
   9.145 +      jcc(Assembler::equal, do_profile);
   9.146 +      get_method(tmp);
   9.147 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
   9.148 +      jcc(Assembler::notEqual, profile_continue);
   9.149 +
   9.150 +      bind(do_profile);
   9.151 +    }
   9.152 +
   9.153 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
   9.154 +    mov(tmp, ret);
   9.155 +    profile_obj_type(tmp, mdo_ret_addr);
   9.156 +
   9.157 +    bind(profile_continue);
   9.158 +  }
   9.159 +}
   9.160  
   9.161  void InterpreterMacroAssembler::profile_call(Register mdp) {
   9.162    if (ProfileInterpreter) {
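
Note: when profile_return_jsr292_only() is set, profile_return_type() must prove the current site is one it actually profiles, and per the comment it cannot walk backwards over the variable-length ProfileData to check the tag. The accepted cases, restated as a hypothetical predicate:

    // Hypothetical restatement of the guard in profile_return_type() above.
    bool is_jsr292_profiled_site(Bytecodes::Code bc, vmIntrinsics::ID method_id) {
      return bc == Bytecodes::_invokedynamic
          || bc == Bytecodes::_invokehandle
          || method_id == vmIntrinsics::_compiledLambdaForm;
    }
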
    10.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Oct 16 11:48:03 2013 -0700
    10.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Oct 17 10:58:45 2013 -0700
    10.3 @@ -224,6 +224,9 @@
    10.4  
    10.5    void profile_taken_branch(Register mdp, Register bumped_count);
    10.6    void profile_not_taken_branch(Register mdp);
    10.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
    10.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
    10.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
   10.10    void profile_call(Register mdp);
   10.11    void profile_final_call(Register mdp);
   10.12    void profile_virtual_call(Register receiver, Register mdp,
    11.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Oct 16 11:48:03 2013 -0700
    11.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Oct 17 10:58:45 2013 -0700
    11.3 @@ -773,6 +773,7 @@
    11.4    void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    11.5    void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    11.6    void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    11.7 +  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
    11.8  
    11.9    void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
   11.10    void testptr(Register src1, Register src2);
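
Note: the new orptr(Address, int32_t) overload follows the file's existing pattern of selecting the 64- or 32-bit instruction at compile time. Conceptually the macros expand as:

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif
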
    12.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Oct 16 11:48:03 2013 -0700
    12.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Oct 17 10:58:45 2013 -0700
    12.3 @@ -194,6 +194,12 @@
    12.4    __ restore_bcp();
    12.5    __ restore_locals();
    12.6  
    12.7 +  if (incoming_state == atos) {
    12.8 +    Register mdp = rbx;
    12.9 +    Register tmp = rcx;
   12.10 +    __ profile_return_type(mdp, rax, tmp);
   12.11 +  }
   12.12 +
   12.13    Label L_got_cache, L_giant_index;
   12.14    if (EnableInvokeDynamic) {
   12.15      __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    13.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Oct 16 11:48:03 2013 -0700
    13.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Oct 17 10:58:45 2013 -0700
    13.3 @@ -177,6 +177,12 @@
    13.4    __ restore_bcp();
    13.5    __ restore_locals();
    13.6  
    13.7 +  if (state == atos) {
    13.8 +    Register mdp = rbx;
    13.9 +    Register tmp = rcx;
   13.10 +    __ profile_return_type(mdp, rax, tmp);
   13.11 +  }
   13.12 +
   13.13    Label L_got_cache, L_giant_index;
   13.14    if (EnableInvokeDynamic) {
   13.15      __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    14.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Oct 16 11:48:03 2013 -0700
    14.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Oct 17 10:58:45 2013 -0700
    14.3 @@ -2970,6 +2970,7 @@
    14.4  
    14.5    // profile this call
    14.6    __ profile_final_call(rax);
    14.7 +  __ profile_arguments_type(rax, method, rsi, true);
    14.8  
    14.9    __ jump_from_interpreted(method, rax);
   14.10  
   14.11 @@ -2984,6 +2985,7 @@
   14.12  
   14.13    // get target Method* & entry point
   14.14    __ lookup_virtual_method(rax, index, method);
   14.15 +  __ profile_arguments_type(rdx, method, rsi, true);
   14.16    __ jump_from_interpreted(method, rdx);
   14.17  }
   14.18  
   14.19 @@ -3013,6 +3015,7 @@
   14.20    __ null_check(rcx);
   14.21    // do the call
   14.22    __ profile_call(rax);
   14.23 +  __ profile_arguments_type(rax, rbx, rsi, false);
   14.24    __ jump_from_interpreted(rbx, rax);
   14.25  }
   14.26  
   14.27 @@ -3023,6 +3026,7 @@
   14.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   14.29    // do the call
   14.30    __ profile_call(rax);
   14.31 +  __ profile_arguments_type(rax, rbx, rsi, false);
   14.32    __ jump_from_interpreted(rbx, rax);
   14.33  }
   14.34  
   14.35 @@ -3082,6 +3086,8 @@
   14.36    __ testptr(rbx, rbx);
   14.37    __ jcc(Assembler::zero, no_such_method);
   14.38  
   14.39 +  __ profile_arguments_type(rdx, rbx, rsi, true);
   14.40 +
   14.41    // do the call
   14.42    // rcx: receiver
   14.43    // rbx,: Method*
   14.44 @@ -3138,6 +3144,7 @@
   14.45  
   14.46    // FIXME: profile the LambdaForm also
   14.47    __ profile_final_call(rax);
   14.48 +  __ profile_arguments_type(rdx, rbx_method, rsi, true);
   14.49  
   14.50    __ jump_from_interpreted(rbx_method, rdx);
   14.51  }
   14.52 @@ -3171,6 +3178,7 @@
   14.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   14.54    // profile this call
   14.55    __ profile_call(rsi);
   14.56 +  __ profile_arguments_type(rdx, rbx, rsi, false);
   14.57  
   14.58    __ verify_oop(rax_callsite);
   14.59  
    15.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Oct 16 11:48:03 2013 -0700
    15.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Oct 17 10:58:45 2013 -0700
    15.3 @@ -3026,6 +3026,7 @@
    15.4  
    15.5    // profile this call
    15.6    __ profile_final_call(rax);
    15.7 +  __ profile_arguments_type(rax, method, r13, true);
    15.8  
    15.9    __ jump_from_interpreted(method, rax);
   15.10  
   15.11 @@ -3040,6 +3041,7 @@
   15.12  
   15.13    // get target Method* & entry point
   15.14    __ lookup_virtual_method(rax, index, method);
   15.15 +  __ profile_arguments_type(rdx, method, r13, true);
   15.16    __ jump_from_interpreted(method, rdx);
   15.17  }
   15.18  
   15.19 @@ -3069,6 +3071,7 @@
   15.20    __ null_check(rcx);
   15.21    // do the call
   15.22    __ profile_call(rax);
   15.23 +  __ profile_arguments_type(rax, rbx, r13, false);
   15.24    __ jump_from_interpreted(rbx, rax);
   15.25  }
   15.26  
   15.27 @@ -3079,6 +3082,7 @@
   15.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   15.29    // do the call
   15.30    __ profile_call(rax);
   15.31 +  __ profile_arguments_type(rax, rbx, r13, false);
   15.32    __ jump_from_interpreted(rbx, rax);
   15.33  }
   15.34  
   15.35 @@ -3136,6 +3140,8 @@
   15.36    __ testptr(rbx, rbx);
   15.37    __ jcc(Assembler::zero, no_such_method);
   15.38  
   15.39 +  __ profile_arguments_type(rdx, rbx, r13, true);
   15.40 +
   15.41    // do the call
   15.42    // rcx: receiver
   15.43    // rbx,: Method*
   15.44 @@ -3193,6 +3199,7 @@
   15.45  
   15.46    // FIXME: profile the LambdaForm also
   15.47    __ profile_final_call(rax);
   15.48 +  __ profile_arguments_type(rdx, rbx_method, r13, true);
   15.49  
   15.50    __ jump_from_interpreted(rbx_method, rdx);
   15.51  }
   15.52 @@ -3226,6 +3233,7 @@
   15.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   15.54    // profile this call
   15.55    __ profile_call(r13);
   15.56 +  __ profile_arguments_type(rdx, rbx_method, r13, false);
   15.57  
   15.58    __ verify_oop(rax_callsite);
   15.59  
    16.1 --- a/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Oct 16 11:48:03 2013 -0700
    16.2 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Oct 17 10:58:45 2013 -0700
    16.3 @@ -935,6 +935,7 @@
    16.4  void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
    16.5  void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
    16.6  void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
    16.7 +void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
    16.8  void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
    16.9  void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
   16.10  void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
    17.1 --- a/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Oct 16 11:48:03 2013 -0700
    17.2 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Thu Oct 17 10:58:45 2013 -0700
    17.3 @@ -104,6 +104,7 @@
    17.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    17.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    17.6    virtual void do_ProfileCall    (ProfileCall*     x);
    17.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    17.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    17.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   17.10    virtual void do_MemBar         (MemBar*          x);
    18.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Wed Oct 16 11:48:03 2013 -0700
    18.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Thu Oct 17 10:58:45 2013 -0700
    18.3 @@ -601,6 +601,17 @@
    18.4    }
    18.5  }
    18.6  
    18.7 +ciKlass* Compilation::cha_exact_type(ciType* type) {
    18.8 +  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
    18.9 +    ciInstanceKlass* ik = type->as_instance_klass();
   18.10 +    assert(ik->exact_klass() == NULL, "no cha for final klass");
   18.11 +    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
   18.12 +      dependency_recorder()->assert_leaf_type(ik);
   18.13 +      return ik;
   18.14 +    }
   18.15 +  }
   18.16 +  return NULL;
   18.17 +}
   18.18  
   18.19  void Compilation::print_timers() {
   18.20    // tty->print_cr("    Native methods         : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);
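
Note: cha_exact_type() lets profiling code treat a declared instance type as exact when class hierarchy analysis shows it has no subclasses and is not an interface, at the cost of a leaf-type dependency that invalidates the generated code if a subclass is loaded later. An illustrative call site (not from this patch):

    // Illustrative only: resolving a declared type to an exact klass via CHA.
    ciKlass* exact = compilation()->cha_exact_type(declared_type);
    if (exact != NULL) {
      // Values of this type may be profiled statically as 'exact'; the
      // recorded assert_leaf_type dependency keeps the assumption honest.
    }
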
    19.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Wed Oct 16 11:48:03 2013 -0700
    19.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Thu Oct 17 10:58:45 2013 -0700
    19.3 @@ -246,6 +246,8 @@
    19.4        (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
    19.5        method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
    19.6    }
    19.7 +
    19.8 +  ciKlass* cha_exact_type(ciType* type);
    19.9  };
   19.10  
   19.11  
    20.1 --- a/src/share/vm/c1/c1_Compiler.cpp	Wed Oct 16 11:48:03 2013 -0700
    20.2 +++ b/src/share/vm/c1/c1_Compiler.cpp	Thu Oct 17 10:58:45 2013 -0700
    20.3 @@ -42,26 +42,16 @@
    20.4  #include "runtime/interfaceSupport.hpp"
    20.5  #include "runtime/sharedRuntime.hpp"
    20.6  
    20.7 -volatile int Compiler::_runtimes = uninitialized;
    20.8  
    20.9 -Compiler::Compiler() {
   20.10 -}
    20.11 +Compiler::Compiler() {}
   20.12  
   20.13 -
   20.14 -Compiler::~Compiler() {
   20.15 -  Unimplemented();
   20.16 -}
   20.17 -
   20.18 -
   20.19 -void Compiler::initialize_all() {
   20.20 +void Compiler::init_c1_runtime() {
   20.21    BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   20.22    Arena* arena = new (mtCompiler) Arena();
   20.23    Runtime1::initialize(buffer_blob);
   20.24    FrameMap::initialize();
   20.25    // initialize data structures
   20.26    ValueType::initialize(arena);
   20.27 -  // Instruction::initialize();
   20.28 -  // BlockBegin::initialize();
   20.29    GraphBuilder::initialize();
   20.30    // note: to use more than one instance of LinearScan at a time this function call has to
   20.31    //       be moved somewhere outside of this constructor:
   20.32 @@ -70,32 +60,33 @@
   20.33  
   20.34  
   20.35  void Compiler::initialize() {
   20.36 -  if (_runtimes != initialized) {
   20.37 -    initialize_runtimes( initialize_all, &_runtimes);
   20.38 +  // Buffer blob must be allocated per C1 compiler thread at startup
   20.39 +  BufferBlob* buffer_blob = init_buffer_blob();
   20.40 +
   20.41 +  if (should_perform_init()) {
   20.42 +    if (buffer_blob == NULL) {
    20.43 +      // When we come here we are in state 'initializing'; the entire C1
    20.44 +      // compilation can be shut down.
   20.45 +      set_state(failed);
   20.46 +    } else {
   20.47 +      init_c1_runtime();
   20.48 +      set_state(initialized);
   20.49 +    }
   20.50    }
   20.51 -  mark_initialized();
   20.52  }
   20.53  
   20.54 -
   20.55 -BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
   20.56 +BufferBlob* Compiler::init_buffer_blob() {
   20.57    // Allocate buffer blob once at startup since allocation for each
   20.58    // compilation seems to be too expensive (at least on Intel win32).
   20.59 -  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   20.60 -  if (buffer_blob != NULL) {
   20.61 -    return buffer_blob;
   20.62 -  }
    20.63 +  assert(CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
   20.64  
   20.65    // setup CodeBuffer.  Preallocate a BufferBlob of size
   20.66    // NMethodSizeLimit plus some extra space for constants.
   20.67    int code_buffer_size = Compilation::desired_max_code_buffer_size() +
   20.68      Compilation::desired_max_constant_size();
   20.69  
   20.70 -  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
   20.71 -                                   code_buffer_size);
   20.72 -  if (buffer_blob == NULL) {
   20.73 -    CompileBroker::handle_full_code_cache();
   20.74 -    env->record_failure("CodeCache is full");
   20.75 -  } else {
   20.76 +  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
   20.77 +  if (buffer_blob != NULL) {
   20.78      CompilerThread::current()->set_buffer_blob(buffer_blob);
   20.79    }
   20.80  
   20.81 @@ -104,15 +95,8 @@
   20.82  
   20.83  
   20.84  void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
   20.85 -  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
   20.86 -  if (buffer_blob == NULL) {
   20.87 -    return;
   20.88 -  }
   20.89 -
   20.90 -  if (!is_initialized()) {
   20.91 -    initialize();
   20.92 -  }
   20.93 -
   20.94 +  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   20.95 +  assert(buffer_blob != NULL, "Must exist");
   20.96    // invoke compilation
   20.97    {
   20.98      // We are nested here because we need for the destructor
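
Note the inversion relative to the old code: the lazy get_buffer_blob()/initialize() calls disappear from compile_method(), which can now assert that the blob exists, because initialize() runs on every compiler thread before it compiles anything; a failed blob allocation at startup marks the whole compiler failed instead of failing each compilation individually.
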
    21.1 --- a/src/share/vm/c1/c1_Compiler.hpp	Wed Oct 16 11:48:03 2013 -0700
    21.2 +++ b/src/share/vm/c1/c1_Compiler.hpp	Thu Oct 17 10:58:45 2013 -0700
    21.3 @@ -30,11 +30,9 @@
    21.4  // There is one instance of the Compiler per CompilerThread.
    21.5  
    21.6  class Compiler: public AbstractCompiler {
    21.7 -
    21.8   private:
    21.9 -
   21.10 - // Tracks whether runtime has been initialized
   21.11 - static volatile int _runtimes;
   21.12 +  static void init_c1_runtime();
   21.13 +  BufferBlob* init_buffer_blob();
   21.14  
   21.15   public:
   21.16    // Creation
   21.17 @@ -46,19 +44,12 @@
   21.18  
   21.19    virtual bool is_c1()                           { return true; };
   21.20  
   21.21 -  BufferBlob* get_buffer_blob(ciEnv* env);
   21.22 -
   21.23    // Missing feature tests
   21.24    virtual bool supports_native()                 { return true; }
   21.25    virtual bool supports_osr   ()                 { return true; }
   21.26  
   21.27 -  // Customization
   21.28 -  virtual bool needs_adapters         ()         { return false; }
   21.29 -  virtual bool needs_stubs            ()         { return false; }
   21.30 -
   21.31    // Initialization
   21.32    virtual void initialize();
   21.33 -  static  void initialize_all();
   21.34  
   21.35    // Compilation entry point for methods
   21.36    virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
    22.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Oct 16 11:48:03 2013 -0700
    22.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 17 10:58:45 2013 -0700
    22.3 @@ -1466,9 +1466,22 @@
    22.4      // State at end of inlined method is the state of the caller
    22.5      // without the method parameters on stack, including the
    22.6      // return value, if any, of the inlined method on operand stack.
    22.7 +    int invoke_bci = state()->caller_state()->bci();
    22.8      set_state(state()->caller_state()->copy_for_parsing());
    22.9      if (x != NULL) {
   22.10        state()->push(x->type(), x);
   22.11 +      if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
   22.12 +        ciMethod* caller = state()->scope()->method();
   22.13 +        ciMethodData* md = caller->method_data_or_null();
   22.14 +        ciProfileData* data = md->bci_to_data(invoke_bci);
   22.15 +        if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   22.16 +          bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
   22.17 +          // May not be true in case of an inlined call through a method handle intrinsic.
   22.18 +          if (has_return) {
   22.19 +            profile_return_type(x, method(), caller, invoke_bci);
   22.20 +          }
   22.21 +        }
   22.22 +      }
   22.23      }
   22.24      Goto* goto_callee = new Goto(continuation(), false);
   22.25  
   22.26 @@ -1658,6 +1671,42 @@
   22.27    return compilation()->dependency_recorder();
   22.28  }
   22.29  
   22.30 +// How many arguments do we want to profile?
   22.31 +Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
   22.32 +  int n = 0;
   22.33 +  assert(start == 0, "should be initialized");
   22.34 +  if (MethodData::profile_arguments()) {
   22.35 +    ciProfileData* data = method()->method_data()->bci_to_data(bci());
   22.36 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   22.37 +      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
   22.38 +      bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
   22.39 +      start = has_receiver ? 1 : 0;
   22.40 +    }
   22.41 +  }
   22.42 +  if (n > 0) {
   22.43 +    return new Values(n);
   22.44 +  }
   22.45 +  return NULL;
   22.46 +}
   22.47 +
   22.48 +// Collect arguments that we want to profile in a list
   22.49 +Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
   22.50 +  int start = 0;
   22.51 +  Values* obj_args = args_list_for_profiling(start, may_have_receiver);
   22.52 +  if (obj_args == NULL) {
   22.53 +    return NULL;
   22.54 +  }
   22.55 +  int s = obj_args->size();
   22.56 +  for (int i = start, j = 0; j < s; i++) {
   22.57 +    if (args->at(i)->type()->is_object_kind()) {
   22.58 +      obj_args->push(args->at(i));
   22.59 +      j++;
   22.60 +    }
   22.61 +  }
   22.62 +  assert(s == obj_args->length(), "missed on arg?");
   22.63 +  return obj_args;
   22.64 +}
   22.65 +
   22.66  
   22.67  void GraphBuilder::invoke(Bytecodes::Code code) {
   22.68    bool will_link;
   22.69 @@ -1957,7 +2006,7 @@
   22.70        } else if (exact_target != NULL) {
   22.71          target_klass = exact_target->holder();
   22.72        }
   22.73 -      profile_call(target, recv, target_klass);
   22.74 +      profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
   22.75      }
   22.76    }
   22.77  
   22.78 @@ -1972,6 +2021,9 @@
   22.79        push(result_type, result);
   22.80      }
   22.81    }
   22.82 +  if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
   22.83 +    profile_return_type(result, target);
   22.84 +  }
   22.85  }
   22.86  
   22.87  
   22.88 @@ -3509,7 +3561,7 @@
   22.89            recv = args->at(0);
   22.90            null_check(recv);
   22.91          }
   22.92 -        profile_call(callee, recv, NULL);
   22.93 +        profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
   22.94        }
   22.95      }
   22.96    }
   22.97 @@ -3520,6 +3572,10 @@
   22.98    Value value = append_split(result);
   22.99    if (result_type != voidType) push(result_type, value);
  22.100  
  22.101 +  if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
  22.102 +    profile_return_type(result, callee);
  22.103 +  }
  22.104 +
  22.105    // done
  22.106    return true;
  22.107  }
  22.108 @@ -3763,7 +3819,28 @@
  22.109      compilation()->set_would_profile(true);
  22.110  
  22.111      if (profile_calls()) {
  22.112 -      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
  22.113 +      int start = 0;
  22.114 +      Values* obj_args = args_list_for_profiling(start, has_receiver);
  22.115 +      if (obj_args != NULL) {
  22.116 +        int s = obj_args->size();
  22.117 +        // if called through method handle invoke, some arguments may have been popped
  22.118 +        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
  22.119 +          Value v = state()->stack_at_inc(i);
  22.120 +          if (v->type()->is_object_kind()) {
  22.121 +            obj_args->push(v);
  22.122 +            j++;
  22.123 +          }
  22.124 +        }
  22.125 +#ifdef ASSERT
  22.126 +        {
  22.127 +          bool ignored_will_link;
  22.128 +          ciSignature* declared_signature = NULL;
  22.129 +          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  22.130 +          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
  22.131 +        }
  22.132 +#endif
  22.133 +      }
  22.134 +      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
  22.135      }
  22.136    }
  22.137  
  22.138 @@ -4251,8 +4328,23 @@
  22.139  }
  22.140  #endif // PRODUCT
  22.141  
  22.142 -void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
  22.143 -  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
  22.144 +void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  22.145 +  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
  22.146 +}
  22.147 +
  22.148 +void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  22.149 +  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
  22.150 +  if (m == NULL) {
  22.151 +    m = method();
  22.152 +  }
  22.153 +  if (invoke_bci < 0) {
  22.154 +    invoke_bci = bci();
  22.155 +  }
  22.156 +  ciMethodData* md = m->method_data_or_null();
  22.157 +  ciProfileData* data = md->bci_to_data(invoke_bci);
  22.158 +  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   22.159 +    append(new ProfileReturnType(m, invoke_bci, callee, ret));
  22.160 +  }
  22.161  }
  22.162  
  22.163  void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
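
Note: collect_args_for_profiling() sizes the list from the MDO's recorded argument count and then keeps only object-kind values, since type profiling has nothing to record for primitives. A worked example with a hypothetical signature:

    // Hypothetical call m(int a, String b, long c, Object d), start = 0:
    // only b and d are object-kind, so obj_args == [b, d] and the trailing
    // assert sees s == obj_args->length() == 2 (within TypeProfileArgsLimit).
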
    23.1 --- a/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Oct 16 11:48:03 2013 -0700
    23.2 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Oct 17 10:58:45 2013 -0700
    23.3 @@ -374,7 +374,8 @@
    23.4  
    23.5    void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
    23.6  
    23.7 -  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
    23.8 +  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
    23.9 +  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
   23.10    void profile_invocation(ciMethod* inlinee, ValueStack* state);
   23.11  
   23.12    // Shortcuts to profiling control.
   23.13 @@ -386,6 +387,9 @@
   23.14    bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   23.15    bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
   23.16  
   23.17 +  Values* args_list_for_profiling(int& start, bool may_have_receiver);
   23.18 +  Values* collect_args_for_profiling(Values* args, bool may_have_receiver);
   23.19 +
   23.20   public:
   23.21    NOT_PRODUCT(void print_stats();)
   23.22  
    24.1 --- a/src/share/vm/c1/c1_Instruction.cpp	Wed Oct 16 11:48:03 2013 -0700
    24.2 +++ b/src/share/vm/c1/c1_Instruction.cpp	Thu Oct 17 10:58:45 2013 -0700
    24.3 @@ -104,6 +104,14 @@
    24.4    }
    24.5  }
    24.6  
    24.7 +ciType* Instruction::exact_type() const {
    24.8 +  ciType* t =  declared_type();
    24.9 +  if (t != NULL && t->is_klass()) {
   24.10 +    return t->as_klass()->exact_klass();
   24.11 +  }
   24.12 +  return NULL;
   24.13 +}
   24.14 +
   24.15  
   24.16  #ifndef PRODUCT
   24.17  void Instruction::check_state(ValueStack* state) {
   24.18 @@ -135,9 +143,7 @@
   24.19  
   24.20  // perform constant and interval tests on index value
   24.21  bool AccessIndexed::compute_needs_range_check() {
   24.22 -
   24.23    if (length()) {
   24.24 -
   24.25      Constant* clength = length()->as_Constant();
   24.26      Constant* cindex = index()->as_Constant();
   24.27      if (clength && cindex) {
   24.28 @@ -157,34 +163,8 @@
   24.29  }
   24.30  
   24.31  
   24.32 -ciType* Local::exact_type() const {
   24.33 -  ciType* type = declared_type();
   24.34 -
   24.35 -  // for primitive arrays, the declared type is the exact type
   24.36 -  if (type->is_type_array_klass()) {
   24.37 -    return type;
   24.38 -  } else if (type->is_instance_klass()) {
   24.39 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
   24.40 -    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
   24.41 -      return type;
   24.42 -    }
   24.43 -  } else if (type->is_obj_array_klass()) {
   24.44 -    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
   24.45 -    ciType* base = oak->base_element_type();
   24.46 -    if (base->is_instance_klass()) {
   24.47 -      ciInstanceKlass* ik = base->as_instance_klass();
   24.48 -      if (ik->is_loaded() && ik->is_final()) {
   24.49 -        return type;
   24.50 -      }
   24.51 -    } else if (base->is_primitive_type()) {
   24.52 -      return type;
   24.53 -    }
   24.54 -  }
   24.55 -  return NULL;
   24.56 -}
   24.57 -
   24.58  ciType* Constant::exact_type() const {
   24.59 -  if (type()->is_object()) {
   24.60 +  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
   24.61      return type()->as_ObjectType()->exact_type();
   24.62    }
   24.63    return NULL;
   24.64 @@ -192,19 +172,18 @@
   24.65  
   24.66  ciType* LoadIndexed::exact_type() const {
   24.67    ciType* array_type = array()->exact_type();
   24.68 -  if (array_type == NULL) {
   24.69 -    return NULL;
   24.70 -  }
   24.71 -  assert(array_type->is_array_klass(), "what else?");
   24.72 -  ciArrayKlass* ak = (ciArrayKlass*)array_type;
   24.73 +  if (array_type != NULL) {
   24.74 +    assert(array_type->is_array_klass(), "what else?");
   24.75 +    ciArrayKlass* ak = (ciArrayKlass*)array_type;
   24.76  
   24.77 -  if (ak->element_type()->is_instance_klass()) {
   24.78 -    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   24.79 -    if (ik->is_loaded() && ik->is_final()) {
   24.80 -      return ik;
   24.81 +    if (ak->element_type()->is_instance_klass()) {
   24.82 +      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   24.83 +      if (ik->is_loaded() && ik->is_final()) {
   24.84 +        return ik;
   24.85 +      }
   24.86      }
   24.87    }
   24.88 -  return NULL;
   24.89 +  return Instruction::exact_type();
   24.90  }
   24.91  
   24.92  
   24.93 @@ -224,22 +203,6 @@
   24.94  }
   24.95  
   24.96  
   24.97 -ciType* LoadField::exact_type() const {
   24.98 -  ciType* type = declared_type();
   24.99 -  // for primitive arrays, the declared type is the exact type
  24.100 -  if (type->is_type_array_klass()) {
  24.101 -    return type;
  24.102 -  }
  24.103 -  if (type->is_instance_klass()) {
  24.104 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
  24.105 -    if (ik->is_loaded() && ik->is_final()) {
  24.106 -      return type;
  24.107 -    }
  24.108 -  }
  24.109 -  return NULL;
  24.110 -}
  24.111 -
  24.112 -
  24.113  ciType* NewTypeArray::exact_type() const {
  24.114    return ciTypeArrayKlass::make(elt_type());
  24.115  }
  24.116 @@ -264,16 +227,6 @@
  24.117    return klass();
  24.118  }
  24.119  
  24.120 -ciType* CheckCast::exact_type() const {
  24.121 -  if (klass()->is_instance_klass()) {
  24.122 -    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
  24.123 -    if (ik->is_loaded() && ik->is_final()) {
  24.124 -      return ik;
  24.125 -    }
  24.126 -  }
  24.127 -  return NULL;
  24.128 -}
  24.129 -
  24.130  // Implementation of ArithmeticOp
  24.131  
  24.132  bool ArithmeticOp::is_commutative() const {
    25.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Wed Oct 16 11:48:03 2013 -0700
    25.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Thu Oct 17 10:58:45 2013 -0700
    25.3 @@ -107,6 +107,7 @@
    25.4  class         UnsafePrefetchRead;
    25.5  class         UnsafePrefetchWrite;
    25.6  class   ProfileCall;
    25.7 +class   ProfileReturnType;
    25.8  class   ProfileInvoke;
    25.9  class   RuntimeCall;
   25.10  class   MemBar;
   25.11 @@ -211,6 +212,7 @@
   25.12    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   25.13    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   25.14    virtual void do_ProfileCall    (ProfileCall*     x) = 0;
   25.15 +  virtual void do_ProfileReturnType (ProfileReturnType*  x) = 0;
   25.16    virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   25.17    virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   25.18    virtual void do_MemBar         (MemBar*          x) = 0;
   25.19 @@ -322,6 +324,36 @@
   25.20      _type = type;
   25.21    }
   25.22  
   25.23 +  // Helper class to keep track of which arguments need a null check
   25.24 +  class ArgsNonNullState {
   25.25 +  private:
   25.26 +    int _nonnull_state; // mask identifying which args are nonnull
   25.27 +  public:
   25.28 +    ArgsNonNullState()
   25.29 +      : _nonnull_state(AllBits) {}
   25.30 +
    25.31 +    // Does argument number i need a null check?
   25.32 +    bool arg_needs_null_check(int i) const {
    25.33 +      // No data is kept for arguments beyond the first 32 (one bit
    25.34 +      // per argument), so conservatively assume that they need a null check.
   25.35 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   25.36 +        return is_set_nth_bit(_nonnull_state, i);
   25.37 +      }
   25.38 +      return true;
   25.39 +    }
   25.40 +
   25.41 +    // Set whether argument number i needs a null check or not
   25.42 +    void set_arg_needs_null_check(int i, bool check) {
   25.43 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   25.44 +        if (check) {
   25.45 +          _nonnull_state |= nth_bit(i);
   25.46 +        } else {
   25.47 +          _nonnull_state &= ~(nth_bit(i));
   25.48 +        }
   25.49 +      }
   25.50 +    }
   25.51 +  };
   25.52 +
   25.53   public:
   25.54    void* operator new(size_t size) throw() {
   25.55      Compilation* c = Compilation::current();
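
The ArgsNonNullState helper above is essentially a fixed-width bit set with a
conservative fallback for argument indices it cannot track. A minimal
standalone sketch of the same pattern (plain C++ with made-up names, not the
HotSpot types):

#include <cassert>
#include <climits>

class ArgBitMask {
 private:
  unsigned _state;                               // one bit per argument
  static const int kBits = (int)sizeof(unsigned) * CHAR_BIT;
 public:
  ArgBitMask() : _state(~0u) {}                  // all args assumed to need a check
  bool needs_check(int i) const {
    if (i >= 0 && i < kBits) {
      return ((_state >> i) & 1u) != 0;
    }
    return true;                                 // untracked index: stay conservative
  }
  void set_needs_check(int i, bool check) {
    if (i < 0 || i >= kBits) return;             // out of range: silently ignored
    if (check) {
      _state |= (1u << i);
    } else {
      _state &= ~(1u << i);
    }
  }
};

int main() {
  ArgBitMask m;
  m.set_needs_check(0, false);                   // argument 0 proven non-null
  assert(!m.needs_check(0));
  assert(m.needs_check(1));                      // untouched: still needs a check
  assert(m.needs_check(100));                    // beyond the mask: always true
  return 0;
}
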
   25.56 @@ -566,7 +598,7 @@
   25.57    virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
   25.58            void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
   25.59  
   25.60 -  virtual ciType* exact_type() const             { return NULL; }
   25.61 +  virtual ciType* exact_type() const;
   25.62    virtual ciType* declared_type() const          { return NULL; }
   25.63  
   25.64    // hashing
   25.65 @@ -689,7 +721,6 @@
   25.66    int java_index() const                         { return _java_index; }
   25.67  
   25.68    virtual ciType* declared_type() const          { return _declared_type; }
   25.69 -  virtual ciType* exact_type() const;
   25.70  
   25.71    // generic
   25.72    virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
   25.73 @@ -806,7 +837,6 @@
   25.74    {}
   25.75  
   25.76    ciType* declared_type() const;
   25.77 -  ciType* exact_type() const;
   25.78  
   25.79    // generic
   25.80    HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile
   25.81 @@ -1299,6 +1329,7 @@
   25.82  
   25.83    virtual bool needs_exception_state() const     { return false; }
   25.84  
   25.85 +  ciType* exact_type() const                     { return NULL; }
   25.86    ciType* declared_type() const;
   25.87  
   25.88    // generic
   25.89 @@ -1422,7 +1453,6 @@
   25.90    }
   25.91  
   25.92    ciType* declared_type() const;
   25.93 -  ciType* exact_type() const;
   25.94  };
   25.95  
   25.96  
   25.97 @@ -1490,7 +1520,7 @@
   25.98    vmIntrinsics::ID _id;
   25.99    Values*          _args;
  25.100    Value            _recv;
  25.101 -  int              _nonnull_state; // mask identifying which args are nonnull
  25.102 +  ArgsNonNullState _nonnull_state;
  25.103  
  25.104   public:
  25.105    // preserves_state can be set to true for Intrinsics
  25.106 @@ -1511,7 +1541,6 @@
  25.107    , _id(id)
  25.108    , _args(args)
  25.109    , _recv(NULL)
  25.110 -  , _nonnull_state(AllBits)
  25.111    {
  25.112      assert(args != NULL, "args must exist");
  25.113      ASSERT_VALUES
  25.114 @@ -1537,21 +1566,12 @@
  25.115    Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
  25.116    bool preserves_state() const                   { return check_flag(PreservesStateFlag); }
  25.117  
  25.118 -  bool arg_needs_null_check(int i) {
  25.119 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  25.120 -      return is_set_nth_bit(_nonnull_state, i);
  25.121 -    }
  25.122 -    return true;
  25.123 +  bool arg_needs_null_check(int i) const {
  25.124 +    return _nonnull_state.arg_needs_null_check(i);
  25.125    }
  25.126  
  25.127    void set_arg_needs_null_check(int i, bool check) {
  25.128 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  25.129 -      if (check) {
  25.130 -        _nonnull_state |= nth_bit(i);
  25.131 -      } else {
  25.132 -        _nonnull_state &= ~(nth_bit(i));
  25.133 -      }
  25.134 -    }
  25.135 +    _nonnull_state.set_arg_needs_null_check(i, check);
  25.136    }
  25.137  
  25.138    // generic
  25.139 @@ -2450,34 +2470,87 @@
  25.140  
  25.141  LEAF(ProfileCall, Instruction)
  25.142   private:
  25.143 -  ciMethod* _method;
  25.144 -  int       _bci_of_invoke;
  25.145 -  ciMethod* _callee;         // the method that is called at the given bci
  25.146 -  Value     _recv;
  25.147 -  ciKlass*  _known_holder;
  25.148 +  ciMethod*        _method;
  25.149 +  int              _bci_of_invoke;
  25.150 +  ciMethod*        _callee;         // the method that is called at the given bci
  25.151 +  Value            _recv;
  25.152 +  ciKlass*         _known_holder;
  25.153 +  Values*          _obj_args;       // arguments for type profiling
  25.154 +  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
  25.155 +  bool             _inlined;        // Are we profiling a call that is inlined
  25.156  
  25.157   public:
  25.158 -  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
  25.159 +  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
  25.160      : Instruction(voidType)
  25.161      , _method(method)
  25.162      , _bci_of_invoke(bci)
  25.163      , _callee(callee)
  25.164      , _recv(recv)
  25.165      , _known_holder(known_holder)
  25.166 +    , _obj_args(obj_args)
  25.167 +    , _inlined(inlined)
  25.168    {
  25.169      // The ProfileCall has side-effects and must occur precisely where located
  25.170      pin();
  25.171    }
  25.172  
  25.173 -  ciMethod* method()      { return _method; }
  25.174 -  int bci_of_invoke()     { return _bci_of_invoke; }
  25.175 -  ciMethod* callee()      { return _callee; }
  25.176 -  Value recv()            { return _recv; }
  25.177 -  ciKlass* known_holder() { return _known_holder; }
  25.178 -
  25.179 -  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
  25.180 +  ciMethod* method()             const { return _method; }
  25.181 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  25.182 +  ciMethod* callee()             const { return _callee; }
  25.183 +  Value recv()                   const { return _recv; }
  25.184 +  ciKlass* known_holder()        const { return _known_holder; }
  25.185 +  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
  25.186 +  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
  25.187 +  bool arg_needs_null_check(int i) const {
  25.188 +    return _nonnull_state.arg_needs_null_check(i);
  25.189 +  }
  25.190 +  bool inlined()                 const { return _inlined; }
  25.191 +
  25.192 +  void set_arg_needs_null_check(int i, bool check) {
  25.193 +    _nonnull_state.set_arg_needs_null_check(i, check);
  25.194 +  }
  25.195 +
  25.196 +  virtual void input_values_do(ValueVisitor* f)   {
  25.197 +    if (_recv != NULL) {
  25.198 +      f->visit(&_recv);
  25.199 +    }
  25.200 +    for (int i = 0; i < nb_profiled_args(); i++) {
  25.201 +      f->visit(_obj_args->adr_at(i));
  25.202 +    }
  25.203 +  }
  25.204  };
  25.205  
  25.206 +LEAF(ProfileReturnType, Instruction)
  25.207 + private:
  25.208 +  ciMethod*        _method;
  25.209 +  ciMethod*        _callee;
  25.210 +  int              _bci_of_invoke;
  25.211 +  Value            _ret;
  25.212 +
  25.213 + public:
  25.214 +  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
  25.215 +    : Instruction(voidType)
  25.216 +    , _method(method)
  25.217 +    , _callee(callee)
  25.218 +    , _bci_of_invoke(bci)
  25.219 +    , _ret(ret)
  25.220 +  {
  25.221 +    set_needs_null_check(true);
   25.222 +    // The ProfileReturnType has side-effects and must occur precisely where located
  25.223 +    pin();
  25.224 +  }
  25.225 +
  25.226 +  ciMethod* method()             const { return _method; }
  25.227 +  ciMethod* callee()             const { return _callee; }
  25.228 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  25.229 +  Value ret()                    const { return _ret; }
  25.230 +
  25.231 +  virtual void input_values_do(ValueVisitor* f)   {
  25.232 +    if (_ret != NULL) {
  25.233 +      f->visit(&_ret);
  25.234 +    }
  25.235 +  }
  25.236 +};
  25.237  
  25.238  // Call some C runtime function that doesn't safepoint,
  25.239  // optionally passing the current thread as the first argument.
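
Much of the mechanical churn in this changeset (InstructionPrinter, Optimizer,
ValueMap, RangeCheckElimination) follows from the visitor double dispatch used
for IR nodes: adding ProfileReturnType means adding a do_ProfileReturnType
hook to every InstructionVisitor subclass. A minimal sketch of that pattern,
with illustrative names rather than the real class hierarchy:

#include <cstdio>

class ProfileCallN;
class ProfileReturnTypeN;

class Visitor {
 public:
  virtual void do_ProfileCall      (ProfileCallN* x) = 0;
  virtual void do_ProfileReturnType(ProfileReturnTypeN* x) = 0;
  virtual ~Visitor() {}
};

class Instr {
 public:
  virtual void visit(Visitor* v) = 0;
  virtual ~Instr() {}
};

class ProfileCallN : public Instr {
 public:
  void visit(Visitor* v) { v->do_ProfileCall(this); }
};

class ProfileReturnTypeN : public Instr {
 public:
  void visit(Visitor* v) { v->do_ProfileReturnType(this); }
};

class Printer : public Visitor {
 public:
  void do_ProfileCall      (ProfileCallN* x)       { std::puts("profile call"); }
  void do_ProfileReturnType(ProfileReturnTypeN* x) { std::puts("profile ret type"); }
};

int main() {
  ProfileReturnTypeN n;
  Printer p;
  Instr* i = &n;
  i->visit(&p);   // double dispatch lands in Printer::do_ProfileReturnType
  return 0;
}
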
    26.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Oct 16 11:48:03 2013 -0700
    26.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Thu Oct 17 10:58:45 2013 -0700
    26.3 @@ -892,10 +892,24 @@
    26.4    if (x->known_holder() != NULL) {
    26.5      output()->print(", ");
    26.6      print_klass(x->known_holder());
    26.7 +    output()->print(" ");
    26.8 +  }
    26.9 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   26.10 +    if (i > 0) output()->print(", ");
   26.11 +    print_value(x->profiled_arg_at(i));
   26.12 +    if (x->arg_needs_null_check(i)) {
   26.13 +      output()->print(" [NC]");
   26.14 +    }
   26.15    }
   26.16    output()->put(')');
   26.17  }
   26.18  
   26.19 +void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
   26.20 +  output()->print("profile ret type ");
   26.21 +  print_value(x->ret());
   26.22 +  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
   26.24 +}
   26.25  void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   26.26    output()->print("profile_invoke ");
   26.27    output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
    27.1 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Oct 16 11:48:03 2013 -0700
    27.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Thu Oct 17 10:58:45 2013 -0700
    27.3 @@ -132,6 +132,7 @@
    27.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    27.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    27.6    virtual void do_ProfileCall    (ProfileCall*     x);
    27.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    27.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    27.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   27.10    virtual void do_MemBar         (MemBar*          x);
    28.1 --- a/src/share/vm/c1/c1_LIR.cpp	Wed Oct 16 11:48:03 2013 -0700
    28.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Thu Oct 17 10:58:45 2013 -0700
    28.3 @@ -1001,6 +1001,17 @@
    28.4        assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
    28.5        break;
    28.6      }
    28.7 +
    28.8 +// LIR_OpProfileType:
    28.9 +    case lir_profile_type: {
   28.10 +      assert(op->as_OpProfileType() != NULL, "must be");
   28.11 +      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;
   28.12 +
   28.13 +      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
   28.14 +      do_input(opProfileType->_obj);
   28.15 +      do_temp(opProfileType->_tmp);
   28.16 +      break;
   28.17 +    }
   28.18    default:
   28.19      ShouldNotReachHere();
   28.20    }
   28.21 @@ -1151,6 +1162,10 @@
   28.22    masm->emit_profile_call(this);
   28.23  }
   28.24  
   28.25 +void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
   28.26 +  masm->emit_profile_type(this);
   28.27 +}
   28.28 +
   28.29  // LIR_List
   28.30  LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   28.31    : _operations(8)
   28.32 @@ -1803,6 +1818,8 @@
   28.33       case lir_cas_int:               s = "cas_int";      break;
   28.34       // LIR_OpProfileCall
   28.35       case lir_profile_call:          s = "profile_call";  break;
   28.36 +     // LIR_OpProfileType
   28.37 +     case lir_profile_type:          s = "profile_type";  break;
   28.38       // LIR_OpAssert
   28.39  #ifdef ASSERT
   28.40       case lir_assert:                s = "assert";        break;
   28.41 @@ -2086,6 +2103,15 @@
   28.42    tmp1()->print(out);          out->print(" ");
   28.43  }
   28.44  
   28.45 +// LIR_OpProfileType
   28.46 +void LIR_OpProfileType::print_instr(outputStream* out) const {
    28.47 +  out->print("exact = "); exact_klass()->print_name_on(out);
    28.48 +  out->print(" current = "); ciTypeEntries::print_ciklass(out, current_klass());
   28.49 +  mdp()->print(out);          out->print(" ");
   28.50 +  obj()->print(out);          out->print(" ");
   28.51 +  tmp()->print(out);          out->print(" ");
   28.52 +}
   28.53 +
   28.54  #endif // PRODUCT
   28.55  
   28.56  // Implementation of LIR_InsertionBuffer
    29.1 --- a/src/share/vm/c1/c1_LIR.hpp	Wed Oct 16 11:48:03 2013 -0700
    29.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Thu Oct 17 10:58:45 2013 -0700
    29.3 @@ -882,6 +882,7 @@
    29.4  class    LIR_OpTypeCheck;
    29.5  class    LIR_OpCompareAndSwap;
    29.6  class    LIR_OpProfileCall;
    29.7 +class    LIR_OpProfileType;
    29.8  #ifdef ASSERT
    29.9  class    LIR_OpAssert;
   29.10  #endif
   29.11 @@ -1005,6 +1006,7 @@
   29.12    , end_opCompareAndSwap
   29.13    , begin_opMDOProfile
   29.14      , lir_profile_call
   29.15 +    , lir_profile_type
   29.16    , end_opMDOProfile
   29.17    , begin_opAssert
   29.18      , lir_assert
   29.19 @@ -1145,6 +1147,7 @@
   29.20    virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   29.21    virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   29.22    virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
   29.23 +  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
   29.24  #ifdef ASSERT
   29.25    virtual LIR_OpAssert* as_OpAssert() { return NULL; }
   29.26  #endif
   29.27 @@ -1925,8 +1928,8 @@
   29.28  
   29.29   public:
   29.30    // Destroys recv
   29.31 -  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   29.32 -    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   29.33 +  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   29.34 +    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   29.35      , _profiled_method(profiled_method)
   29.36      , _profiled_bci(profiled_bci)
   29.37      , _profiled_callee(profiled_callee)
   29.38 @@ -1948,6 +1951,45 @@
   29.39    virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   29.40  };
   29.41  
   29.42 +// LIR_OpProfileType
   29.43 +class LIR_OpProfileType : public LIR_Op {
   29.44 + friend class LIR_OpVisitState;
   29.45 +
   29.46 + private:
   29.47 +  LIR_Opr      _mdp;
   29.48 +  LIR_Opr      _obj;
   29.49 +  LIR_Opr      _tmp;
   29.50 +  ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
   29.51 +  intptr_t     _current_klass; // what the profiling currently reports
   29.52 +  bool         _not_null;      // true if we know statically that _obj cannot be null
    29.53 +  bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
    29.54 +                               // _exact_klass is the only possible type for this parameter in any context.
   29.55 +
   29.56 + public:
    29.57 +  // No result; mdp is both an input and a temp
   29.58 +  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
   29.59 +    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   29.60 +    , _mdp(mdp)
   29.61 +    , _obj(obj)
   29.62 +    , _exact_klass(exact_klass)
   29.63 +    , _current_klass(current_klass)
   29.64 +    , _tmp(tmp)
   29.65 +    , _not_null(not_null)
   29.66 +    , _no_conflict(no_conflict) { }
   29.67 +
   29.68 +  LIR_Opr      mdp()              const             { return _mdp;              }
   29.69 +  LIR_Opr      obj()              const             { return _obj;              }
   29.70 +  LIR_Opr      tmp()              const             { return _tmp;              }
   29.71 +  ciKlass*     exact_klass()      const             { return _exact_klass;      }
   29.72 +  intptr_t     current_klass()    const             { return _current_klass;    }
   29.73 +  bool         not_null()         const             { return _not_null;         }
   29.74 +  bool         no_conflict()      const             { return _no_conflict;      }
   29.75 +
   29.76 +  virtual void emit_code(LIR_Assembler* masm);
   29.77 +  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
   29.78 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   29.79 +};
   29.80 +
   29.81  class LIR_InsertionBuffer;
   29.82  
   29.83  //--------------------------------LIR_List---------------------------------------------------
   29.84 @@ -2247,7 +2289,10 @@
   29.85                    ciMethod* profiled_method, int profiled_bci);
   29.86    // MethodData* profiling
   29.87    void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
   29.88 -    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
   29.89 +    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
   29.90 +  }
   29.91 +  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
   29.92 +    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
   29.93    }
   29.94  
   29.95    void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
    30.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Oct 16 11:48:03 2013 -0700
    30.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Oct 17 10:58:45 2013 -0700
    30.3 @@ -208,6 +208,7 @@
    30.4    void emit_call(LIR_OpJavaCall* op);
    30.5    void emit_rtcall(LIR_OpRTCall* op);
    30.6    void emit_profile_call(LIR_OpProfileCall* op);
    30.7 +  void emit_profile_type(LIR_OpProfileType* op);
    30.8    void emit_delay(LIR_OpDelay* op);
    30.9  
   30.10    void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
    31.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Oct 16 11:48:03 2013 -0700
    31.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 17 10:58:45 2013 -0700
    31.3 @@ -2571,6 +2571,78 @@
    31.4  }
    31.5  
    31.6  
    31.7 +ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
    31.8 +  ciKlass* result = NULL;
    31.9 +  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
   31.10 +  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
   31.11 +  // known not to be null or null bit already set and already set to
   31.12 +  // unknown: nothing we can do to improve profiling
   31.13 +  if (!do_null && !do_update) {
   31.14 +    return result;
   31.15 +  }
   31.16 +
   31.17 +  ciKlass* exact_klass = NULL;
   31.18 +  Compilation* comp = Compilation::current();
   31.19 +  if (do_update) {
   31.20 +    // try to find exact type, using CHA if possible, so that loading
   31.21 +    // the klass from the object can be avoided
   31.22 +    ciType* type = arg->exact_type();
   31.23 +    if (type == NULL) {
   31.24 +      type = arg->declared_type();
   31.25 +      type = comp->cha_exact_type(type);
   31.26 +    }
   31.27 +    assert(type == NULL || type->is_klass(), "type should be class");
   31.28 +    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
   31.29 +
   31.30 +    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   31.31 +  }
   31.32 +
   31.33 +  if (!do_null && !do_update) {
   31.34 +    return result;
   31.35 +  }
   31.36 +
   31.37 +  ciKlass* exact_signature_k = NULL;
   31.38 +  if (do_update) {
   31.39 +    // Is the type from the signature exact (the only one possible)?
   31.40 +    exact_signature_k = signature_k->exact_klass();
   31.41 +    if (exact_signature_k == NULL) {
   31.42 +      exact_signature_k = comp->cha_exact_type(signature_k);
   31.43 +    } else {
   31.44 +      result = exact_signature_k;
   31.45 +      do_update = false;
   31.46 +      // Known statically. No need to emit any code: prevent
   31.47 +      // LIR_Assembler::emit_profile_type() from emitting useless code
   31.48 +      profiled_k = ciTypeEntries::with_status(result, profiled_k);
   31.49 +    }
   31.50 +    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
   31.51 +      assert(exact_klass == NULL, "arg and signature disagree?");
   31.52 +      // sometimes the type of the signature is better than the best type
   31.53 +      // the compiler has
   31.54 +      exact_klass = exact_signature_k;
   31.55 +      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   31.56 +    }
   31.57 +  }
   31.58 +
   31.59 +  if (!do_null && !do_update) {
   31.60 +    return result;
   31.61 +  }
   31.62 +
   31.63 +  if (mdp == LIR_OprFact::illegalOpr) {
   31.64 +    mdp = new_register(T_METADATA);
   31.65 +    __ metadata2reg(md->constant_encoding(), mdp);
   31.66 +    if (md_base_offset != 0) {
   31.67 +      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
   31.68 +      mdp = new_pointer_register();
   31.69 +      __ leal(LIR_OprFact::address(base_type_address), mdp);
   31.70 +    }
   31.71 +  }
   31.72 +  LIRItem value(arg, this);
   31.73 +  value.load_item();
   31.74 +  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
   31.75 +                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
   31.76 +  return result;
   31.77 +}
   31.78 +
   31.79  void LIRGenerator::do_Base(Base* x) {
   31.80    __ std_entry(LIR_OprFact::illegalOpr);
   31.81    // Emit moves from physical registers / stack slots to virtual registers
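
profiled_k above is an intptr_t that carries both a klass pointer and status
flags; was_null_seen(), is_type_unknown() and with_status() read and write
those flags. A simplified sketch of that tagged-pointer encoding (the flag
values here are assumptions for illustration; the authoritative constants
live in TypeEntries):

#include <assert.h>
#include <stdint.h>

const intptr_t kNullSeen    = 1;                 // a null was observed here
const intptr_t kTypeUnknown = 2;                 // conflicting types were observed
const intptr_t kStatusBits  = kNullSeen | kTypeUnknown;

intptr_t with_status(void* klass, intptr_t old) {
  return (intptr_t)klass | (old & kStatusBits);  // keep the flags, swap the pointer
}
bool  was_null_seen(intptr_t k)   { return (k & kNullSeen) != 0; }
bool  is_type_unknown(intptr_t k) { return (k & kTypeUnknown) != 0; }
void* klass_part(intptr_t k)      { return (void*)(k & ~kStatusBits); }

int main() {
  static int dummy_klass;                        // stand-in for a Klass; suitably aligned
  intptr_t k = with_status(&dummy_klass, kNullSeen);
  assert(was_null_seen(k));
  assert(!is_type_unknown(k));
  assert(klass_part(k) == &dummy_klass);
  return 0;
}
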
   31.82 @@ -3004,12 +3076,52 @@
   31.83    }
   31.84  }
   31.85  
   31.86 +void LIRGenerator::profile_arguments(ProfileCall* x) {
   31.87 +  if (MethodData::profile_arguments()) {
   31.88 +    int bci = x->bci_of_invoke();
   31.89 +    ciMethodData* md = x->method()->method_data_or_null();
   31.90 +    ciProfileData* data = md->bci_to_data(bci);
   31.91 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   31.92 +      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
   31.93 +      int base_offset = md->byte_offset_of_slot(data, extra);
   31.94 +      LIR_Opr mdp = LIR_OprFact::illegalOpr;
   31.95 +      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
   31.96 +
   31.97 +      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
   31.98 +      int start = 0;
   31.99 +      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
  31.100 +      if (x->nb_profiled_args() < stop) {
  31.101 +        // if called through method handle invoke, some arguments may have been popped
  31.102 +        stop = x->nb_profiled_args();
  31.103 +      }
  31.104 +      ciSignature* sig = x->callee()->signature();
  31.105 +      // method handle call to virtual method
  31.106 +      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
  31.107 +      ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
  31.108 +      for (int i = 0; i < stop; i++) {
  31.109 +        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
  31.110 +        ciKlass* exact = profile_arg_type(md, base_offset, off,
  31.111 +                                          args->type(i), x->profiled_arg_at(i+start), mdp,
  31.112 +                                          !x->arg_needs_null_check(i+start), sig_stream.next_klass());
  31.113 +        if (exact != NULL) {
  31.114 +          md->set_argument_type(bci, i, exact);
  31.115 +        }
  31.116 +      }
  31.117 +    }
  31.118 +  }
  31.119 +}
  31.120 +
  31.121  void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  31.122    // Need recv in a temporary register so it interferes with the other temporaries
  31.123    LIR_Opr recv = LIR_OprFact::illegalOpr;
  31.124    LIR_Opr mdo = new_register(T_OBJECT);
  31.125    // tmp is used to hold the counters on SPARC
  31.126    LIR_Opr tmp = new_pointer_register();
  31.127 +
  31.128 +  if (x->nb_profiled_args() > 0) {
  31.129 +    profile_arguments(x);
  31.130 +  }
  31.131 +
  31.132    if (x->recv() != NULL) {
  31.133      LIRItem value(x->recv(), this);
  31.134      value.load_item();
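
Note how profile_arg_type() takes mdp by reference and only materializes it
while it is still illegalOpr: when profile_arguments() loops over several
profiled arguments of one call site, the metadata2reg/leal setup is emitted
at most once. A standalone sketch of that lazy-init-through-reference idiom
(hypothetical names):

#include <assert.h>

struct Reg {
  int id;
  Reg() : id(-1) {}
  bool is_valid() const { return id >= 0; }
};

static int regs_allocated = 0;
Reg new_register() { Reg r; r.id = regs_allocated++; return r; }

void profile_one_value(Reg& mdp) {
  if (!mdp.is_valid()) {
    mdp = new_register();       // corresponds to the one-time mdp setup above
  }
  // ... emit a profile_type op that reads mdp ...
}

int main() {
  Reg mdp;                      // starts out "illegal"
  profile_one_value(mdp);
  profile_one_value(mdp);
  profile_one_value(mdp);
  assert(regs_allocated == 1);  // the pointer was set up exactly once
  return 0;
}
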
  31.135 @@ -3019,6 +3131,21 @@
  31.136    __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
  31.137  }
  31.138  
  31.139 +void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  31.140 +  int bci = x->bci_of_invoke();
  31.141 +  ciMethodData* md = x->method()->method_data_or_null();
  31.142 +  ciProfileData* data = md->bci_to_data(bci);
  31.143 +  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
  31.144 +  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
  31.145 +  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  31.146 +  ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
  31.147 +                                    ret->type(), x->ret(), mdp,
  31.148 +                                    !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
  31.149 +  if (exact != NULL) {
  31.150 +    md->set_return_type(bci, exact);
  31.151 +  }
  31.152 +}
  31.153 +
  31.154  void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  31.155    // We can safely ignore accessors here, since c2 will inline them anyway,
  31.156    // accessors are also always mature.
  31.157 @@ -3053,7 +3180,11 @@
  31.158    int offset = -1;
  31.159    LIR_Opr counter_holder;
  31.160    if (level == CompLevel_limited_profile) {
  31.161 -    address counters_adr = method->ensure_method_counters();
  31.162 +    MethodCounters* counters_adr = method->ensure_method_counters();
  31.163 +    if (counters_adr == NULL) {
  31.164 +      bailout("method counters allocation failed");
  31.165 +      return;
  31.166 +    }
  31.167      counter_holder = new_pointer_register();
  31.168      __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
  31.169      offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
    32.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Oct 16 11:48:03 2013 -0700
    32.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Oct 17 10:58:45 2013 -0700
    32.3 @@ -434,6 +434,8 @@
    32.4    void do_ThreadIDIntrinsic(Intrinsic* x);
    32.5    void do_ClassIDIntrinsic(Intrinsic* x);
    32.6  #endif
    32.7 +  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
    32.8 +  void profile_arguments(ProfileCall* x);
    32.9  
   32.10   public:
   32.11    Compilation*  compilation() const              { return _compilation; }
   32.12 @@ -534,6 +536,7 @@
   32.13    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   32.14    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   32.15    virtual void do_ProfileCall    (ProfileCall*     x);
   32.16 +  virtual void do_ProfileReturnType (ProfileReturnType* x);
   32.17    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   32.18    virtual void do_RuntimeCall    (RuntimeCall*     x);
   32.19    virtual void do_MemBar         (MemBar*          x);
    33.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Oct 16 11:48:03 2013 -0700
    33.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Thu Oct 17 10:58:45 2013 -0700
    33.3 @@ -531,6 +531,7 @@
    33.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    33.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    33.6    void do_ProfileCall    (ProfileCall*     x);
    33.7 +  void do_ProfileReturnType (ProfileReturnType*  x);
    33.8    void do_ProfileInvoke  (ProfileInvoke*   x);
    33.9    void do_RuntimeCall    (RuntimeCall*     x);
   33.10    void do_MemBar         (MemBar*          x);
   33.11 @@ -657,6 +658,8 @@
   33.12    void handle_Intrinsic       (Intrinsic* x);
   33.13    void handle_ExceptionObject (ExceptionObject* x);
   33.14    void handle_Phi             (Phi* x);
   33.15 +  void handle_ProfileCall     (ProfileCall* x);
   33.16 +  void handle_ProfileReturnType (ProfileReturnType* x);
   33.17  };
   33.18  
   33.19  
   33.20 @@ -715,7 +718,9 @@
   33.21  void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
   33.22  void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
   33.23  void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
   33.24 -void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
   33.25 +void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
   33.26 +                                                                nce()->handle_ProfileCall(x); }
   33.27 +void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
   33.28  void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
   33.29  void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
   33.30  void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
   33.31 @@ -1134,6 +1139,15 @@
   33.32    }
   33.33  }
   33.34  
   33.35 +void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
   33.36 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   33.37 +    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
   33.38 +  }
   33.39 +}
   33.40 +
   33.41 +void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
   33.42 +  x->set_needs_null_check(!set_contains(x->ret()));
   33.43 +}
   33.44  
   33.45  void Optimizer::eliminate_null_checks() {
   33.46    ResourceMark rm;
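
The two new NullCheckEliminator handlers feed the eliminator's flow-sensitive
knowledge back into the profiling nodes: set_contains(v) holds for values
already proven non-null on the current path, so the corresponding
ArgsNonNullState bits (and the ProfileReturnType null check) can be cleared.
A minimal restatement with illustrative types:

#include <assert.h>
#include <set>

typedef int ValueId;                     // stand-in for an IR value

struct ProfiledArgs {
  ValueId arg[2];
  bool    needs_check[2];
};

// Mirrors handle_ProfileCall: an argument needs a runtime null check exactly
// when it is absent from the set of values already proven non-null.
void update_null_checks(const std::set<ValueId>& nonnull, ProfiledArgs& x) {
  for (int i = 0; i < 2; i++) {
    x.needs_check[i] = (nonnull.count(x.arg[i]) == 0);
  }
}

int main() {
  std::set<ValueId> nonnull;
  nonnull.insert(42);                    // value 42 was explicitly null-checked
  ProfiledArgs x = { {42, 7}, {true, true} };
  update_null_checks(nonnull, x);
  assert(!x.needs_check[0]);             // proven non-null: drop the check
  assert(x.needs_check[1]);              // unknown: keep the check
  return 0;
}
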
    34.1 --- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Wed Oct 16 11:48:03 2013 -0700
    34.2 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Thu Oct 17 10:58:45 2013 -0700
    34.3 @@ -162,7 +162,8 @@
    34.4      void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
    34.5      void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
    34.6      void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
    34.7 -    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
    34.8 +    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
    34.9 +    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   34.10      void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   34.11      void do_MemBar         (MemBar*          x) { /* nothing to do */ };
   34.12      void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    35.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Oct 16 11:48:03 2013 -0700
    35.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 17 10:58:45 2013 -0700
    35.3 @@ -542,8 +542,7 @@
    35.4      // exception handler can cause class loading, which might throw an
    35.5      // exception and those fields are expected to be clear during
    35.6      // normal bytecode execution.
    35.7 -    thread->set_exception_oop(NULL);
    35.8 -    thread->set_exception_pc(NULL);
    35.9 +    thread->clear_exception_oop_and_pc();
   35.10  
   35.11      continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
   35.12      // If an exception was thrown during exception dispatch, the exception oop may have changed
    36.1 --- a/src/share/vm/c1/c1_ValueMap.hpp	Wed Oct 16 11:48:03 2013 -0700
    36.2 +++ b/src/share/vm/c1/c1_ValueMap.hpp	Thu Oct 17 10:58:45 2013 -0700
    36.3 @@ -203,6 +203,7 @@
    36.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
    36.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
    36.6    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
    36.7 +  void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ }
    36.8    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
    36.9    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   36.10    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    37.1 --- a/src/share/vm/ci/ciClassList.hpp	Wed Oct 16 11:48:03 2013 -0700
    37.2 +++ b/src/share/vm/ci/ciClassList.hpp	Thu Oct 17 10:58:45 2013 -0700
    37.3 @@ -102,6 +102,7 @@
    37.4  friend class ciMethodHandle;           \
    37.5  friend class ciMethodType;             \
    37.6  friend class ciReceiverTypeData;       \
    37.7 +friend class ciTypeEntries;            \
    37.8  friend class ciSymbol;                 \
    37.9  friend class ciArray;                  \
   37.10  friend class ciObjArray;               \
    38.1 --- a/src/share/vm/ci/ciEnv.cpp	Wed Oct 16 11:48:03 2013 -0700
    38.2 +++ b/src/share/vm/ci/ciEnv.cpp	Thu Oct 17 10:58:45 2013 -0700
    38.3 @@ -1154,9 +1154,12 @@
    38.4    GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
    38.5  }
    38.6  
    38.7 -void ciEnv::dump_replay_data(outputStream* out) {
    38.8 -  VM_ENTRY_MARK;
    38.9 -  MutexLocker ml(Compile_lock);
   38.10 +// ------------------------------------------------------------------
   38.11 +// ciEnv::dump_replay_data*
   38.12 +
   38.13 +// Don't change thread state and acquire any locks.
   38.14 +// Safe to call from VM error reporter.
   38.15 +void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   38.16    ResourceMark rm;
   38.17  #if INCLUDE_JVMTI
   38.18    out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
   38.19 @@ -1181,3 +1184,10 @@
   38.20                  entry_bci, comp_level);
   38.21    out->flush();
   38.22  }
   38.23 +
   38.24 +void ciEnv::dump_replay_data(outputStream* out) {
   38.25 +  GUARDED_VM_ENTRY(
   38.26 +    MutexLocker ml(Compile_lock);
   38.27 +    dump_replay_data_unsafe(out);
   38.28 +  )
   38.29 +}
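
The ciEnv change splits replay dumping into a locked public entry point and an
_unsafe body that the VM error reporter can call without acquiring locks or
changing thread state. A sketch of that wrapper split; std::mutex stands in
for the real Compile_lock/GUARDED_VM_ENTRY machinery and is not what HotSpot
uses:

#include <cstdio>
#include <mutex>

static std::mutex compile_lock;          // stand-in for Compile_lock

// Callable from a crash path: takes no locks, changes no thread state.
void dump_replay_data_unsafe(std::FILE* out) {
  std::fputs("compile ...\n", out);
  std::fflush(out);
}

// Normal entry point: same body, but behind the lock.
void dump_replay_data(std::FILE* out) {
  std::lock_guard<std::mutex> ml(compile_lock);
  dump_replay_data_unsafe(out);
}

int main() {
  dump_replay_data(stdout);
  return 0;
}
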
    39.1 --- a/src/share/vm/ci/ciEnv.hpp	Wed Oct 16 11:48:03 2013 -0700
    39.2 +++ b/src/share/vm/ci/ciEnv.hpp	Thu Oct 17 10:58:45 2013 -0700
    39.3 @@ -452,6 +452,7 @@
    39.4  
    39.5    // Dump the compilation replay data for the ciEnv to the stream.
    39.6    void dump_replay_data(outputStream* out);
    39.7 +  void dump_replay_data_unsafe(outputStream* out);
    39.8  };
    39.9  
   39.10  #endif // SHARE_VM_CI_CIENV_HPP
    40.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Wed Oct 16 11:48:03 2013 -0700
    40.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 17 10:58:45 2013 -0700
    40.3 @@ -671,7 +671,6 @@
    40.4  
    40.5  
    40.6  void ciInstanceKlass::dump_replay_data(outputStream* out) {
    40.7 -  ASSERT_IN_VM;
    40.8    ResourceMark rm;
    40.9  
   40.10    InstanceKlass* ik = get_instanceKlass();
    41.1 --- a/src/share/vm/ci/ciInstanceKlass.hpp	Wed Oct 16 11:48:03 2013 -0700
    41.2 +++ b/src/share/vm/ci/ciInstanceKlass.hpp	Thu Oct 17 10:58:45 2013 -0700
    41.3 @@ -235,6 +235,13 @@
    41.4    bool is_instance_klass() const { return true; }
    41.5    bool is_java_klass() const     { return true; }
    41.6  
    41.7 +  virtual ciKlass* exact_klass() {
    41.8 +    if (is_loaded() && is_final() && !is_interface()) {
    41.9 +      return this;
   41.10 +    }
   41.11 +    return NULL;
   41.12 +  }
   41.13 +
   41.14    // Dump the current state of this klass for compilation replay.
   41.15    virtual void dump_replay_data(outputStream* out);
   41.16  };
    42.1 --- a/src/share/vm/ci/ciKlass.cpp	Wed Oct 16 11:48:03 2013 -0700
    42.2 +++ b/src/share/vm/ci/ciKlass.cpp	Thu Oct 17 10:58:45 2013 -0700
    42.3 @@ -66,7 +66,9 @@
    42.4  // ------------------------------------------------------------------
    42.5  // ciKlass::is_subtype_of
    42.6  bool ciKlass::is_subtype_of(ciKlass* that) {
    42.7 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
    42.8 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
    42.9 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   42.10 +
   42.11    // Check to see if the klasses are identical.
   42.12    if (this == that) {
   42.13      return true;
   42.14 @@ -83,8 +85,8 @@
   42.15  // ------------------------------------------------------------------
   42.16  // ciKlass::is_subclass_of
   42.17  bool ciKlass::is_subclass_of(ciKlass* that) {
   42.18 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
   42.19 -  // Check to see if the klasses are identical.
   42.20 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
   42.21 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   42.22  
   42.23    VM_ENTRY_MARK;
   42.24    Klass* this_klass = get_Klass();
    43.1 --- a/src/share/vm/ci/ciKlass.hpp	Wed Oct 16 11:48:03 2013 -0700
    43.2 +++ b/src/share/vm/ci/ciKlass.hpp	Thu Oct 17 10:58:45 2013 -0700
    43.3 @@ -41,6 +41,7 @@
    43.4    friend class ciEnv;
    43.5    friend class ciField;
    43.6    friend class ciMethod;
    43.7 +  friend class ciMethodData;
    43.8    friend class ciObjArrayKlass;
    43.9  
   43.10  private:
   43.11 @@ -121,6 +122,8 @@
   43.12    // What kind of ciObject is this?
   43.13    bool is_klass() const { return true; }
   43.14  
   43.15 +  virtual ciKlass* exact_klass() = 0;
   43.16 +
   43.17    void print_name_on(outputStream* st);
   43.18  };
   43.19  
    44.1 --- a/src/share/vm/ci/ciMethod.cpp	Wed Oct 16 11:48:03 2013 -0700
    44.2 +++ b/src/share/vm/ci/ciMethod.cpp	Thu Oct 17 10:58:45 2013 -0700
    44.3 @@ -846,7 +846,9 @@
    44.4  // Return true if allocation was successful or no MDO is required.
    44.5  bool ciMethod::ensure_method_data(methodHandle h_m) {
    44.6    EXCEPTION_CONTEXT;
    44.7 -  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
    44.8 +  if (is_native() || is_abstract() || h_m()->is_accessor()) {
    44.9 +    return true;
   44.10 +  }
   44.11    if (h_m()->method_data() == NULL) {
   44.12      Method::build_interpreter_method_data(h_m, THREAD);
   44.13      if (HAS_PENDING_EXCEPTION) {
   44.14 @@ -903,22 +905,21 @@
   44.15  // NULL otherwise.
   44.16  ciMethodData* ciMethod::method_data_or_null() {
   44.17    ciMethodData *md = method_data();
   44.18 -  if (md->is_empty()) return NULL;
   44.19 +  if (md->is_empty()) {
   44.20 +    return NULL;
   44.21 +  }
   44.22    return md;
   44.23  }
   44.24  
   44.25  // ------------------------------------------------------------------
   44.26  // ciMethod::ensure_method_counters
   44.27  //
   44.28 -address ciMethod::ensure_method_counters() {
   44.29 +MethodCounters* ciMethod::ensure_method_counters() {
   44.30    check_is_loaded();
   44.31    VM_ENTRY_MARK;
   44.32    methodHandle mh(THREAD, get_Method());
   44.33 -  MethodCounters *counter = mh->method_counters();
   44.34 -  if (counter == NULL) {
   44.35 -    counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
   44.36 -  }
   44.37 -  return (address)counter;
   44.38 +  MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
   44.39 +  return method_counters;
   44.40  }
   44.41  
   44.42  // ------------------------------------------------------------------
   44.43 @@ -1247,7 +1248,6 @@
   44.44  #undef FETCH_FLAG_FROM_VM
   44.45  
   44.46  void ciMethod::dump_replay_data(outputStream* st) {
   44.47 -  ASSERT_IN_VM;
   44.48    ResourceMark rm;
   44.49    Method* method = get_Method();
   44.50    MethodCounters* mcs = method->method_counters();
    45.1 --- a/src/share/vm/ci/ciMethod.hpp	Wed Oct 16 11:48:03 2013 -0700
    45.2 +++ b/src/share/vm/ci/ciMethod.hpp	Thu Oct 17 10:58:45 2013 -0700
    45.3 @@ -265,7 +265,7 @@
    45.4    bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
    45.5    bool check_call(int refinfo_index, bool is_static) const;
    45.6    bool ensure_method_data();  // make sure it exists in the VM also
    45.7 -  address ensure_method_counters();
    45.8 +  MethodCounters* ensure_method_counters();
    45.9    int instructions_size();
   45.10    int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
   45.11  
    46.1 --- a/src/share/vm/ci/ciMethodData.cpp	Wed Oct 16 11:48:03 2013 -0700
    46.2 +++ b/src/share/vm/ci/ciMethodData.cpp	Thu Oct 17 10:58:45 2013 -0700
    46.3 @@ -78,7 +78,9 @@
    46.4  
    46.5  void ciMethodData::load_data() {
    46.6    MethodData* mdo = get_MethodData();
    46.7 -  if (mdo == NULL) return;
    46.8 +  if (mdo == NULL) {
    46.9 +    return;
   46.10 +  }
   46.11  
   46.12    // To do: don't copy the data if it is not "ripe" -- require a minimum #
   46.13    // of invocations.
   46.14 @@ -123,7 +125,7 @@
   46.15  #endif
   46.16  }
   46.17  
   46.18 -void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
   46.19 +void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   46.20    for (uint row = 0; row < row_limit(); row++) {
   46.21      Klass* k = data->as_ReceiverTypeData()->receiver(row);
   46.22      if (k != NULL) {
   46.23 @@ -134,6 +136,18 @@
   46.24  }
   46.25  
   46.26  
   46.27 +void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
   46.28 +  for (int i = 0; i < _number_of_entries; i++) {
   46.29 +    intptr_t k = entries->type(i);
   46.30 +    TypeStackSlotEntries::set_type(i, translate_klass(k));
   46.31 +  }
   46.32 +}
   46.33 +
   46.34 +void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
   46.35 +  intptr_t k = ret->type();
   46.36 +  set_type(translate_klass(k));
   46.37 +}
   46.38 +
   46.39  // Get the data at an arbitrary (sort of) data index.
   46.40  ciProfileData* ciMethodData::data_at(int data_index) {
   46.41    if (out_of_bounds(data_index)) {
   46.42 @@ -164,6 +178,10 @@
   46.43      return new ciMultiBranchData(data_layout);
   46.44    case DataLayout::arg_info_data_tag:
   46.45      return new ciArgInfoData(data_layout);
   46.46 +  case DataLayout::call_type_data_tag:
   46.47 +    return new ciCallTypeData(data_layout);
   46.48 +  case DataLayout::virtual_call_type_data_tag:
   46.49 +    return new ciVirtualCallTypeData(data_layout);
   46.50    };
   46.51  }
   46.52  
   46.53 @@ -286,6 +304,34 @@
   46.54    }
   46.55  }
   46.56  
   46.57 +void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
   46.58 +  VM_ENTRY_MARK;
   46.59 +  MethodData* mdo = get_MethodData();
   46.60 +  if (mdo != NULL) {
   46.61 +    ProfileData* data = mdo->bci_to_data(bci);
   46.62 +    if (data->is_CallTypeData()) {
   46.63 +      data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
   46.64 +    } else {
   46.65 +      assert(data->is_VirtualCallTypeData(), "no arguments!");
   46.66 +      data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
   46.67 +    }
   46.68 +  }
   46.69 +}
   46.70 +
   46.71 +void ciMethodData::set_return_type(int bci, ciKlass* k) {
   46.72 +  VM_ENTRY_MARK;
   46.73 +  MethodData* mdo = get_MethodData();
   46.74 +  if (mdo != NULL) {
   46.75 +    ProfileData* data = mdo->bci_to_data(bci);
   46.76 +    if (data->is_CallTypeData()) {
   46.77 +      data->as_CallTypeData()->set_return_type(k->get_Klass());
   46.78 +    } else {
   46.79 +      assert(data->is_VirtualCallTypeData(), "no arguments!");
   46.80 +      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
   46.81 +    }
   46.82 +  }
   46.83 +}
   46.84 +
   46.85  bool ciMethodData::has_escape_info() {
   46.86    return eflag_set(MethodData::estimated);
   46.87  }
   46.88 @@ -373,7 +419,6 @@
   46.89  }
   46.90  
   46.91  void ciMethodData::dump_replay_data(outputStream* out) {
   46.92 -  ASSERT_IN_VM;
   46.93    ResourceMark rm;
   46.94    MethodData* mdo = get_MethodData();
   46.95    Method* method = mdo->method();
   46.96 @@ -477,7 +522,50 @@
   46.97    }
   46.98  }
   46.99  
  46.100 -void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
  46.101 +void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
  46.102 +  if (TypeEntries::is_type_none(k)) {
  46.103 +    st->print("none");
  46.104 +  } else if (TypeEntries::is_type_unknown(k)) {
  46.105 +    st->print("unknown");
  46.106 +  } else {
  46.107 +    valid_ciklass(k)->print_name_on(st);
  46.108 +  }
  46.109 +  if (TypeEntries::was_null_seen(k)) {
  46.110 +    st->print(" (null seen)");
  46.111 +  }
  46.112 +}
  46.113 +
  46.114 +void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
  46.115 +  for (int i = 0; i < _number_of_entries; i++) {
  46.116 +    _pd->tab(st);
  46.117 +    st->print("%d: stack (%u) ", i, stack_slot(i));
  46.118 +    print_ciklass(st, type(i));
  46.119 +    st->cr();
  46.120 +  }
  46.121 +}
  46.122 +
  46.123 +void ciReturnTypeEntry::print_data_on(outputStream* st) const {
  46.124 +  _pd->tab(st);
  46.125 +  st->print("ret ");
  46.126 +  print_ciklass(st, type());
  46.127 +  st->cr();
  46.128 +}
  46.129 +
  46.130 +void ciCallTypeData::print_data_on(outputStream* st) const {
  46.131 +  print_shared(st, "ciCallTypeData");
  46.132 +  if (has_arguments()) {
  46.133 +    tab(st, true);
  46.134 +    st->print("argument types");
  46.135 +    args()->print_data_on(st);
  46.136 +  }
  46.137 +  if (has_return()) {
  46.138 +    tab(st, true);
  46.139 +    st->print("return type");
  46.140 +    ret()->print_data_on(st);
  46.141 +  }
  46.142 +}
  46.143 +
  46.144 +void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  46.145    uint row;
  46.146    int entries = 0;
  46.147    for (row = 0; row < row_limit(); row++) {
  46.148 @@ -493,13 +581,28 @@
  46.149    }
  46.150  }
  46.151  
  46.152 -void ciReceiverTypeData::print_data_on(outputStream* st) {
  46.153 +void ciReceiverTypeData::print_data_on(outputStream* st) const {
  46.154    print_shared(st, "ciReceiverTypeData");
  46.155    print_receiver_data_on(st);
  46.156  }
  46.157  
  46.158 -void ciVirtualCallData::print_data_on(outputStream* st) {
  46.159 +void ciVirtualCallData::print_data_on(outputStream* st) const {
  46.160    print_shared(st, "ciVirtualCallData");
  46.161    rtd_super()->print_receiver_data_on(st);
  46.162  }
  46.163 +
  46.164 +void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
  46.165 +  print_shared(st, "ciVirtualCallTypeData");
  46.166 +  rtd_super()->print_receiver_data_on(st);
  46.167 +  if (has_arguments()) {
  46.168 +    tab(st, true);
  46.169 +    st->print("argument types");
  46.170 +    args()->print_data_on(st);
  46.171 +  }
  46.172 +  if (has_return()) {
  46.173 +    tab(st, true);
  46.174 +    st->print("return type");
  46.175 +    ret()->print_data_on(st);
  46.176 +  }
  46.177 +}
  46.178  #endif
    47.1 --- a/src/share/vm/ci/ciMethodData.hpp	Wed Oct 16 11:48:03 2013 -0700
    47.2 +++ b/src/share/vm/ci/ciMethodData.hpp	Thu Oct 17 10:58:45 2013 -0700
    47.3 @@ -41,6 +41,8 @@
    47.4  class ciArrayData;
    47.5  class ciMultiBranchData;
    47.6  class ciArgInfoData;
    47.7 +class ciCallTypeData;
    47.8 +class ciVirtualCallTypeData;
    47.9  
   47.10  typedef ProfileData ciProfileData;
   47.11  
   47.12 @@ -59,6 +61,103 @@
   47.13    ciJumpData(DataLayout* layout) : JumpData(layout) {};
   47.14  };
   47.15  
   47.16 +class ciTypeEntries {
   47.17 +protected:
   47.18 +  static intptr_t translate_klass(intptr_t k) {
   47.19 +    Klass* v = TypeEntries::valid_klass(k);
   47.20 +    if (v != NULL) {
   47.21 +      ciKlass* klass = CURRENT_ENV->get_klass(v);
   47.22 +      return with_status(klass, k);
   47.23 +    }
   47.24 +    return with_status(NULL, k);
   47.25 +  }
   47.26 +
   47.27 +public:
   47.28 +  static ciKlass* valid_ciklass(intptr_t k) {
   47.29 +    if (!TypeEntries::is_type_none(k) &&
   47.30 +        !TypeEntries::is_type_unknown(k)) {
   47.31 +      return (ciKlass*)TypeEntries::klass_part(k);
   47.32 +    } else {
   47.33 +      return NULL;
   47.34 +    }
   47.35 +  }
   47.36 +
   47.37 +  static intptr_t with_status(ciKlass* k, intptr_t in) {
   47.38 +    return TypeEntries::with_status((intptr_t)k, in);
   47.39 +  }
   47.40 +
   47.41 +#ifndef PRODUCT
   47.42 +  static void print_ciklass(outputStream* st, intptr_t k);
   47.43 +#endif
   47.44 +};
   47.45 +
   47.46 +class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
   47.47 +public:
   47.48 +  void translate_type_data_from(const TypeStackSlotEntries* args);
   47.49 +
   47.50 +  ciKlass* valid_type(int i) const {
   47.51 +    return valid_ciklass(type(i));
   47.52 +  }
   47.53 +
   47.54 +#ifndef PRODUCT
   47.55 +  void print_data_on(outputStream* st) const;
   47.56 +#endif
   47.57 +};
   47.58 +
   47.59 +class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
   47.60 +public:
   47.61 +  void translate_type_data_from(const ReturnTypeEntry* ret);
   47.62 +
   47.63 +  ciKlass* valid_type() const {
   47.64 +    return valid_ciklass(type());
   47.65 +  }
   47.66 +
   47.67 +#ifndef PRODUCT
   47.68 +  void print_data_on(outputStream* st) const;
   47.69 +#endif
   47.70 +};
   47.71 +
   47.72 +class ciCallTypeData : public CallTypeData {
   47.73 +public:
   47.74 +  ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
   47.75 +
   47.76 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
   47.77 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
   47.78 +
   47.79 +  void translate_type_data_from(const ProfileData* data) {
   47.80 +    if (has_arguments()) {
   47.81 +      args()->translate_type_data_from(data->as_CallTypeData()->args());
   47.82 +    }
   47.83 +    if (has_return()) {
   47.84 +      ret()->translate_type_data_from(data->as_CallTypeData()->ret());
   47.85 +    }
   47.86 +  }
   47.87 +
   47.88 +  intptr_t argument_type(int i) const {
   47.89 +    assert(has_arguments(), "no arg type profiling data");
   47.90 +    return args()->type(i);
   47.91 +  }
   47.92 +
   47.93 +  ciKlass* valid_argument_type(int i) const {
   47.94 +    assert(has_arguments(), "no arg type profiling data");
   47.95 +    return args()->valid_type(i);
   47.96 +  }
   47.97 +
   47.98 +  intptr_t return_type() const {
   47.99 +    assert(has_return(), "no ret type profiling data");
  47.100 +    return ret()->type();
  47.101 +  }
  47.102 +
  47.103 +  ciKlass* valid_return_type() const {
  47.104 +    assert(has_return(), "no ret type profiling data");
  47.105 +    return ret()->valid_type();
  47.106 +  }
  47.107 +
  47.108 +#ifndef PRODUCT
  47.109 +  void print_data_on(outputStream* st) const;
  47.110 +#endif
  47.111 +};
  47.112 +
  47.113  class ciReceiverTypeData : public ReceiverTypeData {
  47.114  public:
  47.115    ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
  47.116 @@ -69,7 +168,7 @@
  47.117                    (intptr_t) recv);
  47.118    }
  47.119  
  47.120 -  ciKlass* receiver(uint row) {
  47.121 +  ciKlass* receiver(uint row) const {
  47.122      assert((uint)row < row_limit(), "oob");
  47.123      ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
  47.124      assert(recv == NULL || recv->is_klass(), "wrong type");
  47.125 @@ -77,19 +176,19 @@
  47.126    }
  47.127  
  47.128    // Copy & translate from oop based ReceiverTypeData
  47.129 -  virtual void translate_from(ProfileData* data) {
  47.130 +  virtual void translate_from(const ProfileData* data) {
  47.131      translate_receiver_data_from(data);
  47.132    }
  47.133 -  void translate_receiver_data_from(ProfileData* data);
  47.134 +  void translate_receiver_data_from(const ProfileData* data);
  47.135  #ifndef PRODUCT
  47.136 -  void print_data_on(outputStream* st);
  47.137 -  void print_receiver_data_on(outputStream* st);
  47.138 +  void print_data_on(outputStream* st) const;
  47.139 +  void print_receiver_data_on(outputStream* st) const;
  47.140  #endif
  47.141  };
  47.142  
  47.143  class ciVirtualCallData : public VirtualCallData {
  47.144    // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  47.145 -  ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
  47.146 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  47.147  
  47.148  public:
  47.149    ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
  47.150 @@ -103,11 +202,65 @@
  47.151    }
  47.152  
  47.153    // Copy & translate from oop based VirtualCallData
  47.154 -  virtual void translate_from(ProfileData* data) {
  47.155 +  virtual void translate_from(const ProfileData* data) {
  47.156      rtd_super()->translate_receiver_data_from(data);
  47.157    }
  47.158  #ifndef PRODUCT
  47.159 -  void print_data_on(outputStream* st);
  47.160 +  void print_data_on(outputStream* st) const;
  47.161 +#endif
  47.162 +};
  47.163 +
  47.164 +class ciVirtualCallTypeData : public VirtualCallTypeData {
  47.165 +private:
  47.166 +  // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  47.167 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  47.168 +public:
  47.169 +  ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
  47.170 +
  47.171 +  void set_receiver(uint row, ciKlass* recv) {
  47.172 +    rtd_super()->set_receiver(row, recv);
  47.173 +  }
  47.174 +
  47.175 +  ciKlass* receiver(uint row) const {
  47.176 +    return rtd_super()->receiver(row);
  47.177 +  }
  47.178 +
  47.179 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
  47.180 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
  47.181 +
  47.182 +  // Copy & translate from oop based VirtualCallData
  47.183 +  virtual void translate_from(const ProfileData* data) {
  47.184 +    rtd_super()->translate_receiver_data_from(data);
  47.185 +    if (has_arguments()) {
  47.186 +      args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
  47.187 +    }
  47.188 +    if (has_return()) {
  47.189 +      ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
  47.190 +    }
  47.191 +  }
  47.192 +
  47.193 +  intptr_t argument_type(int i) const {
  47.194 +    assert(has_arguments(), "no arg type profiling data");
  47.195 +    return args()->type(i);
  47.196 +  }
  47.197 +
  47.198 +  ciKlass* valid_argument_type(int i) const {
  47.199 +    assert(has_arguments(), "no arg type profiling data");
  47.200 +    return args()->valid_type(i);
  47.201 +  }
  47.202 +
  47.203 +  intptr_t return_type() const {
  47.204 +    assert(has_return(), "no ret type profiling data");
  47.205 +    return ret()->type();
  47.206 +  }
  47.207 +
  47.208 +  ciKlass* valid_return_type() const {
  47.209 +    assert(has_return(), "no ret type profiling data");
  47.210 +    return ret()->valid_type();
  47.211 +  }
  47.212 +
  47.213 +#ifndef PRODUCT
  47.214 +  void print_data_on(outputStream* st) const;
  47.215  #endif
  47.216  };
  47.217  
  47.218 @@ -232,8 +385,6 @@
  47.219  public:
  47.220    bool is_method_data() const { return true; }
  47.221  
  47.222 -  void set_mature() { _state = mature_state; }
  47.223 -
  47.224    bool is_empty()  { return _state == empty_state; }
  47.225    bool is_mature() { return _state == mature_state; }
  47.226  
  47.227 @@ -249,6 +400,10 @@
  47.228    // Also set the number of loops and blocks in the method.
  47.229    // Again, this is used to determine if a method is trivial.
  47.230    void set_compilation_stats(short loops, short blocks);
  47.231 +  // If the compiler finds a profiled type that is known statically
  47.232 +  // for sure, set it in the MethodData
  47.233 +  void set_argument_type(int bci, int i, ciKlass* k);
  47.234 +  void set_return_type(int bci, ciKlass* k);
  47.235  
  47.236    void load_data();
  47.237  
    48.1 --- a/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Oct 16 11:48:03 2013 -0700
    48.2 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Thu Oct 17 10:58:45 2013 -0700
    48.3 @@ -179,3 +179,16 @@
    48.4  ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
    48.5    GUARDED_VM_ENTRY(return make_impl(element_klass);)
    48.6  }
    48.7 +
    48.8 +ciKlass* ciObjArrayKlass::exact_klass() {
    48.9 +  ciType* base = base_element_type();
   48.10 +  if (base->is_instance_klass()) {
   48.11 +    ciInstanceKlass* ik = base->as_instance_klass();
   48.12 +    if (ik->exact_klass() != NULL) {
   48.13 +      return this;
   48.14 +    }
   48.15 +  } else if (base->is_primitive_type()) {
   48.16 +    return this;
   48.17 +  }
   48.18 +  return NULL;
   48.19 +}
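
The new ciObjArrayKlass::exact_klass() above reports an object-array type as exact when its base element type is itself exact (ciInstanceKlass::exact_klass() non-NULL, e.g. a loaded final class) or primitive. A minimal standalone sketch of that rule, with purely illustrative names (this is not VM code):

    // Standalone sketch, not HotSpot code: names and types are illustrative.
    #include <cstdio>

    struct Type {
      bool is_primitive;    // int, long, ...
      bool is_exact_class;  // stands in for ik->exact_klass() != NULL
    };

    // An array type T[] can be treated as exact iff its base element type is
    // a primitive or an exact instance class; otherwise a subtype array could
    // hide behind the static type (Object[] vs. String[]).
    static bool array_is_exact(const Type& base) {
      return base.is_primitive || base.is_exact_class;
    }

    int main() {
      Type object_base = { false, false };  // Object[]  -> not exact
      Type string_base = { false, true  };  // String[]  -> exact
      Type int_base    = { true,  false };  // int[]     -> exact
      printf("%d %d %d\n", array_is_exact(object_base),
             array_is_exact(string_base), array_is_exact(int_base));
      return 0;
    }
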
    49.1 --- a/src/share/vm/ci/ciObjArrayKlass.hpp	Wed Oct 16 11:48:03 2013 -0700
    49.2 +++ b/src/share/vm/ci/ciObjArrayKlass.hpp	Thu Oct 17 10:58:45 2013 -0700
    49.3 @@ -73,6 +73,8 @@
    49.4    bool is_obj_array_klass() const { return true; }
    49.5  
    49.6    static ciObjArrayKlass* make(ciKlass* element_klass);
    49.7 +
    49.8 +  virtual ciKlass* exact_klass();
    49.9  };
   49.10  
   49.11  #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP
    50.1 --- a/src/share/vm/ci/ciReplay.cpp	Wed Oct 16 11:48:03 2013 -0700
    50.2 +++ b/src/share/vm/ci/ciReplay.cpp	Thu Oct 17 10:58:45 2013 -0700
    50.3 @@ -965,14 +965,12 @@
    50.4      tty->cr();
    50.5    } else {
    50.6      EXCEPTION_CONTEXT;
    50.7 -    MethodCounters* mcs = method->method_counters();
    50.8      // m->_instructions_size = rec->instructions_size;
    50.9      m->_instructions_size = -1;
   50.10      m->_interpreter_invocation_count = rec->interpreter_invocation_count;
   50.11      m->_interpreter_throwout_count = rec->interpreter_throwout_count;
   50.12 -    if (mcs == NULL) {
   50.13 -      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
   50.14 -    }
   50.15 +    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
   50.16 +    guarantee(mcs != NULL, "method counters allocation failed");
   50.17      mcs->invocation_counter()->_counter = rec->invocation_counter;
   50.18      mcs->backedge_counter()->_counter = rec->backedge_counter;
   50.19    }
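
The replay change above routes through Method::get_method_counters(), which allocates the MethodCounters lazily, instead of each caller checking for NULL and calling build_method_counters() itself. A standalone sketch of that accessor pattern (simplified names, no VM types or TRAPS machinery):

    // Standalone sketch of a lazy-allocation accessor; illustrative only.
    #include <cstdint>
    #include <new>

    struct MethodCounters {
      uint32_t invocation_counter = 0;
      uint32_t backedge_counter   = 0;
    };

    class Method {
      MethodCounters* _method_counters = nullptr;
    public:
      // Build the counters on first use; callers only check the result once.
      MethodCounters* get_method_counters() {
        if (_method_counters == nullptr) {
          _method_counters = new (std::nothrow) MethodCounters();
        }
        return _method_counters;  // may still be NULL if allocation failed
      }
    };
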
    51.1 --- a/src/share/vm/ci/ciStreams.hpp	Wed Oct 16 11:48:03 2013 -0700
    51.2 +++ b/src/share/vm/ci/ciStreams.hpp	Thu Oct 17 10:58:45 2013 -0700
    51.3 @@ -277,11 +277,14 @@
    51.4  class ciSignatureStream : public StackObj {
    51.5  private:
    51.6    ciSignature* _sig;
    51.7 -  int    _pos;
    51.8 +  int          _pos;
    51.9 +  // _holder is the method's holder; next_klass() returns it first, as the receiver type
   51.10 +  ciKlass*     _holder;
   51.11  public:
   51.12 -  ciSignatureStream(ciSignature* signature) {
   51.13 +  ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
   51.14      _sig = signature;
   51.15      _pos = 0;
   51.16 +    _holder = holder;
   51.17    }
   51.18  
   51.19    bool at_return_type() { return _pos == _sig->count(); }
   51.20 @@ -301,6 +304,23 @@
   51.21        return _sig->type_at(_pos);
   51.22      }
   51.23    }
   51.24 +
   51.25 +  // next klass in the signature
   51.26 +  ciKlass* next_klass() {
   51.27 +    ciKlass* sig_k;
   51.28 +    if (_holder != NULL) {
   51.29 +      sig_k = _holder;
   51.30 +      _holder = NULL;
   51.31 +    } else {
   51.32 +      while (!type()->is_klass()) {
   51.33 +        next();
   51.34 +      }
   51.35 +      assert(!at_return_type(), "passed end of signature");
   51.36 +      sig_k = type()->as_klass();
   51.37 +      next();
   51.38 +    }
   51.39 +    return sig_k;
   51.40 +  }
   51.41  };
   51.42  
   51.43  
    52.1 --- a/src/share/vm/ci/ciTypeArrayKlass.hpp	Wed Oct 16 11:48:03 2013 -0700
    52.2 +++ b/src/share/vm/ci/ciTypeArrayKlass.hpp	Thu Oct 17 10:58:45 2013 -0700
    52.3 @@ -57,6 +57,10 @@
    52.4  
    52.5    // Make an array klass corresponding to the specified primitive type.
    52.6    static ciTypeArrayKlass* make(BasicType type);
    52.7 +
    52.8 +  virtual ciKlass* exact_klass() {
    52.9 +    return this;
   52.10 +  }
   52.11  };
   52.12  
   52.13  #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP
    53.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Wed Oct 16 11:48:03 2013 -0700
    53.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 17 10:58:45 2013 -0700
    53.3 @@ -857,7 +857,6 @@
    53.4    m->set_max_locals(params);
    53.5    m->constMethod()->set_stackmap_data(NULL);
    53.6    m->set_code(code_start);
    53.7 -  m->set_force_inline(true);
    53.8  
    53.9    return m;
   53.10  }
    54.1 --- a/src/share/vm/code/codeBlob.cpp	Wed Oct 16 11:48:03 2013 -0700
    54.2 +++ b/src/share/vm/code/codeBlob.cpp	Thu Oct 17 10:58:45 2013 -0700
    54.3 @@ -245,8 +245,8 @@
    54.4  }
    54.5  
    54.6  
    54.7 -void* BufferBlob::operator new(size_t s, unsigned size) throw() {
    54.8 -  void* p = CodeCache::allocate(size);
    54.9 +void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
   54.10 +  void* p = CodeCache::allocate(size, is_critical);
   54.11    return p;
   54.12  }
   54.13  
   54.14 @@ -277,7 +277,10 @@
   54.15    unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   54.16    {
   54.17      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   54.18 -    blob = new (size) AdapterBlob(size, cb);
   54.19 +    // The parameter 'true' indicates a critical memory allocation.
   54.20 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   54.21 +    const bool is_critical = true;
   54.22 +    blob = new (size, is_critical) AdapterBlob(size, cb);
   54.23    }
   54.24    // Track memory usage statistic after releasing CodeCache_lock
   54.25    MemoryService::track_code_cache_memory_usage();
   54.26 @@ -299,7 +302,10 @@
   54.27    size += round_to(buffer_size, oopSize);
   54.28    {
   54.29      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   54.30 -    blob = new (size) MethodHandlesAdapterBlob(size);
   54.31 +    // The parameter 'true' indicates a critical memory allocation.
   54.32 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   54.33 +    const bool is_critical = true;
   54.34 +    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
   54.35    }
   54.36    // Track memory usage statistic after releasing CodeCache_lock
   54.37    MemoryService::track_code_cache_memory_usage();
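
Both adapter blob types above are now allocated as critical, letting them dip into the CodeCacheMinimumFreeSpace reserve when the code cache is otherwise full, since the VM cannot make progress without them. A standalone sketch of the underlying idiom, a class-specific placement operator new that threads an extra flag through to the allocator (illustrative allocator, not CodeCache::allocate):

    // Standalone sketch; shows the flag-forwarding pattern only, not VM code.
    #include <cstdlib>
    #include <new>

    static void* arena_allocate(size_t size, bool is_critical) {
      // A real arena would only hand out its reserved tail when is_critical
      // is true; this stand-in just forwards to malloc.
      (void)is_critical;
      return std::malloc(size);
    }

    struct Blob {
      void* operator new(size_t, size_t size, bool is_critical = false) throw() {
        return arena_allocate(size, is_critical);
      }
      // Matching placement delete (used if a constructor throws) and the
      // ordinary delete used by 'delete b'.
      void operator delete(void* p, size_t, bool) { std::free(p); }
      void operator delete(void* p)               { std::free(p); }
    };

    int main() {
      const bool is_critical = true;  // same idiom as AdapterBlob::create()
      Blob* b = new (sizeof(Blob), is_critical) Blob();
      delete b;
      return 0;
    }
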
    55.1 --- a/src/share/vm/code/codeBlob.hpp	Wed Oct 16 11:48:03 2013 -0700
    55.2 +++ b/src/share/vm/code/codeBlob.hpp	Thu Oct 17 10:58:45 2013 -0700
    55.3 @@ -209,7 +209,7 @@
    55.4    BufferBlob(const char* name, int size);
    55.5    BufferBlob(const char* name, int size, CodeBuffer* cb);
    55.6  
    55.7 -  void* operator new(size_t s, unsigned size) throw();
    55.8 +  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
    55.9  
   55.10   public:
   55.11    // Creation
   55.12 @@ -253,7 +253,6 @@
   55.13  class MethodHandlesAdapterBlob: public BufferBlob {
   55.14  private:
   55.15    MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
   55.16 -  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
   55.17  
   55.18  public:
   55.19    // Creation
    56.1 --- a/src/share/vm/compiler/abstractCompiler.cpp	Wed Oct 16 11:48:03 2013 -0700
    56.2 +++ b/src/share/vm/compiler/abstractCompiler.cpp	Thu Oct 17 10:58:45 2013 -0700
    56.3 @@ -24,41 +24,42 @@
    56.4  
    56.5  #include "precompiled.hpp"
    56.6  #include "compiler/abstractCompiler.hpp"
    56.7 +#include "compiler/compileBroker.hpp"
    56.8  #include "runtime/mutexLocker.hpp"
    56.9 -void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
   56.10 -  if (*state != initialized) {
   56.11  
   56.12 -    // We are thread in native here...
   56.13 -    CompilerThread* thread = CompilerThread::current();
   56.14 -    bool do_initialization = false;
   56.15 -    {
   56.16 -      ThreadInVMfromNative tv(thread);
   56.17 -      ResetNoHandleMark rnhm;
   56.18 -      MutexLocker only_one(CompileThread_lock, thread);
   56.19 -      if ( *state == uninitialized) {
   56.20 -        do_initialization = true;
   56.21 -        *state = initializing;
   56.22 -      } else {
   56.23 -        while (*state == initializing ) {
   56.24 -          CompileThread_lock->wait();
   56.25 -        }
   56.26 +bool AbstractCompiler::should_perform_init() {
   56.27 +  if (_compiler_state != initialized) {
   56.28 +    MutexLocker only_one(CompileThread_lock);
   56.29 +
   56.30 +    if (_compiler_state == uninitialized) {
   56.31 +      _compiler_state = initializing;
   56.32 +      return true;
   56.33 +    } else {
   56.34 +      while (_compiler_state == initializing) {
   56.35 +        CompileThread_lock->wait();
   56.36        }
   56.37      }
   56.38 -    if (do_initialization) {
   56.39 -      // We can not hold any locks here since JVMTI events may call agents
   56.40 +  }
   56.41 +  return false;
   56.42 +}
   56.43  
   56.44 -      // Compiler(s) run as native
   56.45 +bool AbstractCompiler::should_perform_shutdown() {
   56.46 +  // Since this method can be called by multiple threads, the lock ensures atomicity of
   56.47 +  // decrementing '_num_compiler_threads' and the following operations.
   56.48 +  MutexLocker only_one(CompileThread_lock);
   56.49 +  _num_compiler_threads--;
   56.50 +  assert (CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");
   56.51  
   56.52 -      (*f)();
   56.53 +  // Only the last thread will perform shutdown operations
   56.54 +  if (_num_compiler_threads == 0) {
   56.55 +    return true;
   56.56 +  }
   56.57 +  return false;
   56.58 +}
   56.59  
   56.60 -      // To in_vm so we can use the lock
   56.61 -
   56.62 -      ThreadInVMfromNative tv(thread);
   56.63 -      ResetNoHandleMark rnhm;
   56.64 -      MutexLocker only_one(CompileThread_lock, thread);
   56.65 -      assert(*state == initializing, "wrong state");
   56.66 -      *state = initialized;
   56.67 -      CompileThread_lock->notify_all();
   56.68 -    }
   56.69 -  }
   56.70 +void AbstractCompiler::set_state(int state) {
   56.71 +  // Ensure that the state is only set by one thread at a time
   56.72 +  MutexLocker only_one(CompileThread_lock);
   56.73 +  _compiler_state = state;
   56.74 +  CompileThread_lock->notify_all();
   56.75  }
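
should_perform_init() and set_state() above replace initialize_runtimes() with an explicit election: the first compiler thread of a runtime flips the state from uninitialized to initializing and returns true to run the one-time setup, while later arrivals block until the winner publishes a terminal state via set_state(). A standalone sketch of the protocol with std:: primitives (the VM uses CompileThread_lock, and also has failed and shut_down states):

    // Standalone sketch of a once-only initialization election; illustrative.
    #include <condition_variable>
    #include <mutex>

    enum State { uninitialized, initializing, initialized };

    static std::mutex              lock;
    static std::condition_variable cv;
    static State                   state = uninitialized;

    // Returns true for exactly one caller, which must later call set_state().
    bool should_perform_init() {
      std::unique_lock<std::mutex> l(lock);
      if (state == uninitialized) {
        state = initializing;
        return true;                 // this thread performs the initialization
      }
      cv.wait(l, [] { return state != initializing; });
      return false;                  // initialization already done elsewhere
    }

    void set_state(State s) {
      std::lock_guard<std::mutex> l(lock);
      state = s;                     // e.g. initialized
      cv.notify_all();               // release the waiters
    }
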
    57.1 --- a/src/share/vm/compiler/abstractCompiler.hpp	Wed Oct 16 11:48:03 2013 -0700
    57.2 +++ b/src/share/vm/compiler/abstractCompiler.hpp	Thu Oct 17 10:58:45 2013 -0700
    57.3 @@ -27,22 +27,25 @@
    57.4  
    57.5  #include "ci/compilerInterface.hpp"
    57.6  
    57.7 -typedef void (*initializer)(void);
    57.8 -
    57.9  class AbstractCompiler : public CHeapObj<mtCompiler> {
   57.10   private:
   57.11 -  bool _is_initialized; // Mark whether compiler object is initialized
   57.12 +  volatile int _num_compiler_threads;
   57.13  
   57.14   protected:
   57.15 +  volatile int _compiler_state;
   57.16    // Used for tracking global state of compiler runtime initialization
   57.17 -  enum { uninitialized, initializing, initialized };
   57.18 +  enum { uninitialized, initializing, initialized, failed, shut_down };
   57.19  
   57.20 -  // This method will call the initialization method "f" once (per compiler class/subclass)
   57.21 -  // and do so without holding any locks
   57.22 -  void initialize_runtimes(initializer f, volatile int* state);
   57.23 +  // This method returns true for the first compiler thread that reaches this method.
   57.24 +  // This thread will initialize the compiler runtime.
   57.25 +  bool should_perform_init();
   57.26  
   57.27   public:
   57.28 -  AbstractCompiler() : _is_initialized(false)    {}
   57.29 +  AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}
   57.30 +
   57.31 +  // This function determines the compiler thread that will perform the
   57.32 +  // shutdown of the corresponding compiler runtime.
   57.33 +  bool should_perform_shutdown();
   57.34  
   57.35    // Name of this compiler
   57.36    virtual const char* name() = 0;
   57.37 @@ -74,17 +77,18 @@
   57.38  #endif // TIERED
   57.39  
   57.40    // Customization
   57.41 -  virtual bool needs_stubs            ()         = 0;
   57.42 +  virtual void initialize () = 0;
   57.43  
   57.44 -  void mark_initialized()                        { _is_initialized = true; }
   57.45 -  bool is_initialized()                          { return _is_initialized; }
   57.46 +  void set_num_compiler_threads(int num) { _num_compiler_threads = num;  }
   57.47 +  int num_compiler_threads()             { return _num_compiler_threads; }
   57.48  
   57.49 -  virtual void initialize()                      = 0;
   57.50 -
   57.51 +  // Get/set state of compiler objects
   57.52 +  bool is_initialized()           { return _compiler_state == initialized; }
   57.53 +  bool is_failed     ()           { return _compiler_state == failed; }
   57.54 +  void set_state     (int state);
   57.55 +  void set_shut_down ()           { set_state(shut_down); }
   57.56    // Compilation entry point for methods
   57.57 -  virtual void compile_method(ciEnv* env,
   57.58 -                              ciMethod* target,
   57.59 -                              int entry_bci) {
   57.60 +  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   57.61      ShouldNotReachHere();
   57.62    }
   57.63  
    58.1 --- a/src/share/vm/compiler/compileBroker.cpp	Wed Oct 16 11:48:03 2013 -0700
    58.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Thu Oct 17 10:58:45 2013 -0700
    58.3 @@ -186,7 +186,7 @@
    58.4  CompileQueue* CompileBroker::_c1_method_queue    = NULL;
    58.5  CompileTask*  CompileBroker::_task_free_list     = NULL;
    58.6  
    58.7 -GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
    58.8 +GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
    58.9  
   58.10  
   58.11  class CompilationLog : public StringEventLog {
   58.12 @@ -587,9 +587,6 @@
   58.13  
   58.14  
   58.15  
   58.16 -// ------------------------------------------------------------------
   58.17 -// CompileQueue::add
   58.18 -//
   58.19  // Add a CompileTask to a CompileQueue
   58.20  void CompileQueue::add(CompileTask* task) {
   58.21    assert(lock()->owned_by_self(), "must own lock");
   58.22 @@ -626,6 +623,16 @@
   58.23    lock()->notify_all();
   58.24  }
   58.25  
   58.26 +void CompileQueue::delete_all() {
   58.27 +  assert(lock()->owned_by_self(), "must own lock");
   58.28 +  for (CompileTask* task = _first; task != NULL; ) {
   58.29 +    CompileTask* next = task->next();  // read the link before freeing the task
   58.30 +    delete task;
   58.31 +    task = next;
   58.32 +  }
   58.33 +  _first = NULL;
   58.34 +}
   58.35 +
   58.36  // ------------------------------------------------------------------
   58.37  // CompileQueue::get
   58.38  //
   58.39 @@ -640,6 +647,11 @@
   58.40    // case we perform code cache sweeps to free memory such that we can re-enable
   58.41    // compilation.
   58.42    while (_first == NULL) {
   58.43 +    // Exit loop if compilation is disabled forever
   58.44 +    if (CompileBroker::is_compilation_disabled_forever()) {
   58.45 +      return NULL;
   58.46 +    }
   58.47 +
   58.48      if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
   58.49        // Wait a certain amount of time to possibly do another sweep.
   58.50        // We must wait until stack scanning has happened so that we can
   58.51 @@ -664,9 +676,17 @@
   58.52        // remains unchanged. This behavior is desired, since we want to keep
   58.53        // the stable state, i.e., we do not want to evict methods from the
   58.54        // code cache if it is unnecessary.
   58.55 -      lock()->wait();
   58.56 +      // We need a timed wait here, since compiler threads can exit if compilation
   58.57 +      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
   58.58 +      // is not critical and we do not want idle compiler threads to wake up too often.
   58.59 +      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   58.60      }
   58.61    }
   58.62 +
   58.63 +  if (CompileBroker::is_compilation_disabled_forever()) {
   58.64 +    return NULL;
   58.65 +  }
   58.66 +
   58.67    CompileTask* task = CompilationPolicy::policy()->select_task(this);
   58.68    remove(task);
   58.69    return task;
   58.70 @@ -891,10 +911,8 @@
   58.71  }
   58.72  
   58.73  
   58.74 -
   58.75 -// ------------------------------------------------------------------
   58.76 -// CompileBroker::make_compiler_thread
   58.77 -CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
   58.78 +CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
   58.79 +                                                    AbstractCompiler* comp, TRAPS) {
   58.80    CompilerThread* compiler_thread = NULL;
   58.81  
   58.82    Klass* k =
   58.83 @@ -961,6 +979,7 @@
   58.84      java_lang_Thread::set_daemon(thread_oop());
   58.85  
   58.86      compiler_thread->set_threadObj(thread_oop());
   58.87 +    compiler_thread->set_compiler(comp);
   58.88      Threads::add(compiler_thread);
   58.89      Thread::start(compiler_thread);
   58.90    }
   58.91 @@ -972,25 +991,24 @@
   58.92  }
   58.93  
   58.94  
   58.95 -// ------------------------------------------------------------------
   58.96 -// CompileBroker::init_compiler_threads
   58.97 -//
   58.98 -// Initialize the compilation queue
   58.99  void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
  58.100    EXCEPTION_MARK;
  58.101  #if !defined(ZERO) && !defined(SHARK)
  58.102    assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
  58.103  #endif // !ZERO && !SHARK
  58.104 +  // Initialize the compilation queue
  58.105    if (c2_compiler_count > 0) {
  58.106      _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
  58.107 +    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
  58.108    }
  58.109    if (c1_compiler_count > 0) {
  58.110      _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
  58.111 +    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
  58.112    }
  58.113  
  58.114    int compiler_count = c1_compiler_count + c2_compiler_count;
  58.115  
  58.116 -  _method_threads =
  58.117 +  _compiler_threads =
  58.118      new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
  58.119  
  58.120    char name_buffer[256];
  58.121 @@ -998,21 +1016,22 @@
  58.122      // Create a name for our thread.
  58.123      sprintf(name_buffer, "C2 CompilerThread%d", i);
  58.124      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  58.125 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
  58.126 -    _method_threads->append(new_thread);
  58.127 +    // Shark and C2
  58.128 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
  58.129 +    _compiler_threads->append(new_thread);
  58.130    }
  58.131  
  58.132    for (int i = c2_compiler_count; i < compiler_count; i++) {
  58.133      // Create a name for our thread.
  58.134      sprintf(name_buffer, "C1 CompilerThread%d", i);
  58.135      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  58.136 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
  58.137 -    _method_threads->append(new_thread);
  58.138 +    // C1
  58.139 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
  58.140 +    _compiler_threads->append(new_thread);
  58.141    }
  58.142  
  58.143    if (UsePerfData) {
  58.144 -    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
  58.145 -                                     compiler_count, CHECK);
  58.146 +    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
  58.147    }
  58.148  }
  58.149  
  58.150 @@ -1029,27 +1048,6 @@
  58.151  }
  58.152  
  58.153  // ------------------------------------------------------------------
  58.154 -// CompileBroker::is_idle
  58.155 -bool CompileBroker::is_idle() {
  58.156 -  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
  58.157 -    return false;
  58.158 -  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
  58.159 -    return false;
  58.160 -  } else {
  58.161 -    int num_threads = _method_threads->length();
  58.162 -    for (int i=0; i<num_threads; i++) {
  58.163 -      if (_method_threads->at(i)->task() != NULL) {
  58.164 -        return false;
  58.165 -      }
  58.166 -    }
  58.167 -
  58.168 -    // No pending or active compilations.
  58.169 -    return true;
  58.170 -  }
  58.171 -}
  58.172 -
  58.173 -
  58.174 -// ------------------------------------------------------------------
  58.175  // CompileBroker::compile_method
  58.176  //
  58.177  // Request compilation of a method.
  58.178 @@ -1551,6 +1549,101 @@
  58.179    free_task(task);
  58.180  }
  58.181  
  58.182 +// Initialize compiler thread(s) + compiler object(s). The postcondition
  58.183 +// of this function is that the compiler runtimes are initialized and that
  58.184 +// compiler threads can start compiling.
  58.185 +bool CompileBroker::init_compiler_runtime() {
  58.186 +  CompilerThread* thread = CompilerThread::current();
  58.187 +  AbstractCompiler* comp = thread->compiler();
  58.188 +  // Final sanity check - the compiler object must exist
  58.189 +  guarantee(comp != NULL, "Compiler object must exist");
  58.190 +
  58.191 +  int system_dictionary_modification_counter;
  58.192 +  {
  58.193 +    MutexLocker locker(Compile_lock, thread);
  58.194 +    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
  58.195 +  }
  58.196 +
  58.197 +  {
  58.198 +    // Must switch to native to allocate ci_env
  58.199 +    ThreadToNativeFromVM ttn(thread);
  58.200 +    ciEnv ci_env(NULL, system_dictionary_modification_counter);
  58.201 +    // Cache Jvmti state
  58.202 +    ci_env.cache_jvmti_state();
  58.203 +    // Cache DTrace flags
  58.204 +    ci_env.cache_dtrace_flags();
  58.205 +
  58.206 +    // Switch back to VM state to do compiler initialization
  58.207 +    ThreadInVMfromNative tv(thread);
  58.208 +    ResetNoHandleMark rnhm;
  58.209 +
  58.210 +
  58.211 +    if (!comp->is_shark()) {
  58.212 +      // Perform per-thread and global initializations
  58.213 +      comp->initialize();
  58.214 +    }
  58.215 +  }
  58.216 +
  58.217 +  if (comp->is_failed()) {
  58.218 +    disable_compilation_forever();
  58.219 +    // If compiler initialization failed, no compiler thread that is specific to a
  58.220 +    // particular compiler runtime will ever start to compile methods.
  58.221 +
  58.222 +    shutdown_compiler_runtime(comp, thread);
  58.223 +    return false;
  58.224 +  }
  58.225 +
  58.226 +  // C1 specific check
  58.227 +  if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
  58.228 +    warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
  58.229 +    return false;
  58.230 +  }
  58.231 +
  58.232 +  return true;
  58.233 +}
  58.234 +
  58.235 +// If C1 and/or C2 initialization failed, we shut down all compilation.
  58.236 +// We do this to keep things simple. This can be changed if it ever turns out to be
  58.237 +// a problem.
  58.238 +void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
  58.239 +  // Free buffer blob, if allocated
  58.240 +  if (thread->get_buffer_blob() != NULL) {
  58.241 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  58.242 +    CodeCache::free(thread->get_buffer_blob());
  58.243 +  }
  58.244 +
  58.245 +  if (comp->should_perform_shutdown()) {
  58.246 +    // There are two reasons for shutting down the compiler
  58.247 +    // 1) compiler runtime initialization failed
  58.248 +    // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
  58.249 +    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
  58.250 +
  58.251 +    // Only one thread per compiler runtime object enters here
  58.252 +    // Set state to shut down
  58.253 +    comp->set_shut_down();
  58.254 +
  58.255 +    MutexLocker mu(MethodCompileQueue_lock, thread);
  58.256 +    CompileQueue* queue;
  58.257 +    if (_c1_method_queue != NULL) {
  58.258 +      _c1_method_queue->delete_all();
  58.259 +      queue = _c1_method_queue;
  58.260 +      _c1_method_queue = NULL;
  58.261 +      delete queue;
  58.262 +    }
  58.263 +
  58.264 +    if (_c2_method_queue != NULL) {
  58.265 +      _c2_method_queue->delete_all();
  58.266 +      queue = _c2_method_queue;
  58.267 +      _c2_method_queue = NULL;
  58.268 +      delete queue;
  58.269 +    }
  58.270 +
  58.271 +    // We could also delete the compiler runtimes. However, there are references to
  58.272 +    // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) that would then
  58.273 +    // fail. This can be done later if necessary.
  58.274 +  }
  58.275 +}
  58.276 +
  58.277  // ------------------------------------------------------------------
  58.278  // CompileBroker::compiler_thread_loop
  58.279  //
  58.280 @@ -1558,7 +1651,6 @@
  58.281  void CompileBroker::compiler_thread_loop() {
  58.282    CompilerThread* thread = CompilerThread::current();
  58.283    CompileQueue* queue = thread->queue();
  58.284 -
  58.285    // For the thread that initializes the ciObjectFactory
  58.286    // this resource mark holds all the shared objects
  58.287    ResourceMark rm;
  58.288 @@ -1587,65 +1679,78 @@
  58.289      log->end_elem();
  58.290    }
  58.291  
  58.292 -  while (true) {
  58.293 -    {
  58.294 -      // We need this HandleMark to avoid leaking VM handles.
  58.295 -      HandleMark hm(thread);
  58.296 +  // If compiler thread/runtime initialization fails, exit the compiler thread
  58.297 +  if (!init_compiler_runtime()) {
  58.298 +    return;
  58.299 +  }
  58.300  
  58.301 -      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  58.302 -        // the code cache is really full
  58.303 -        handle_full_code_cache();
  58.304 -      }
  58.305 +  // Poll for new compilation tasks as long as the JVM runs. Compilation
  58.306 +  // should only be disabled if something went wrong while initializing the
  58.307 +  // compiler runtimes. This, in turn, should not happen. The only known case
  58.308 +  // when compiler runtime initialization fails is if there is not enough free
  58.309 +  // space in the code cache to generate the necessary stubs, etc.
  58.310 +  while (!is_compilation_disabled_forever()) {
  58.311 +    // We need this HandleMark to avoid leaking VM handles.
  58.312 +    HandleMark hm(thread);
  58.313  
  58.314 -      CompileTask* task = queue->get();
  58.315 +    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  58.316 +      // the code cache is really full
  58.317 +      handle_full_code_cache();
  58.318 +    }
  58.319  
  58.320 -      // Give compiler threads an extra quanta.  They tend to be bursty and
  58.321 -      // this helps the compiler to finish up the job.
  58.322 -      if( CompilerThreadHintNoPreempt )
  58.323 -        os::hint_no_preempt();
  58.324 +    CompileTask* task = queue->get();
  58.325 +    if (task == NULL) {
  58.326 +      continue;
  58.327 +    }
  58.328  
  58.329 -      // trace per thread time and compile statistics
  58.330 -      CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  58.331 -      PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  58.332 +    // Give compiler threads an extra quanta.  They tend to be bursty and
  58.333 +    // this helps the compiler to finish up the job.
  58.334 +    if( CompilerThreadHintNoPreempt )
  58.335 +      os::hint_no_preempt();
  58.336  
  58.337 -      // Assign the task to the current thread.  Mark this compilation
  58.338 -      // thread as active for the profiler.
  58.339 -      CompileTaskWrapper ctw(task);
  58.340 -      nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  58.341 -      task->set_code_handle(&result_handle);
  58.342 -      methodHandle method(thread, task->method());
  58.343 +    // trace per thread time and compile statistics
  58.344 +    CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  58.345 +    PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  58.346  
  58.347 -      // Never compile a method if breakpoints are present in it
  58.348 -      if (method()->number_of_breakpoints() == 0) {
  58.349 -        // Compile the method.
  58.350 -        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  58.351 +    // Assign the task to the current thread.  Mark this compilation
  58.352 +    // thread as active for the profiler.
  58.353 +    CompileTaskWrapper ctw(task);
  58.354 +    nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  58.355 +    task->set_code_handle(&result_handle);
  58.356 +    methodHandle method(thread, task->method());
  58.357 +
  58.358 +    // Never compile a method if breakpoints are present in it
  58.359 +    if (method()->number_of_breakpoints() == 0) {
  58.360 +      // Compile the method.
  58.361 +      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  58.362  #ifdef COMPILER1
  58.363 -          // Allow repeating compilations for the purpose of benchmarking
  58.364 -          // compile speed. This is not useful for customers.
  58.365 -          if (CompilationRepeat != 0) {
  58.366 -            int compile_count = CompilationRepeat;
  58.367 -            while (compile_count > 0) {
  58.368 -              invoke_compiler_on_method(task);
  58.369 -              nmethod* nm = method->code();
  58.370 -              if (nm != NULL) {
  58.371 -                nm->make_zombie();
  58.372 -                method->clear_code();
  58.373 -              }
  58.374 -              compile_count--;
  58.375 +        // Allow repeating compilations for the purpose of benchmarking
  58.376 +        // compile speed. This is not useful for customers.
  58.377 +        if (CompilationRepeat != 0) {
  58.378 +          int compile_count = CompilationRepeat;
  58.379 +          while (compile_count > 0) {
  58.380 +            invoke_compiler_on_method(task);
  58.381 +            nmethod* nm = method->code();
  58.382 +            if (nm != NULL) {
  58.383 +              nm->make_zombie();
  58.384 +              method->clear_code();
  58.385              }
  58.386 +            compile_count--;
  58.387            }
  58.388 +        }
  58.389  #endif /* COMPILER1 */
  58.390 -          invoke_compiler_on_method(task);
  58.391 -        } else {
  58.392 -          // After compilation is disabled, remove remaining methods from queue
  58.393 -          method->clear_queued_for_compilation();
  58.394 -        }
  58.395 +        invoke_compiler_on_method(task);
  58.396 +      } else {
  58.397 +        // After compilation is disabled, remove remaining methods from queue
  58.398 +        method->clear_queued_for_compilation();
  58.399        }
  58.400      }
  58.401    }
  58.402 +
  58.403 +  // Shut down compiler runtime
  58.404 +  shutdown_compiler_runtime(thread->compiler(), thread);
  58.405  }
  58.406  
  58.407 -
  58.408  // ------------------------------------------------------------------
  58.409  // CompileBroker::init_compiler_thread_log
  58.410  //
  58.411 @@ -1953,11 +2058,14 @@
  58.412        // Since code cache is full, immediately stop new compiles
  58.413        if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
  58.414          NMethodSweeper::log_sweep("disable_compiler");
  58.415 +
  58.416 +        // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
  58.417 +        // without having to consider the state in which the current thread is.
  58.418 +        ThreadInVMfromUnknown in_vm;
  58.419          NMethodSweeper::possibly_sweep();
  58.420        }
  58.421      } else {
  58.422 -      UseCompiler               = false;
  58.423 -      AlwaysCompileLoopMethods  = false;
  58.424 +      disable_compilation_forever();
  58.425      }
  58.426    }
  58.427    codecache_print(/* detailed= */ true);
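
Two shapes in the changes above are worth isolating: CompileQueue::get() now bounds its wait (5 s) and returns NULL once compilation has been disabled forever, and compiler_thread_loop() treats a NULL task as "check the shutdown flag and poll again". A standalone sketch of that consumer loop with std:: primitives (Task is an illustrative stand-in for CompileTask):

    // Standalone sketch of a shutdown-aware blocking queue consumer.
    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Task { int id; };

    static std::mutex              lock;
    static std::condition_variable cv;
    static std::deque<Task*>       queue;
    static std::atomic<bool>       disabled_forever(false);

    Task* get() {
      std::unique_lock<std::mutex> l(lock);
      while (queue.empty()) {
        if (disabled_forever.load()) return nullptr;  // let the thread exit
        // Timed wait: waking every few seconds is cheap and guarantees the
        // shutdown flag is re-checked even if nobody signals the queue.
        cv.wait_for(l, std::chrono::seconds(5));
      }
      if (disabled_forever.load()) return nullptr;
      Task* t = queue.front();
      queue.pop_front();
      return t;
    }

    void thread_loop() {
      while (!disabled_forever.load()) {
        Task* task = get();
        if (task == nullptr) continue;  // re-check the flag, as above
        /* ... compile ... */ delete task;
      }
      /* ... per-thread shutdown ... */
    }
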
    59.1 --- a/src/share/vm/compiler/compileBroker.hpp	Wed Oct 16 11:48:03 2013 -0700
    59.2 +++ b/src/share/vm/compiler/compileBroker.hpp	Thu Oct 17 10:58:45 2013 -0700
    59.3 @@ -213,8 +213,12 @@
    59.4  
    59.5    // Redefine Classes support
    59.6    void mark_on_stack();
    59.7 +  void delete_all();
    59.8 +  void         print();
    59.9  
   59.10 -  void         print();
   59.11 +  ~CompileQueue() {
   59.12 +    assert(is_empty(), "Compile Queue must be empty");
   59.13 +  }
   59.14  };
   59.15  
   59.16  // CompileTaskWrapper
   59.17 @@ -266,7 +270,7 @@
   59.18    static CompileQueue* _c1_method_queue;
   59.19    static CompileTask* _task_free_list;
   59.20  
   59.21 -  static GrowableArray<CompilerThread*>* _method_threads;
   59.22 +  static GrowableArray<CompilerThread*>* _compiler_threads;
   59.23  
   59.24    // performance counters
   59.25    static PerfCounter* _perf_total_compilation;
   59.26 @@ -311,7 +315,7 @@
   59.27    static int _sum_nmethod_code_size;
   59.28    static long _peak_compilation_time;
   59.29  
   59.30 -  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
   59.31 +  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
   59.32    static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   59.33    static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   59.34    static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   59.35 @@ -351,6 +355,9 @@
   59.36      if (is_c1_compile(comp_level)) return _c1_method_queue;
   59.37      return NULL;
   59.38    }
   59.39 +  static bool init_compiler_runtime();
   59.40 +  static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);
   59.41 +
   59.42   public:
   59.43    enum {
   59.44      // The entry bci used for non-OSR compilations.
   59.45 @@ -378,9 +385,7 @@
   59.46                                   const char* comment, Thread* thread);
   59.47  
   59.48    static void compiler_thread_loop();
   59.49 -
   59.50    static uint get_compilation_id() { return _compilation_id; }
   59.51 -  static bool is_idle();
   59.52  
   59.53    // Set _should_block.
   59.54    // Call this from the VM, with Threads_lock held and a safepoint requested.
   59.55 @@ -391,8 +396,9 @@
   59.56  
   59.57    enum {
   59.58      // Flags for toggling compiler activity
   59.59 -    stop_compilation = 0,
   59.60 -    run_compilation  = 1
   59.61 +    stop_compilation     = 0,
   59.62 +    run_compilation      = 1,
   59.63 +    shutdown_compilation = 2
   59.64    };
   59.65  
   59.66    static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   59.67 @@ -401,6 +407,16 @@
   59.68      jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
   59.69      return (old == (1-new_state));
   59.70    }
   59.71 +
   59.72 +  static void disable_compilation_forever() {
   59.73 +    UseCompiler               = false;
   59.74 +    AlwaysCompileLoopMethods  = false;
   59.75 +    Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
   59.76 +  }
   59.77 +
   59.78 +  static bool is_compilation_disabled_forever() {
   59.79 +    return _should_compile_new_jobs == shutdown_compilation;
   59.80 +  }
   59.81    static void handle_full_code_cache();
   59.82  
   59.83    // Return total compilation ticks
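
The compilation switch above goes from two states to three: stop_compilation and run_compilation still toggle through a compare-and-swap that expects the opposite value (1 - new_state), while disable_compilation_forever() installs shutdown_compilation with an unconditional exchange. Because the CAS never expects 2, the shutdown state is one-way. A standalone sketch with std::atomic standing in for the VM's Atomic class:

    // Standalone sketch of the tri-state compilation flag; illustrative.
    #include <atomic>

    enum { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };

    static std::atomic<int> should_compile_new_jobs(run_compilation);

    // Toggle stop <-> run; returns true only for the thread whose CAS won.
    bool set_should_compile_new_jobs(int new_state) {
      int expected = 1 - new_state;
      return should_compile_new_jobs.compare_exchange_strong(expected, new_state);
    }

    // One-way transition: no stop/run toggle can ever CAS away from state 2.
    void disable_compilation_forever() {
      should_compile_new_jobs.exchange(shutdown_compilation);
    }

    bool is_compilation_disabled_forever() {
      return should_compile_new_jobs.load() == shutdown_compilation;
    }
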
    60.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Wed Oct 16 11:48:03 2013 -0700
    60.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 17 10:58:45 2013 -0700
   60.10 @@ -158,6 +159,22 @@
   60.11      index = vt->index_of_miranda(resolved_method->name(),
   60.12                                   resolved_method->signature());
   60.13      kind = CallInfo::vtable_call;
   60.14 +  } else if (resolved_method->has_vtable_index()) {
   60.15 +    // Can occur if an interface redeclares a method of Object.
   60.16 +
   60.17 +#ifdef ASSERT
   60.18 +    // Ensure that this is really the case.
   60.19 +    KlassHandle object_klass = SystemDictionary::Object_klass();
   60.20 +    Method* object_resolved_method = object_klass()->vtable()->method_at(index);
   60.21 +    assert(object_resolved_method->name() == resolved_method->name(),
   60.22 +      err_msg("Object and interface method names should match at vtable index %d, %s != %s",
   60.23 +      index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
   60.24 +    assert(object_resolved_method->signature() == resolved_method->signature(),
   60.25 +      err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
   60.26 +      index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
   60.27 +#endif // ASSERT
   60.28 +
   60.29 +    kind = CallInfo::vtable_call;
   60.30    } else {
   60.31      // A regular interface call.
   60.32      kind = CallInfo::itable_call;
    61.1 --- a/src/share/vm/oops/method.hpp	Wed Oct 16 11:48:03 2013 -0700
    61.2 +++ b/src/share/vm/oops/method.hpp	Thu Oct 17 10:58:45 2013 -0700
    61.3 @@ -805,6 +805,7 @@
    61.4   private:
    61.5    void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
    61.6  
    61.7 + public:
    61.8    MethodCounters* get_method_counters(TRAPS) {
    61.9      if (_method_counters == NULL) {
   61.10        build_method_counters(this, CHECK_AND_CLEAR_NULL);
   61.11 @@ -812,7 +813,6 @@
   61.12      return _method_counters;
   61.13    }
   61.14  
   61.15 - public:
   61.16    bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
   61.17    void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
   61.18    void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
    62.1 --- a/src/share/vm/oops/methodData.cpp	Wed Oct 16 11:48:03 2013 -0700
    62.2 +++ b/src/share/vm/oops/methodData.cpp	Thu Oct 17 10:58:45 2013 -0700
    62.3 @@ -56,6 +56,11 @@
    62.4    if (needs_array_len(tag)) {
    62.5      set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
    62.6    }
    62.7 +  if (tag == call_type_data_tag) {
    62.8 +    CallTypeData::initialize(this, cell_count);
    62.9 +  } else if (tag == virtual_call_type_data_tag) {
   62.10 +    VirtualCallTypeData::initialize(this, cell_count);
   62.11 +  }
   62.12  }
   62.13  
   62.14  void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
   62.15 @@ -76,7 +81,7 @@
   62.16  }
   62.17  
   62.18  #ifndef PRODUCT
   62.19 -void ProfileData::print_shared(outputStream* st, const char* name) {
   62.20 +void ProfileData::print_shared(outputStream* st, const char* name) const {
   62.21    st->print("bci: %d", bci());
   62.22    st->fill_to(tab_width_one);
   62.23    st->print("%s", name);
   62.24 @@ -91,8 +96,8 @@
   62.25      st->print("flags(%d) ", flags);
   62.26  }
   62.27  
   62.28 -void ProfileData::tab(outputStream* st) {
   62.29 -  st->fill_to(tab_width_two);
   62.30 +void ProfileData::tab(outputStream* st, bool first) const {
   62.31 +  st->fill_to(first ? tab_width_one : tab_width_two);
   62.32  }
   62.33  #endif // !PRODUCT
   62.34  
   62.35 @@ -104,7 +109,7 @@
   62.36  
   62.37  
   62.38  #ifndef PRODUCT
   62.39 -void BitData::print_data_on(outputStream* st) {
   62.40 +void BitData::print_data_on(outputStream* st) const {
   62.41    print_shared(st, "BitData");
   62.42  }
   62.43  #endif // !PRODUCT
   62.44 @@ -115,7 +120,7 @@
   62.45  // A CounterData corresponds to a simple counter.
   62.46  
   62.47  #ifndef PRODUCT
   62.48 -void CounterData::print_data_on(outputStream* st) {
   62.49 +void CounterData::print_data_on(outputStream* st) const {
   62.50    print_shared(st, "CounterData");
   62.51    st->print_cr("count(%u)", count());
   62.52  }
   62.53 @@ -145,12 +150,207 @@
   62.54  }
   62.55  
   62.56  #ifndef PRODUCT
   62.57 -void JumpData::print_data_on(outputStream* st) {
   62.58 +void JumpData::print_data_on(outputStream* st) const {
   62.59    print_shared(st, "JumpData");
   62.60    st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
   62.61  }
   62.62  #endif // !PRODUCT
   62.63  
   62.64 +int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
   62.65 +  ResourceMark rm;
   62.66 +  SignatureStream ss(signature);
   62.67 +  int args_count = MIN2(ss.reference_parameter_count(), max);
   62.68 +  return args_count * per_arg_cell_count;
   62.69 +}
   62.70 +
   62.71 +int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
   62.72 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
   62.73 +  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
   62.74 +  Bytecode_invoke inv(stream->method(), stream->bci());
   62.75 +  int args_cell = 0;
   62.76 +  if (arguments_profiling_enabled()) {
   62.77 +    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
   62.78 +  }
   62.79 +  int ret_cell = 0;
   62.80 +  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
   62.81 +    ret_cell = ReturnTypeEntry::static_cell_count();
   62.82 +  }
   62.83 +  int header_cell = 0;
   62.84 +  if (args_cell + ret_cell > 0) {
   62.85 +    header_cell = header_cell_count();
   62.86 +  }
   62.87 +
   62.88 +  return header_cell + args_cell + ret_cell;
   62.89 +}
   62.90 +
   62.91 +class ArgumentOffsetComputer : public SignatureInfo {
   62.92 +private:
   62.93 +  int _max;
   62.94 +  GrowableArray<int> _offsets;
   62.95 +
   62.96 +  void set(int size, BasicType type) { _size += size; }
   62.97 +  void do_object(int begin, int end) {
   62.98 +    if (_offsets.length() < _max) {
   62.99 +      _offsets.push(_size);
  62.100 +    }
  62.101 +    SignatureInfo::do_object(begin, end);
  62.102 +  }
  62.103 +  void do_array (int begin, int end) {
  62.104 +    if (_offsets.length() < _max) {
  62.105 +      _offsets.push(_size);
  62.106 +    }
  62.107 +    SignatureInfo::do_array(begin, end);
  62.108 +  }
  62.109 +
  62.110 +public:
  62.111 +  ArgumentOffsetComputer(Symbol* signature, int max)
  62.112 +    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  62.113 +  }
  62.114 +
  62.115 +  int total() { lazy_iterate_parameters(); return _size; }
  62.116 +
  62.117 +  int off_at(int i) const { return _offsets.at(i); }
  62.118 +};
  62.119 +
  62.120 +void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
  62.121 +  ResourceMark rm;
  62.122 +  ArgumentOffsetComputer aos(signature, _number_of_entries);
  62.123 +  aos.total();
  62.124 +  for (int i = 0; i < _number_of_entries; i++) {
  62.125 +    set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
  62.126 +    set_type(i, type_none());
  62.127 +  }
  62.128 +}
  62.129 +
  62.130 +void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  62.131 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  62.132 +  Bytecode_invoke inv(stream->method(), stream->bci());
  62.133 +
  62.134 +  SignatureStream ss(inv.signature());
  62.135 +  if (has_arguments()) {
  62.136 +#ifdef ASSERT
  62.137 +    ResourceMark rm;
  62.138 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  62.139 +    assert(count > 0, "room for args type but none found?");
  62.140 +    check_number_of_arguments(count);
  62.141 +#endif
  62.142 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  62.143 +  }
  62.144 +
  62.145 +  if (has_return()) {
  62.146 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  62.147 +    _ret.post_initialize();
  62.148 +  }
  62.149 +}
  62.150 +
  62.151 +void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  62.152 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  62.153 +  Bytecode_invoke inv(stream->method(), stream->bci());
  62.154 +
  62.155 +  if (has_arguments()) {
  62.156 +#ifdef ASSERT
  62.157 +    ResourceMark rm;
  62.158 +    SignatureStream ss(inv.signature());
  62.159 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  62.160 +    assert(count > 0, "room for args type but none found?");
  62.161 +    check_number_of_arguments(count);
  62.162 +#endif
  62.163 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  62.164 +  }
  62.165 +
  62.166 +  if (has_return()) {
  62.167 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  62.168 +    _ret.post_initialize();
  62.169 +  }
  62.170 +}
  62.171 +
  62.172 +bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
  62.173 +  // Note the inverted sense: true when a type is recorded but its class loader has died.
  62.174 +  return !is_type_none(p) && !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
  62.175 +}
  62.176 +
  62.177 +void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  62.178 +  for (int i = 0; i < _number_of_entries; i++) {
  62.179 +    intptr_t p = type(i);
  62.180 +    if (is_loader_alive(is_alive_cl, p)) {
  62.181 +      set_type(i, type_none());
  62.182 +    }
  62.183 +  }
  62.184 +}
  62.185 +
  62.186 +void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  62.187 +  intptr_t p = type();
  62.188 +  if (is_loader_alive(is_alive_cl, p)) {
  62.189 +    set_type(type_none());
  62.190 +  }
  62.191 +}
  62.192 +
  62.193 +bool TypeEntriesAtCall::return_profiling_enabled() {
  62.194 +  return MethodData::profile_return();
  62.195 +}
  62.196 +
  62.197 +bool TypeEntriesAtCall::arguments_profiling_enabled() {
  62.198 +  return MethodData::profile_arguments();
  62.199 +}
  62.200 +
  62.201 +#ifndef PRODUCT
  62.202 +void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  62.203 +  if (is_type_none(k)) {
  62.204 +    st->print("none");
  62.205 +  } else if (is_type_unknown(k)) {
  62.206 +    st->print("unknown");
  62.207 +  } else {
  62.208 +    valid_klass(k)->print_value_on(st);
  62.209 +  }
  62.210 +  if (was_null_seen(k)) {
  62.211 +    st->print(" (null seen)");
  62.212 +  }
  62.213 +}
  62.214 +
  62.215 +void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  62.216 +  for (int i = 0; i < _number_of_entries; i++) {
  62.217 +    _pd->tab(st);
  62.218 +    st->print("%d: stack(%u) ", i, stack_slot(i));
  62.219 +    print_klass(st, type(i));
  62.220 +    st->cr();
  62.221 +  }
  62.222 +}
  62.223 +
  62.224 +void ReturnTypeEntry::print_data_on(outputStream* st) const {
  62.225 +  _pd->tab(st);
  62.226 +  print_klass(st, type());
  62.227 +  st->cr();
  62.228 +}
  62.229 +
  62.230 +void CallTypeData::print_data_on(outputStream* st) const {
  62.231 +  CounterData::print_data_on(st);
  62.232 +  if (has_arguments()) {
  62.233 +    tab(st, true);
  62.234 +    st->print("argument types");
  62.235 +    _args.print_data_on(st);
  62.236 +  }
  62.237 +  if (has_return()) {
  62.238 +    tab(st, true);
  62.239 +    st->print("return type");
  62.240 +    _ret.print_data_on(st);
  62.241 +  }
  62.242 +}
  62.243 +
  62.244 +void VirtualCallTypeData::print_data_on(outputStream* st) const {
  62.245 +  VirtualCallData::print_data_on(st);
  62.246 +  if (has_arguments()) {
  62.247 +    tab(st, true);
  62.248 +    st->print("argument types");
  62.249 +    _args.print_data_on(st);
  62.250 +  }
  62.251 +  if (has_return()) {
  62.252 +    tab(st, true);
  62.253 +    st->print("return type");
  62.254 +    _ret.print_data_on(st);
  62.255 +  }
  62.256 +}
  62.257 +#endif
  62.258 +
  62.259  // ==================================================================
  62.260  // ReceiverTypeData
  62.261  //
  62.262 @@ -169,7 +369,7 @@
  62.263  }
  62.264  
  62.265  #ifndef PRODUCT
  62.266 -void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
  62.267 +void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  62.268    uint row;
  62.269    int entries = 0;
  62.270    for (row = 0; row < row_limit(); row++) {
  62.271 @@ -190,11 +390,11 @@
  62.272      }
  62.273    }
  62.274  }
  62.275 -void ReceiverTypeData::print_data_on(outputStream* st) {
  62.276 +void ReceiverTypeData::print_data_on(outputStream* st) const {
  62.277    print_shared(st, "ReceiverTypeData");
  62.278    print_receiver_data_on(st);
  62.279  }
  62.280 -void VirtualCallData::print_data_on(outputStream* st) {
  62.281 +void VirtualCallData::print_data_on(outputStream* st) const {
  62.282    print_shared(st, "VirtualCallData");
  62.283    print_receiver_data_on(st);
  62.284  }
  62.285 @@ -246,7 +446,7 @@
  62.286  
  62.287  
  62.288  #ifndef PRODUCT
  62.289 -void RetData::print_data_on(outputStream* st) {
  62.290 +void RetData::print_data_on(outputStream* st) const {
  62.291    print_shared(st, "RetData");
  62.292    uint row;
  62.293    int entries = 0;
  62.294 @@ -281,7 +481,7 @@
  62.295  }
  62.296  
  62.297  #ifndef PRODUCT
  62.298 -void BranchData::print_data_on(outputStream* st) {
  62.299 +void BranchData::print_data_on(outputStream* st) const {
  62.300    print_shared(st, "BranchData");
  62.301    st->print_cr("taken(%u) displacement(%d)",
  62.302                 taken(), displacement());
  62.303 @@ -355,7 +555,7 @@
  62.304  }
  62.305  
  62.306  #ifndef PRODUCT
  62.307 -void MultiBranchData::print_data_on(outputStream* st) {
  62.308 +void MultiBranchData::print_data_on(outputStream* st) const {
  62.309    print_shared(st, "MultiBranchData");
  62.310    st->print_cr("default_count(%u) displacement(%d)",
  62.311                 default_count(), default_displacement());
  62.312 @@ -369,7 +569,7 @@
  62.313  #endif
  62.314  
  62.315  #ifndef PRODUCT
  62.316 -void ArgInfoData::print_data_on(outputStream* st) {
  62.317 +void ArgInfoData::print_data_on(outputStream* st) const {
  62.318    print_shared(st, "ArgInfoData");
  62.319    int nargs = number_of_args();
  62.320    for (int i = 0; i < nargs; i++) {
  62.321 @@ -407,7 +607,11 @@
  62.322      }
  62.323    case Bytecodes::_invokespecial:
  62.324    case Bytecodes::_invokestatic:
  62.325 -    return CounterData::static_cell_count();
  62.326 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  62.327 +      return variable_cell_count;
  62.328 +    } else {
  62.329 +      return CounterData::static_cell_count();
  62.330 +    }
  62.331    case Bytecodes::_goto:
  62.332    case Bytecodes::_goto_w:
  62.333    case Bytecodes::_jsr:
  62.334 @@ -415,9 +619,17 @@
  62.335      return JumpData::static_cell_count();
  62.336    case Bytecodes::_invokevirtual:
  62.337    case Bytecodes::_invokeinterface:
  62.338 -    return VirtualCallData::static_cell_count();
  62.339 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  62.340 +      return variable_cell_count;
  62.341 +    } else {
  62.342 +      return VirtualCallData::static_cell_count();
  62.343 +    }
  62.344    case Bytecodes::_invokedynamic:
  62.345 -    return CounterData::static_cell_count();
  62.346 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  62.347 +      return variable_cell_count;
  62.348 +    } else {
  62.349 +      return CounterData::static_cell_count();
  62.350 +    }
  62.351    case Bytecodes::_ret:
  62.352      return RetData::static_cell_count();
  62.353    case Bytecodes::_ifeq:
  62.354 @@ -453,7 +665,36 @@
  62.355      return 0;
  62.356    }
  62.357    if (cell_count == variable_cell_count) {
  62.358 -    cell_count = MultiBranchData::compute_cell_count(stream);
  62.359 +    switch (stream->code()) {
  62.360 +    case Bytecodes::_lookupswitch:
  62.361 +    case Bytecodes::_tableswitch:
  62.362 +      cell_count = MultiBranchData::compute_cell_count(stream);
  62.363 +      break;
  62.364 +    case Bytecodes::_invokespecial:
  62.365 +    case Bytecodes::_invokestatic:
  62.366 +    case Bytecodes::_invokedynamic:
  62.367 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  62.368 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  62.369 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  62.370 +        cell_count = CallTypeData::compute_cell_count(stream);
  62.371 +      } else {
  62.372 +        cell_count = CounterData::static_cell_count();
  62.373 +      }
  62.374 +      break;
  62.375 +    case Bytecodes::_invokevirtual:
  62.376 +    case Bytecodes::_invokeinterface: {
  62.377 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  62.378 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  62.379 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  62.380 +        cell_count = VirtualCallTypeData::compute_cell_count(stream);
  62.381 +      } else {
  62.382 +        cell_count = VirtualCallData::static_cell_count();
  62.383 +      }
  62.384 +      break;
  62.385 +    }
  62.386 +    default:
  62.387 +      fatal("unexpected bytecode for var length profile data");
  62.388 +    }
  62.389    }
  62.390    // Note:  cell_count might be zero, meaning that there is just
  62.391    //        a DataLayout header, with no extra cells.
  62.392 @@ -499,6 +740,7 @@
  62.393    // Add a cell to record information about modified arguments.
  62.394    int arg_size = method->size_of_parameters();
  62.395    object_size += DataLayout::compute_size_in_bytes(arg_size+1);
  62.396 +
  62.397    return object_size;
  62.398  }
  62.399  
  62.400 @@ -534,10 +776,21 @@
  62.401      }
  62.402      break;
  62.403    case Bytecodes::_invokespecial:
  62.404 -  case Bytecodes::_invokestatic:
  62.405 -    cell_count = CounterData::static_cell_count();
  62.406 -    tag = DataLayout::counter_data_tag;
  62.407 +  case Bytecodes::_invokestatic: {
  62.408 +    int counter_data_cell_count = CounterData::static_cell_count();
  62.409 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  62.410 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  62.411 +      cell_count = CallTypeData::compute_cell_count(stream);
  62.412 +    } else {
  62.413 +      cell_count = counter_data_cell_count;
  62.414 +    }
  62.415 +    if (cell_count > counter_data_cell_count) {
  62.416 +      tag = DataLayout::call_type_data_tag;
  62.417 +    } else {
  62.418 +      tag = DataLayout::counter_data_tag;
  62.419 +    }
  62.420      break;
  62.421 +  }
  62.422    case Bytecodes::_goto:
  62.423    case Bytecodes::_goto_w:
  62.424    case Bytecodes::_jsr:
  62.425 @@ -546,15 +799,37 @@
  62.426      tag = DataLayout::jump_data_tag;
  62.427      break;
  62.428    case Bytecodes::_invokevirtual:
  62.429 -  case Bytecodes::_invokeinterface:
  62.430 -    cell_count = VirtualCallData::static_cell_count();
  62.431 -    tag = DataLayout::virtual_call_data_tag;
  62.432 +  case Bytecodes::_invokeinterface: {
  62.433 +    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
  62.434 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  62.435 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  62.436 +      cell_count = VirtualCallTypeData::compute_cell_count(stream);
  62.437 +    } else {
  62.438 +      cell_count = virtual_call_data_cell_count;
  62.439 +    }
  62.440 +    if (cell_count > virtual_call_data_cell_count) {
  62.441 +      tag = DataLayout::virtual_call_type_data_tag;
  62.442 +    } else {
  62.443 +      tag = DataLayout::virtual_call_data_tag;
  62.444 +    }
  62.445      break;
  62.446 -  case Bytecodes::_invokedynamic:
  62.447 +  }
  62.448 +  case Bytecodes::_invokedynamic: {
  62.449      // %%% should make a type profile for any invokedynamic that takes a ref argument
  62.450 -    cell_count = CounterData::static_cell_count();
  62.451 -    tag = DataLayout::counter_data_tag;
  62.452 +    int counter_data_cell_count = CounterData::static_cell_count();
  62.453 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  62.454 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  62.455 +      cell_count = CallTypeData::compute_cell_count(stream);
  62.456 +    } else {
  62.457 +      cell_count = counter_data_cell_count;
  62.458 +    }
  62.459 +    if (cell_count > counter_data_cell_count) {
  62.460 +      tag = DataLayout::call_type_data_tag;
  62.461 +    } else {
  62.462 +      tag = DataLayout::counter_data_tag;
  62.463 +    }
  62.464      break;
  62.465 +  }
  62.466    case Bytecodes::_ret:
  62.467      cell_count = RetData::static_cell_count();
  62.468      tag = DataLayout::ret_data_tag;
  62.469 @@ -585,6 +860,11 @@
  62.470      break;
  62.471    }
  62.472    assert(tag == DataLayout::multi_branch_data_tag ||
  62.473 +         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
  62.474 +          (tag == DataLayout::call_type_data_tag ||
  62.475 +           tag == DataLayout::counter_data_tag ||
  62.476 +           tag == DataLayout::virtual_call_type_data_tag ||
  62.477 +           tag == DataLayout::virtual_call_data_tag)) ||
  62.478           cell_count == bytecode_cell_count(c), "cell counts must agree");
  62.479    if (cell_count >= 0) {
  62.480      assert(tag != DataLayout::no_tag, "bad tag");
  62.481 @@ -631,6 +911,10 @@
  62.482      return new MultiBranchData(this);
  62.483    case DataLayout::arg_info_data_tag:
  62.484      return new ArgInfoData(this);
  62.485 +  case DataLayout::call_type_data_tag:
  62.486 +    return new CallTypeData(this);
  62.487 +  case DataLayout::virtual_call_type_data_tag:
  62.488 +    return new VirtualCallTypeData(this);
  62.489    };
  62.490  }
  62.491  
  62.492 @@ -898,3 +1182,70 @@
  62.493    NEEDS_CLEANUP;
  62.494    // not yet implemented.
  62.495  }
  62.496 +
  62.497 +bool MethodData::profile_jsr292(methodHandle m, int bci) {
  62.498 +  if (m->is_compiled_lambda_form()) {
  62.499 +    return true;
  62.500 +  }
  62.501 +
   62.502 +  Bytecode_invoke inv(m, bci);
  62.503 +  return inv.is_invokedynamic() || inv.is_invokehandle();
  62.504 +}
  62.505 +
  62.506 +int MethodData::profile_arguments_flag() {
  62.507 +  return TypeProfileLevel % 10;
  62.508 +}
  62.509 +
  62.510 +bool MethodData::profile_arguments() {
  62.511 +  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
  62.512 +}
  62.513 +
  62.514 +bool MethodData::profile_arguments_jsr292_only() {
  62.515 +  return profile_arguments_flag() == type_profile_jsr292;
  62.516 +}
  62.517 +
  62.518 +bool MethodData::profile_all_arguments() {
  62.519 +  return profile_arguments_flag() == type_profile_all;
  62.520 +}
  62.521 +
  62.522 +bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
  62.523 +  if (!profile_arguments()) {
  62.524 +    return false;
  62.525 +  }
  62.526 +
  62.527 +  if (profile_all_arguments()) {
  62.528 +    return true;
  62.529 +  }
  62.530 +
  62.531 +  assert(profile_arguments_jsr292_only(), "inconsistent");
  62.532 +  return profile_jsr292(m, bci);
  62.533 +}
  62.534 +
  62.535 +int MethodData::profile_return_flag() {
  62.536 +  return TypeProfileLevel / 10;
  62.537 +}
  62.538 +
  62.539 +bool MethodData::profile_return() {
  62.540 +  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
  62.541 +}
  62.542 +
  62.543 +bool MethodData::profile_return_jsr292_only() {
  62.544 +  return profile_return_flag() == type_profile_jsr292;
  62.545 +}
  62.546 +
  62.547 +bool MethodData::profile_all_return() {
  62.548 +  return profile_return_flag() == type_profile_all;
  62.549 +}
  62.550 +
  62.551 +bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
  62.552 +  if (!profile_return()) {
  62.553 +    return false;
  62.554 +  }
  62.555 +
  62.556 +  if (profile_all_return()) {
  62.557 +    return true;
  62.558 +  }
  62.559 +
  62.560 +  assert(profile_return_jsr292_only(), "inconsistent");
  62.561 +  return profile_jsr292(m, bci);
  62.562 +}
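
The accessors above decode TypeProfileLevel as a two-digit decimal flag: the ones digit (TypeProfileLevel % 10) selects the argument-profiling mode and the tens digit (TypeProfileLevel / 10) the return-profiling mode, each taking no_type_profile (0), type_profile_jsr292 (1) or type_profile_all (2). A minimal standalone sketch of that decoding, assuming only the semantics visible in this changeset (the main() driver and its sample values are illustrative, not part of the patch):

    #include <cstdio>

    // Mirrors the enum added to MethodData in this changeset.
    enum { no_type_profile = 0, type_profile_jsr292 = 1, type_profile_all = 2 };

    // TypeProfileLevel is a two-digit decimal flag: ones digit = argument
    // profiling mode, tens digit = return-value profiling mode.
    static void decode(unsigned type_profile_level) {
      unsigned args_flag = type_profile_level % 10;  // profile_arguments_flag()
      unsigned ret_flag  = type_profile_level / 10;  // profile_return_flag()
      std::printf("TypeProfileLevel=%u -> arguments=%u, return=%u\n",
                  type_profile_level, args_flag, ret_flag);
    }

    int main() {
      decode(0);   // no type profiling
      decode(2);   // all arguments, no return values
      decode(12);  // all arguments, JSR 292 return values only
      decode(22);  // everything
      return 0;
    }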
    63.1 --- a/src/share/vm/oops/methodData.hpp	Wed Oct 16 11:48:03 2013 -0700
    63.2 +++ b/src/share/vm/oops/methodData.hpp	Thu Oct 17 10:58:45 2013 -0700
    63.3 @@ -117,7 +117,9 @@
    63.4      ret_data_tag,
    63.5      branch_data_tag,
    63.6      multi_branch_data_tag,
    63.7 -    arg_info_data_tag
    63.8 +    arg_info_data_tag,
    63.9 +    call_type_data_tag,
   63.10 +    virtual_call_type_data_tag
   63.11    };
   63.12  
   63.13    enum {
   63.14 @@ -165,7 +167,7 @@
   63.15    // occurred, and the MDO shows N occurrences of X, we make the
   63.16    // simplifying assumption that all N occurrences can be blamed
   63.17    // on that BCI.
   63.18 -  int trap_state() {
   63.19 +  int trap_state() const {
   63.20      return ((_header._struct._flags >> trap_shift) & trap_mask);
   63.21    }
   63.22  
   63.23 @@ -175,11 +177,11 @@
   63.24      _header._struct._flags = (new_state << trap_shift) | old_flags;
   63.25    }
   63.26  
   63.27 -  u1 flags() {
   63.28 +  u1 flags() const {
   63.29      return _header._struct._flags;
   63.30    }
   63.31  
   63.32 -  u2 bci() {
   63.33 +  u2 bci() const {
   63.34      return _header._struct._bci;
   63.35    }
   63.36  
   63.37 @@ -198,7 +200,7 @@
   63.38    void release_set_cell_at(int index, intptr_t value) {
   63.39      OrderAccess::release_store_ptr(&_cells[index], value);
   63.40    }
   63.41 -  intptr_t cell_at(int index) {
   63.42 +  intptr_t cell_at(int index) const {
   63.43      return _cells[index];
   63.44    }
   63.45  
   63.46 @@ -206,7 +208,7 @@
   63.47      assert(flag_number < flag_limit, "oob");
   63.48      _header._struct._flags |= (0x1 << flag_number);
   63.49    }
   63.50 -  bool flag_at(int flag_number) {
   63.51 +  bool flag_at(int flag_number) const {
   63.52      assert(flag_number < flag_limit, "oob");
   63.53      return (_header._struct._flags & (0x1 << flag_number)) != 0;
   63.54    }
   63.55 @@ -254,19 +256,23 @@
   63.56  class     CounterData;
   63.57  class       ReceiverTypeData;
   63.58  class         VirtualCallData;
   63.59 +class           VirtualCallTypeData;
   63.60  class       RetData;
   63.61 +class       CallTypeData;
   63.62  class   JumpData;
   63.63  class     BranchData;
   63.64  class   ArrayData;
   63.65  class     MultiBranchData;
   63.66  class     ArgInfoData;
   63.67  
   63.68 -
   63.69  // ProfileData
   63.70  //
   63.71  // A ProfileData object is created to refer to a section of profiling
   63.72  // data in a structured way.
   63.73  class ProfileData : public ResourceObj {
   63.74 +  friend class TypeEntries;
   63.75 +  friend class ReturnTypeEntry;
   63.76 +  friend class TypeStackSlotEntries;
   63.77  private:
   63.78  #ifndef PRODUCT
   63.79    enum {
   63.80 @@ -280,6 +286,7 @@
   63.81  
   63.82  protected:
   63.83    DataLayout* data() { return _data; }
   63.84 +  const DataLayout* data() const { return _data; }
   63.85  
   63.86    enum {
   63.87      cell_size = DataLayout::cell_size
   63.88 @@ -287,7 +294,7 @@
   63.89  
   63.90  public:
   63.91    // How many cells are in this?
   63.92 -  virtual int cell_count() {
   63.93 +  virtual int cell_count() const {
   63.94      ShouldNotReachHere();
   63.95      return -1;
   63.96    }
   63.97 @@ -307,7 +314,7 @@
   63.98      assert(0 <= index && index < cell_count(), "oob");
   63.99      data()->release_set_cell_at(index, value);
  63.100    }
  63.101 -  intptr_t intptr_at(int index) {
  63.102 +  intptr_t intptr_at(int index) const {
  63.103      assert(0 <= index && index < cell_count(), "oob");
  63.104      return data()->cell_at(index);
  63.105    }
  63.106 @@ -317,7 +324,7 @@
  63.107    void release_set_uint_at(int index, uint value) {
  63.108      release_set_intptr_at(index, (intptr_t) value);
  63.109    }
  63.110 -  uint uint_at(int index) {
  63.111 +  uint uint_at(int index) const {
  63.112      return (uint)intptr_at(index);
  63.113    }
  63.114    void set_int_at(int index, int value) {
  63.115 @@ -326,23 +333,23 @@
  63.116    void release_set_int_at(int index, int value) {
  63.117      release_set_intptr_at(index, (intptr_t) value);
  63.118    }
  63.119 -  int int_at(int index) {
  63.120 +  int int_at(int index) const {
  63.121      return (int)intptr_at(index);
  63.122    }
  63.123 -  int int_at_unchecked(int index) {
  63.124 +  int int_at_unchecked(int index) const {
  63.125      return (int)data()->cell_at(index);
  63.126    }
  63.127    void set_oop_at(int index, oop value) {
  63.128      set_intptr_at(index, cast_from_oop<intptr_t>(value));
  63.129    }
  63.130 -  oop oop_at(int index) {
  63.131 +  oop oop_at(int index) const {
  63.132      return cast_to_oop(intptr_at(index));
  63.133    }
  63.134  
  63.135    void set_flag_at(int flag_number) {
  63.136      data()->set_flag_at(flag_number);
  63.137    }
  63.138 -  bool flag_at(int flag_number) {
  63.139 +  bool flag_at(int flag_number) const {
  63.140      return data()->flag_at(flag_number);
  63.141    }
  63.142  
  63.143 @@ -362,7 +369,7 @@
  63.144    // Constructor for invalid ProfileData.
  63.145    ProfileData();
  63.146  
  63.147 -  u2 bci() {
  63.148 +  u2 bci() const {
  63.149      return data()->bci();
  63.150    }
  63.151  
  63.152 @@ -370,7 +377,7 @@
  63.153      return (address)_data;
  63.154    }
  63.155  
  63.156 -  int trap_state() {
  63.157 +  int trap_state() const {
  63.158      return data()->trap_state();
  63.159    }
  63.160    void set_trap_state(int new_state) {
  63.161 @@ -378,58 +385,68 @@
  63.162    }
  63.163  
  63.164    // Type checking
  63.165 -  virtual bool is_BitData()         { return false; }
  63.166 -  virtual bool is_CounterData()     { return false; }
  63.167 -  virtual bool is_JumpData()        { return false; }
  63.168 -  virtual bool is_ReceiverTypeData(){ return false; }
  63.169 -  virtual bool is_VirtualCallData() { return false; }
  63.170 -  virtual bool is_RetData()         { return false; }
  63.171 -  virtual bool is_BranchData()      { return false; }
  63.172 -  virtual bool is_ArrayData()       { return false; }
  63.173 -  virtual bool is_MultiBranchData() { return false; }
  63.174 -  virtual bool is_ArgInfoData()     { return false; }
  63.175 +  virtual bool is_BitData()         const { return false; }
  63.176 +  virtual bool is_CounterData()     const { return false; }
  63.177 +  virtual bool is_JumpData()        const { return false; }
  63.178 +  virtual bool is_ReceiverTypeData()const { return false; }
  63.179 +  virtual bool is_VirtualCallData() const { return false; }
  63.180 +  virtual bool is_RetData()         const { return false; }
  63.181 +  virtual bool is_BranchData()      const { return false; }
  63.182 +  virtual bool is_ArrayData()       const { return false; }
  63.183 +  virtual bool is_MultiBranchData() const { return false; }
  63.184 +  virtual bool is_ArgInfoData()     const { return false; }
  63.185 +  virtual bool is_CallTypeData()    const { return false; }
  63.186 +  virtual bool is_VirtualCallTypeData()const { return false; }
  63.187  
  63.188  
  63.189 -  BitData* as_BitData() {
  63.190 +  BitData* as_BitData() const {
  63.191      assert(is_BitData(), "wrong type");
  63.192      return is_BitData()         ? (BitData*)        this : NULL;
  63.193    }
  63.194 -  CounterData* as_CounterData() {
  63.195 +  CounterData* as_CounterData() const {
  63.196      assert(is_CounterData(), "wrong type");
  63.197      return is_CounterData()     ? (CounterData*)    this : NULL;
  63.198    }
  63.199 -  JumpData* as_JumpData() {
  63.200 +  JumpData* as_JumpData() const {
  63.201      assert(is_JumpData(), "wrong type");
  63.202      return is_JumpData()        ? (JumpData*)       this : NULL;
  63.203    }
  63.204 -  ReceiverTypeData* as_ReceiverTypeData() {
  63.205 +  ReceiverTypeData* as_ReceiverTypeData() const {
  63.206      assert(is_ReceiverTypeData(), "wrong type");
  63.207      return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
  63.208    }
  63.209 -  VirtualCallData* as_VirtualCallData() {
  63.210 +  VirtualCallData* as_VirtualCallData() const {
  63.211      assert(is_VirtualCallData(), "wrong type");
  63.212      return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
  63.213    }
  63.214 -  RetData* as_RetData() {
  63.215 +  RetData* as_RetData() const {
  63.216      assert(is_RetData(), "wrong type");
  63.217      return is_RetData()         ? (RetData*)        this : NULL;
  63.218    }
  63.219 -  BranchData* as_BranchData() {
  63.220 +  BranchData* as_BranchData() const {
  63.221      assert(is_BranchData(), "wrong type");
  63.222      return is_BranchData()      ? (BranchData*)     this : NULL;
  63.223    }
  63.224 -  ArrayData* as_ArrayData() {
  63.225 +  ArrayData* as_ArrayData() const {
  63.226      assert(is_ArrayData(), "wrong type");
  63.227      return is_ArrayData()       ? (ArrayData*)      this : NULL;
  63.228    }
  63.229 -  MultiBranchData* as_MultiBranchData() {
  63.230 +  MultiBranchData* as_MultiBranchData() const {
  63.231      assert(is_MultiBranchData(), "wrong type");
  63.232      return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
  63.233    }
  63.234 -  ArgInfoData* as_ArgInfoData() {
  63.235 +  ArgInfoData* as_ArgInfoData() const {
  63.236      assert(is_ArgInfoData(), "wrong type");
  63.237      return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
  63.238    }
  63.239 +  CallTypeData* as_CallTypeData() const {
  63.240 +    assert(is_CallTypeData(), "wrong type");
  63.241 +    return is_CallTypeData() ? (CallTypeData*)this : NULL;
  63.242 +  }
  63.243 +  VirtualCallTypeData* as_VirtualCallTypeData() const {
  63.244 +    assert(is_VirtualCallTypeData(), "wrong type");
  63.245 +    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
  63.246 +  }
  63.247  
  63.248  
  63.249    // Subclass specific initialization
  63.250 @@ -443,15 +460,15 @@
  63.251    // an oop in a ProfileData to the ci equivalent. Generally speaking,
  63.252    // most ProfileData don't require any translation, so we provide the null
  63.253    // translation here, and the required translators are in the ci subclasses.
  63.254 -  virtual void translate_from(ProfileData* data) {}
  63.255 +  virtual void translate_from(const ProfileData* data) {}
  63.256  
  63.257 -  virtual void print_data_on(outputStream* st) {
  63.258 +  virtual void print_data_on(outputStream* st) const {
  63.259      ShouldNotReachHere();
  63.260    }
  63.261  
  63.262  #ifndef PRODUCT
  63.263 -  void print_shared(outputStream* st, const char* name);
  63.264 -  void tab(outputStream* st);
  63.265 +  void print_shared(outputStream* st, const char* name) const;
  63.266 +  void tab(outputStream* st, bool first = false) const;
  63.267  #endif
  63.268  };
  63.269  
  63.270 @@ -470,13 +487,13 @@
  63.271    BitData(DataLayout* layout) : ProfileData(layout) {
  63.272    }
  63.273  
  63.274 -  virtual bool is_BitData() { return true; }
  63.275 +  virtual bool is_BitData() const { return true; }
  63.276  
  63.277    static int static_cell_count() {
  63.278      return bit_cell_count;
  63.279    }
  63.280  
  63.281 -  virtual int cell_count() {
  63.282 +  virtual int cell_count() const {
  63.283      return static_cell_count();
  63.284    }
  63.285  
  63.286 @@ -498,7 +515,7 @@
  63.287    }
  63.288  
  63.289  #ifndef PRODUCT
  63.290 -  void print_data_on(outputStream* st);
  63.291 +  void print_data_on(outputStream* st) const;
  63.292  #endif
  63.293  };
  63.294  
  63.295 @@ -514,18 +531,18 @@
  63.296  public:
  63.297    CounterData(DataLayout* layout) : BitData(layout) {}
  63.298  
  63.299 -  virtual bool is_CounterData() { return true; }
  63.300 +  virtual bool is_CounterData() const { return true; }
  63.301  
  63.302    static int static_cell_count() {
  63.303      return counter_cell_count;
  63.304    }
  63.305  
  63.306 -  virtual int cell_count() {
  63.307 +  virtual int cell_count() const {
  63.308      return static_cell_count();
  63.309    }
  63.310  
  63.311    // Direct accessor
  63.312 -  uint count() {
  63.313 +  uint count() const {
  63.314      return uint_at(count_off);
  63.315    }
  63.316  
  63.317 @@ -542,7 +559,7 @@
  63.318    }
  63.319  
  63.320  #ifndef PRODUCT
  63.321 -  void print_data_on(outputStream* st);
  63.322 +  void print_data_on(outputStream* st) const;
  63.323  #endif
  63.324  };
  63.325  
  63.326 @@ -570,18 +587,18 @@
  63.327        layout->tag() == DataLayout::branch_data_tag, "wrong type");
  63.328    }
  63.329  
  63.330 -  virtual bool is_JumpData() { return true; }
  63.331 +  virtual bool is_JumpData() const { return true; }
  63.332  
  63.333    static int static_cell_count() {
  63.334      return jump_cell_count;
  63.335    }
  63.336  
  63.337 -  virtual int cell_count() {
  63.338 +  virtual int cell_count() const {
  63.339      return static_cell_count();
  63.340    }
  63.341  
  63.342    // Direct accessor
  63.343 -  uint taken() {
  63.344 +  uint taken() const {
  63.345      return uint_at(taken_off_set);
  63.346    }
  63.347  
  63.348 @@ -598,7 +615,7 @@
  63.349      return cnt;
  63.350    }
  63.351  
  63.352 -  int displacement() {
  63.353 +  int displacement() const {
  63.354      return int_at(displacement_off_set);
  63.355    }
  63.356  
  63.357 @@ -615,7 +632,418 @@
  63.358    void post_initialize(BytecodeStream* stream, MethodData* mdo);
  63.359  
  63.360  #ifndef PRODUCT
  63.361 -  void print_data_on(outputStream* st);
  63.362 +  void print_data_on(outputStream* st) const;
  63.363 +#endif
  63.364 +};
  63.365 +
   63.366 +// Entries in a ProfileData object to record types: an entry can either be
   63.367 +// none (no profile), unknown (conflicting profile data) or a klass if
   63.368 +// a single one is seen. Whether a null reference was seen is also
   63.369 +// recorded. No counter is associated with the type, and a single type
   63.370 +// is tracked (unlike VirtualCallData).
  63.371 +class TypeEntries {
  63.372 +
  63.373 +public:
  63.374 +
  63.375 +  // A single cell is used to record information for a type:
  63.376 +  // - the cell is initialized to 0
  63.377 +  // - when a type is discovered it is stored in the cell
  63.378 +  // - bit zero of the cell is used to record whether a null reference
  63.379 +  // was encountered or not
  63.380 +  // - bit 1 is set to record a conflict in the type information
  63.381 +
  63.382 +  enum {
  63.383 +    null_seen = 1,
  63.384 +    type_mask = ~null_seen,
  63.385 +    type_unknown = 2,
  63.386 +    status_bits = null_seen | type_unknown,
  63.387 +    type_klass_mask = ~status_bits
  63.388 +  };
  63.389 +
  63.390 +  // what to initialize a cell to
  63.391 +  static intptr_t type_none() {
  63.392 +    return 0;
  63.393 +  }
  63.394 +
  63.395 +  // null seen = bit 0 set?
  63.396 +  static bool was_null_seen(intptr_t v) {
  63.397 +    return (v & null_seen) != 0;
  63.398 +  }
  63.399 +
  63.400 +  // conflicting type information = bit 1 set?
  63.401 +  static bool is_type_unknown(intptr_t v) {
  63.402 +    return (v & type_unknown) != 0;
  63.403 +  }
  63.404 +
   63.405 +  // no type information yet = all bits cleared, ignoring bit 0?
  63.406 +  static bool is_type_none(intptr_t v) {
  63.407 +    return (v & type_mask) == 0;
  63.408 +  }
  63.409 +
   63.410 +  // recorded type: cell without bits 0 and 1
  63.411 +  static intptr_t klass_part(intptr_t v) {
  63.412 +    intptr_t r = v & type_klass_mask;
   63.413 +    assert(r != 0, "invalid");
  63.414 +    return r;
  63.415 +  }
  63.416 +
  63.417 +  // type recorded
  63.418 +  static Klass* valid_klass(intptr_t k) {
  63.419 +    if (!is_type_none(k) &&
  63.420 +        !is_type_unknown(k)) {
  63.421 +      return (Klass*)klass_part(k);
  63.422 +    } else {
  63.423 +      return NULL;
  63.424 +    }
  63.425 +  }
  63.426 +
  63.427 +  static intptr_t with_status(intptr_t k, intptr_t in) {
  63.428 +    return k | (in & status_bits);
  63.429 +  }
  63.430 +
  63.431 +  static intptr_t with_status(Klass* k, intptr_t in) {
  63.432 +    return with_status((intptr_t)k, in);
  63.433 +  }
  63.434 +
  63.435 +#ifndef PRODUCT
  63.436 +  static void print_klass(outputStream* st, intptr_t k);
  63.437 +#endif
  63.438 +
  63.439 +  // GC support
  63.440 +  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
  63.441 +
  63.442 +protected:
  63.443 +  // ProfileData object these entries are part of
  63.444 +  ProfileData* _pd;
  63.445 +  // offset within the ProfileData object where the entries start
  63.446 +  const int _base_off;
  63.447 +
  63.448 +  TypeEntries(int base_off)
  63.449 +    : _base_off(base_off), _pd(NULL) {}
  63.450 +
  63.451 +  void set_intptr_at(int index, intptr_t value) {
  63.452 +    _pd->set_intptr_at(index, value);
  63.453 +  }
  63.454 +
  63.455 +  intptr_t intptr_at(int index) const {
  63.456 +    return _pd->intptr_at(index);
  63.457 +  }
  63.458 +
  63.459 +public:
  63.460 +  void set_profile_data(ProfileData* pd) {
  63.461 +    _pd = pd;
  63.462 +  }
  63.463 +};
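
The TypeEntries cell defined above is a tagged klass pointer: Klass* values are word aligned, so bit 0 (null_seen) and bit 1 (type_unknown) are free for status, and an all-zero cell means no type recorded yet. A self-contained sketch of the same encoding, assuming an aligned dummy value in place of a real Klass*:

    #include <cassert>
    #include <cstdint>

    // Mirrors the TypeEntries bit layout: bit 0 = null seen, bit 1 = type
    // unknown (conflict), remaining bits = the recorded klass pointer.
    static const intptr_t null_seen       = 1;
    static const intptr_t type_unknown    = 2;
    static const intptr_t status_bits     = null_seen | type_unknown;
    static const intptr_t type_klass_mask = ~status_bits;

    static bool was_null_seen(intptr_t v)   { return (v & null_seen) != 0; }
    static bool is_type_unknown(intptr_t v) { return (v & type_unknown) != 0; }
    static bool is_type_none(intptr_t v)    { return (v & ~null_seen) == 0; }
    static intptr_t with_status(intptr_t k, intptr_t in) { return k | (in & status_bits); }

    int main() {
      intptr_t cell = 0;              // type_none(): freshly initialized
      intptr_t fake_klass = 0x1000;   // stands in for an aligned Klass*
      cell |= null_seen;                     // a null reference was profiled
      cell = with_status(fake_klass, cell);  // then a concrete type was seen
      assert(!is_type_none(cell) && was_null_seen(cell));
      assert((cell & type_klass_mask) == fake_klass);  // klass_part()
      cell |= type_unknown;           // a second, different type: conflict
      assert(is_type_unknown(cell));
      return 0;
    }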
  63.464 +
  63.465 +// Type entries used for arguments passed at a call and parameters on
  63.466 +// method entry. 2 cells per entry: one for the type encoded as in
  63.467 +// TypeEntries and one initialized with the stack slot where the
  63.468 +// profiled object is to be found so that the interpreter can locate
  63.469 +// it quickly.
  63.470 +class TypeStackSlotEntries : public TypeEntries {
  63.471 +
  63.472 +private:
  63.473 +  enum {
  63.474 +    stack_slot_entry,
  63.475 +    type_entry,
  63.476 +    per_arg_cell_count
  63.477 +  };
  63.478 +
  63.479 +  // offset of cell for stack slot for entry i within ProfileData object
  63.480 +  int stack_slot_offset(int i) const {
  63.481 +    return _base_off + stack_slot_local_offset(i);
  63.482 +  }
  63.483 +
  63.484 +protected:
  63.485 +  const int _number_of_entries;
  63.486 +
  63.487 +  // offset of cell for type for entry i within ProfileData object
  63.488 +  int type_offset(int i) const {
  63.489 +    return _base_off + type_local_offset(i);
  63.490 +  }
  63.491 +
  63.492 +public:
  63.493 +
  63.494 +  TypeStackSlotEntries(int base_off, int nb_entries)
  63.495 +    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
  63.496 +
  63.497 +  static int compute_cell_count(Symbol* signature, int max);
  63.498 +
  63.499 +  void post_initialize(Symbol* signature, bool has_receiver);
  63.500 +
  63.501 +  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
  63.502 +  static int stack_slot_local_offset(int i) {
  63.503 +    return i * per_arg_cell_count + stack_slot_entry;
  63.504 +  }
  63.505 +
  63.506 +  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
  63.507 +  static int type_local_offset(int i) {
  63.508 +    return i * per_arg_cell_count + type_entry;
  63.509 +  }
  63.510 +
  63.511 +  // stack slot for entry i
  63.512 +  uint stack_slot(int i) const {
  63.513 +    assert(i >= 0 && i < _number_of_entries, "oob");
  63.514 +    return _pd->uint_at(stack_slot_offset(i));
  63.515 +  }
  63.516 +
  63.517 +  // set stack slot for entry i
  63.518 +  void set_stack_slot(int i, uint num) {
  63.519 +    assert(i >= 0 && i < _number_of_entries, "oob");
  63.520 +    _pd->set_uint_at(stack_slot_offset(i), num);
  63.521 +  }
  63.522 +
  63.523 +  // type for entry i
  63.524 +  intptr_t type(int i) const {
  63.525 +    assert(i >= 0 && i < _number_of_entries, "oob");
  63.526 +    return _pd->intptr_at(type_offset(i));
  63.527 +  }
  63.528 +
  63.529 +  // set type for entry i
  63.530 +  void set_type(int i, intptr_t k) {
  63.531 +    assert(i >= 0 && i < _number_of_entries, "oob");
  63.532 +    _pd->set_intptr_at(type_offset(i), k);
  63.533 +  }
  63.534 +
  63.535 +  static ByteSize per_arg_size() {
  63.536 +    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  63.537 +  }
  63.538 +
  63.539 +  static int per_arg_count() {
   63.540 +    return per_arg_cell_count;
  63.541 +  }
  63.542 +
  63.543 +  // GC support
  63.544 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  63.545 +
  63.546 +#ifndef PRODUCT
  63.547 +  void print_data_on(outputStream* st) const;
  63.548 +#endif
  63.549 +};
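
Each profiled argument occupies per_arg_cell_count (2) consecutive cells, stack slot first and type second, so offsets within the block stride by two and are shifted by the block's base offset in the ProfileData. A small sketch of the offset arithmetic; the base_off value is hypothetical:

    #include <cstdio>

    // Mirrors TypeStackSlotEntries: 2 cells per profiled argument.
    enum { stack_slot_entry = 0, type_entry = 1, per_arg_cell_count = 2 };

    static int stack_slot_local_offset(int i) { return i * per_arg_cell_count + stack_slot_entry; }
    static int type_local_offset(int i)       { return i * per_arg_cell_count + type_entry; }

    int main() {
      const int base_off = 4;  // hypothetical start of the block within the data
      for (int i = 0; i < 3; i++) {
        std::printf("arg %d: stack slot in cell %d, type in cell %d\n",
                    i, base_off + stack_slot_local_offset(i), base_off + type_local_offset(i));
      }
      // arg 0 -> cells 4,5; arg 1 -> cells 6,7; arg 2 -> cells 8,9
      return 0;
    }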
  63.550 +
  63.551 +// Type entry used for return from a call. A single cell to record the
  63.552 +// type.
  63.553 +class ReturnTypeEntry : public TypeEntries {
  63.554 +
  63.555 +private:
  63.556 +  enum {
  63.557 +    cell_count = 1
  63.558 +  };
  63.559 +
  63.560 +public:
  63.561 +  ReturnTypeEntry(int base_off)
  63.562 +    : TypeEntries(base_off) {}
  63.563 +
  63.564 +  void post_initialize() {
  63.565 +    set_type(type_none());
  63.566 +  }
  63.567 +
  63.568 +  intptr_t type() const {
  63.569 +    return _pd->intptr_at(_base_off);
  63.570 +  }
  63.571 +
  63.572 +  void set_type(intptr_t k) {
  63.573 +    _pd->set_intptr_at(_base_off, k);
  63.574 +  }
  63.575 +
  63.576 +  static int static_cell_count() {
  63.577 +    return cell_count;
  63.578 +  }
  63.579 +
  63.580 +  static ByteSize size() {
  63.581 +    return in_ByteSize(cell_count * DataLayout::cell_size);
  63.582 +  }
  63.583 +
  63.584 +  ByteSize type_offset() {
  63.585 +    return DataLayout::cell_offset(_base_off);
  63.586 +  }
  63.587 +
  63.588 +  // GC support
  63.589 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  63.590 +
  63.591 +#ifndef PRODUCT
  63.592 +  void print_data_on(outputStream* st) const;
  63.593 +#endif
  63.594 +};
  63.595 +
  63.596 +// Entries to collect type information at a call: contains arguments
  63.597 +// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
  63.598 +// number of cells. Because the number of cells for the return type is
   63.599 +// smaller than the number of cells for the type of an argument, the
  63.600 +// number of cells is used to tell how many arguments are profiled and
  63.601 +// whether a return value is profiled. See has_arguments() and
  63.602 +// has_return().
  63.603 +class TypeEntriesAtCall {
  63.604 +private:
  63.605 +  static int stack_slot_local_offset(int i) {
  63.606 +    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
  63.607 +  }
  63.608 +
  63.609 +  static int argument_type_local_offset(int i) {
   63.610 +    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
  63.611 +  }
  63.612 +
  63.613 +public:
  63.614 +
  63.615 +  static int header_cell_count() {
  63.616 +    return 1;
  63.617 +  }
  63.618 +
  63.619 +  static int cell_count_local_offset() {
  63.620 +    return 0;
  63.621 +  }
  63.622 +
  63.623 +  static int compute_cell_count(BytecodeStream* stream);
  63.624 +
  63.625 +  static void initialize(DataLayout* dl, int base, int cell_count) {
  63.626 +    int off = base + cell_count_local_offset();
  63.627 +    dl->set_cell_at(off, cell_count - base - header_cell_count());
  63.628 +  }
  63.629 +
  63.630 +  static bool arguments_profiling_enabled();
  63.631 +  static bool return_profiling_enabled();
  63.632 +
  63.633 +  // Code generation support
  63.634 +  static ByteSize cell_count_offset() {
  63.635 +    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
  63.636 +  }
  63.637 +
  63.638 +  static ByteSize args_data_offset() {
  63.639 +    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
  63.640 +  }
  63.641 +
  63.642 +  static ByteSize stack_slot_offset(int i) {
  63.643 +    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
  63.644 +  }
  63.645 +
  63.646 +  static ByteSize argument_type_offset(int i) {
  63.647 +    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
  63.648 +  }
  63.649 +};
  63.650 +
  63.651 +// CallTypeData
  63.652 +//
  63.653 +// A CallTypeData is used to access profiling information about a non
  63.654 +// virtual call for which we collect type information about arguments
  63.655 +// and return value.
  63.656 +class CallTypeData : public CounterData {
  63.657 +private:
  63.658 +  // entries for arguments if any
  63.659 +  TypeStackSlotEntries _args;
  63.660 +  // entry for return type if any
  63.661 +  ReturnTypeEntry _ret;
  63.662 +
  63.663 +  int cell_count_global_offset() const {
  63.664 +    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  63.665 +  }
  63.666 +
  63.667 +  // number of cells not counting the header
  63.668 +  int cell_count_no_header() const {
  63.669 +    return uint_at(cell_count_global_offset());
  63.670 +  }
  63.671 +
  63.672 +  void check_number_of_arguments(int total) {
  63.673 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  63.674 +  }
  63.675 +
  63.676 +protected:
  63.677 +  // An entry for a return value takes less space than an entry for an
   63.678 +  // argument, so if the number of cells exceeds the number of cells
  63.679 +  // needed for an argument, this object contains type information for
  63.680 +  // at least one argument.
  63.681 +  bool has_arguments() const {
  63.682 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
   63.683 +    assert(!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  63.684 +    return res;
  63.685 +  }
  63.686 +
  63.687 +public:
  63.688 +  CallTypeData(DataLayout* layout) :
  63.689 +    CounterData(layout),
  63.690 +    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  63.691 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  63.692 +  {
  63.693 +    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
   63.694 +    // Some compilers (VC++) don't want this passed in the member initialization list
  63.695 +    _args.set_profile_data(this);
  63.696 +    _ret.set_profile_data(this);
  63.697 +  }
  63.698 +
  63.699 +  const TypeStackSlotEntries* args() const {
  63.700 +    assert(has_arguments(), "no profiling of arguments");
  63.701 +    return &_args;
  63.702 +  }
  63.703 +
  63.704 +  const ReturnTypeEntry* ret() const {
  63.705 +    assert(has_return(), "no profiling of return value");
  63.706 +    return &_ret;
  63.707 +  }
  63.708 +
  63.709 +  virtual bool is_CallTypeData() const { return true; }
  63.710 +
  63.711 +  static int static_cell_count() {
  63.712 +    return -1;
  63.713 +  }
  63.714 +
  63.715 +  static int compute_cell_count(BytecodeStream* stream) {
  63.716 +    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  63.717 +  }
  63.718 +
  63.719 +  static void initialize(DataLayout* dl, int cell_count) {
  63.720 +    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
  63.721 +  }
  63.722 +
  63.723 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  63.724 +
  63.725 +  virtual int cell_count() const {
  63.726 +    return CounterData::static_cell_count() +
  63.727 +      TypeEntriesAtCall::header_cell_count() +
  63.728 +      int_at_unchecked(cell_count_global_offset());
  63.729 +  }
  63.730 +
  63.731 +  int number_of_arguments() const {
  63.732 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  63.733 +  }
  63.734 +
  63.735 +  void set_argument_type(int i, Klass* k) {
  63.736 +    assert(has_arguments(), "no arguments!");
  63.737 +    intptr_t current = _args.type(i);
  63.738 +    _args.set_type(i, TypeEntries::with_status(k, current));
  63.739 +  }
  63.740 +
  63.741 +  void set_return_type(Klass* k) {
  63.742 +    assert(has_return(), "no return!");
  63.743 +    intptr_t current = _ret.type();
  63.744 +    _ret.set_type(TypeEntries::with_status(k, current));
  63.745 +  }
  63.746 +
  63.747 +  // An entry for a return value takes less space than an entry for an
  63.748 +  // argument, so if the remainder of the number of cells divided by
   63.749 +  // the number of cells for an argument is not zero, a return value
  63.750 +  // is profiled in this object.
  63.751 +  bool has_return() const {
  63.752 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
   63.753 +    assert(!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  63.754 +    return res;
  63.755 +  }
  63.756 +
  63.757 +  // Code generation support
  63.758 +  static ByteSize args_data_offset() {
  63.759 +    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  63.760 +  }
  63.761 +
  63.762 +  // GC support
  63.763 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  63.764 +    if (has_arguments()) {
  63.765 +      _args.clean_weak_klass_links(is_alive_closure);
  63.766 +    }
  63.767 +    if (has_return()) {
  63.768 +      _ret.clean_weak_klass_links(is_alive_closure);
  63.769 +    }
  63.770 +  }
  63.771 +
  63.772 +#ifndef PRODUCT
  63.773 +  virtual void print_data_on(outputStream* st) const;
  63.774  #endif
  63.775  };
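
has_arguments(), has_return() and number_of_arguments() above recover the layout purely from arithmetic on the count stored in the header cell: each argument contributes two cells and a profiled return value one, so count / 2 yields the number of arguments and count % 2 != 0 signals a return type. A worked sketch of that arithmetic, using cell counts consistent with the changeset:

    #include <cassert>

    // Mirrors the CallTypeData cell-count arithmetic: the header cell holds
    // the number of cells after the header; each argument uses 2 cells
    // (stack slot + type), a profiled return value uses 1.
    static const int per_arg_count = 2;

    static int  number_of_arguments(int cells_no_header) { return cells_no_header / per_arg_count; }
    static bool has_arguments(int cells_no_header)       { return cells_no_header >= per_arg_count; }
    static bool has_return(int cells_no_header)          { return (cells_no_header % per_arg_count) != 0; }

    int main() {
      // 2 profiled arguments, no return value: 2*2 = 4 cells
      assert(has_arguments(4) && !has_return(4) && number_of_arguments(4) == 2);
      // 2 profiled arguments plus a return value: 2*2 + 1 = 5 cells
      assert(has_arguments(5) && has_return(5) && number_of_arguments(5) == 2);
      // return value only: 1 cell
      assert(!has_arguments(1) && has_return(1) && number_of_arguments(1) == 0);
      return 0;
    }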
  63.776  
  63.777 @@ -636,16 +1064,17 @@
  63.778  public:
  63.779    ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
  63.780      assert(layout->tag() == DataLayout::receiver_type_data_tag ||
  63.781 -           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  63.782 +           layout->tag() == DataLayout::virtual_call_data_tag ||
  63.783 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  63.784    }
  63.785  
  63.786 -  virtual bool is_ReceiverTypeData() { return true; }
  63.787 +  virtual bool is_ReceiverTypeData() const { return true; }
  63.788  
  63.789    static int static_cell_count() {
  63.790      return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
  63.791    }
  63.792  
  63.793 -  virtual int cell_count() {
  63.794 +  virtual int cell_count() const {
  63.795      return static_cell_count();
  63.796    }
  63.797  
  63.798 @@ -660,7 +1089,7 @@
  63.799      return count0_offset + row * receiver_type_row_cell_count;
  63.800    }
  63.801  
  63.802 -  Klass* receiver(uint row) {
  63.803 +  Klass* receiver(uint row) const {
  63.804      assert(row < row_limit(), "oob");
  63.805  
  63.806      Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
  63.807 @@ -673,7 +1102,7 @@
  63.808      set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
  63.809    }
  63.810  
  63.811 -  uint receiver_count(uint row) {
  63.812 +  uint receiver_count(uint row) const {
  63.813      assert(row < row_limit(), "oob");
  63.814      return uint_at(receiver_count_cell_index(row));
  63.815    }
  63.816 @@ -721,8 +1150,8 @@
  63.817    virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  63.818  
  63.819  #ifndef PRODUCT
  63.820 -  void print_receiver_data_on(outputStream* st);
  63.821 -  void print_data_on(outputStream* st);
  63.822 +  void print_receiver_data_on(outputStream* st) const;
  63.823 +  void print_data_on(outputStream* st) const;
  63.824  #endif
  63.825  };
  63.826  
  63.827 @@ -733,10 +1162,11 @@
  63.828  class VirtualCallData : public ReceiverTypeData {
  63.829  public:
  63.830    VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
  63.831 -    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  63.832 +    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
  63.833 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  63.834    }
  63.835  
  63.836 -  virtual bool is_VirtualCallData() { return true; }
  63.837 +  virtual bool is_VirtualCallData() const { return true; }
  63.838  
  63.839    static int static_cell_count() {
  63.840      // At this point we could add more profile state, e.g., for arguments.
  63.841 @@ -744,7 +1174,7 @@
  63.842      return ReceiverTypeData::static_cell_count();
  63.843    }
  63.844  
  63.845 -  virtual int cell_count() {
  63.846 +  virtual int cell_count() const {
  63.847      return static_cell_count();
  63.848    }
  63.849  
  63.850 @@ -754,7 +1184,134 @@
  63.851    }
  63.852  
  63.853  #ifndef PRODUCT
  63.854 -  void print_data_on(outputStream* st);
  63.855 +  void print_data_on(outputStream* st) const;
  63.856 +#endif
  63.857 +};
  63.858 +
  63.859 +// VirtualCallTypeData
  63.860 +//
  63.861 +// A VirtualCallTypeData is used to access profiling information about
  63.862 +// a virtual call for which we collect type information about
  63.863 +// arguments and return value.
  63.864 +class VirtualCallTypeData : public VirtualCallData {
  63.865 +private:
  63.866 +  // entries for arguments if any
  63.867 +  TypeStackSlotEntries _args;
  63.868 +  // entry for return type if any
  63.869 +  ReturnTypeEntry _ret;
  63.870 +
  63.871 +  int cell_count_global_offset() const {
  63.872 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  63.873 +  }
  63.874 +
  63.875 +  // number of cells not counting the header
  63.876 +  int cell_count_no_header() const {
  63.877 +    return uint_at(cell_count_global_offset());
  63.878 +  }
  63.879 +
  63.880 +  void check_number_of_arguments(int total) {
  63.881 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  63.882 +  }
  63.883 +
  63.884 +protected:
  63.885 +  // An entry for a return value takes less space than an entry for an
   63.886 +  // argument, so if the number of cells exceeds the number of cells
  63.887 +  // needed for an argument, this object contains type information for
  63.888 +  // at least one argument.
  63.889 +  bool has_arguments() const {
  63.890 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
   63.891 +    assert(!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  63.892 +    return res;
  63.893 +  }
  63.894 +
  63.895 +public:
  63.896 +  VirtualCallTypeData(DataLayout* layout) :
  63.897 +    VirtualCallData(layout),
  63.898 +    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  63.899 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  63.900 +  {
  63.901 +    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   63.902 +    // Some compilers (VC++) don't want this passed in the member initialization list
  63.903 +    _args.set_profile_data(this);
  63.904 +    _ret.set_profile_data(this);
  63.905 +  }
  63.906 +
  63.907 +  const TypeStackSlotEntries* args() const {
  63.908 +    assert(has_arguments(), "no profiling of arguments");
  63.909 +    return &_args;
  63.910 +  }
  63.911 +
  63.912 +  const ReturnTypeEntry* ret() const {
  63.913 +    assert(has_return(), "no profiling of return value");
  63.914 +    return &_ret;
  63.915 +  }
  63.916 +
  63.917 +  virtual bool is_VirtualCallTypeData() const { return true; }
  63.918 +
  63.919 +  static int static_cell_count() {
  63.920 +    return -1;
  63.921 +  }
  63.922 +
  63.923 +  static int compute_cell_count(BytecodeStream* stream) {
  63.924 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  63.925 +  }
  63.926 +
  63.927 +  static void initialize(DataLayout* dl, int cell_count) {
  63.928 +    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
  63.929 +  }
  63.930 +
  63.931 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  63.932 +
  63.933 +  virtual int cell_count() const {
  63.934 +    return VirtualCallData::static_cell_count() +
  63.935 +      TypeEntriesAtCall::header_cell_count() +
  63.936 +      int_at_unchecked(cell_count_global_offset());
  63.937 +  }
  63.938 +
  63.939 +  int number_of_arguments() const {
  63.940 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  63.941 +  }
  63.942 +
  63.943 +  void set_argument_type(int i, Klass* k) {
  63.944 +    assert(has_arguments(), "no arguments!");
  63.945 +    intptr_t current = _args.type(i);
  63.946 +    _args.set_type(i, TypeEntries::with_status(k, current));
  63.947 +  }
  63.948 +
  63.949 +  void set_return_type(Klass* k) {
  63.950 +    assert(has_return(), "no return!");
  63.951 +    intptr_t current = _ret.type();
  63.952 +    _ret.set_type(TypeEntries::with_status(k, current));
  63.953 +  }
  63.954 +
  63.955 +  // An entry for a return value takes less space than an entry for an
  63.956 +  // argument, so if the remainder of the number of cells divided by
   63.957 +  // the number of cells for an argument is not zero, a return value
  63.958 +  // is profiled in this object.
  63.959 +  bool has_return() const {
  63.960 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
   63.961 +    assert(!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  63.962 +    return res;
  63.963 +  }
  63.964 +
  63.965 +  // Code generation support
  63.966 +  static ByteSize args_data_offset() {
  63.967 +    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  63.968 +  }
  63.969 +
  63.970 +  // GC support
  63.971 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  63.972 +    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
  63.973 +    if (has_arguments()) {
  63.974 +      _args.clean_weak_klass_links(is_alive_closure);
  63.975 +    }
  63.976 +    if (has_return()) {
  63.977 +      _ret.clean_weak_klass_links(is_alive_closure);
  63.978 +    }
  63.979 +  }
  63.980 +
  63.981 +#ifndef PRODUCT
  63.982 +  virtual void print_data_on(outputStream* st) const;
  63.983  #endif
  63.984  };
  63.985  
  63.986 @@ -797,7 +1354,7 @@
  63.987      assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  63.988    }
  63.989  
  63.990 -  virtual bool is_RetData() { return true; }
  63.991 +  virtual bool is_RetData() const { return true; }
  63.992  
  63.993    enum {
  63.994      no_bci = -1 // value of bci when bci1/2 are not in use.
  63.995 @@ -807,7 +1364,7 @@
  63.996      return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
  63.997    }
  63.998  
  63.999 -  virtual int cell_count() {
 63.1000 +  virtual int cell_count() const {
 63.1001      return static_cell_count();
 63.1002    }
 63.1003  
 63.1004 @@ -825,13 +1382,13 @@
 63.1005    }
 63.1006  
 63.1007    // Direct accessors
 63.1008 -  int bci(uint row) {
 63.1009 +  int bci(uint row) const {
 63.1010      return int_at(bci_cell_index(row));
 63.1011    }
 63.1012 -  uint bci_count(uint row) {
 63.1013 +  uint bci_count(uint row) const {
 63.1014      return uint_at(bci_count_cell_index(row));
 63.1015    }
 63.1016 -  int bci_displacement(uint row) {
 63.1017 +  int bci_displacement(uint row) const {
 63.1018      return int_at(bci_displacement_cell_index(row));
 63.1019    }
 63.1020  
 63.1021 @@ -853,7 +1410,7 @@
 63.1022    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 63.1023  
 63.1024  #ifndef PRODUCT
 63.1025 -  void print_data_on(outputStream* st);
 63.1026 +  void print_data_on(outputStream* st) const;
 63.1027  #endif
 63.1028  };
 63.1029  
 63.1030 @@ -878,18 +1435,18 @@
 63.1031      assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
 63.1032    }
 63.1033  
 63.1034 -  virtual bool is_BranchData() { return true; }
 63.1035 +  virtual bool is_BranchData() const { return true; }
 63.1036  
 63.1037    static int static_cell_count() {
 63.1038      return branch_cell_count;
 63.1039    }
 63.1040  
 63.1041 -  virtual int cell_count() {
 63.1042 +  virtual int cell_count() const {
 63.1043      return static_cell_count();
 63.1044    }
 63.1045  
 63.1046    // Direct accessor
 63.1047 -  uint not_taken() {
 63.1048 +  uint not_taken() const {
 63.1049      return uint_at(not_taken_off_set);
 63.1050    }
 63.1051  
 63.1052 @@ -917,7 +1474,7 @@
 63.1053    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 63.1054  
 63.1055  #ifndef PRODUCT
 63.1056 -  void print_data_on(outputStream* st);
 63.1057 +  void print_data_on(outputStream* st) const;
 63.1058  #endif
 63.1059  };
 63.1060  
 63.1061 @@ -935,15 +1492,15 @@
 63.1062      array_start_off_set
 63.1063    };
 63.1064  
 63.1065 -  uint array_uint_at(int index) {
 63.1066 +  uint array_uint_at(int index) const {
 63.1067      int aindex = index + array_start_off_set;
 63.1068      return uint_at(aindex);
 63.1069    }
 63.1070 -  int array_int_at(int index) {
 63.1071 +  int array_int_at(int index) const {
 63.1072      int aindex = index + array_start_off_set;
 63.1073      return int_at(aindex);
 63.1074    }
 63.1075 -  oop array_oop_at(int index) {
 63.1076 +  oop array_oop_at(int index) const {
 63.1077      int aindex = index + array_start_off_set;
 63.1078      return oop_at(aindex);
 63.1079    }
 63.1080 @@ -960,17 +1517,17 @@
 63.1081  public:
 63.1082    ArrayData(DataLayout* layout) : ProfileData(layout) {}
 63.1083  
 63.1084 -  virtual bool is_ArrayData() { return true; }
 63.1085 +  virtual bool is_ArrayData() const { return true; }
 63.1086  
 63.1087    static int static_cell_count() {
 63.1088      return -1;
 63.1089    }
 63.1090  
 63.1091 -  int array_len() {
 63.1092 +  int array_len() const {
 63.1093      return int_at_unchecked(array_len_off_set);
 63.1094    }
 63.1095  
 63.1096 -  virtual int cell_count() {
 63.1097 +  virtual int cell_count() const {
 63.1098      return array_len() + 1;
 63.1099    }
 63.1100  
 63.1101 @@ -1017,29 +1574,29 @@
 63.1102      assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
 63.1103    }
 63.1104  
 63.1105 -  virtual bool is_MultiBranchData() { return true; }
 63.1106 +  virtual bool is_MultiBranchData() const { return true; }
 63.1107  
 63.1108    static int compute_cell_count(BytecodeStream* stream);
 63.1109  
 63.1110 -  int number_of_cases() {
 63.1111 +  int number_of_cases() const {
 63.1112      int alen = array_len() - 2; // get rid of default case here.
 63.1113      assert(alen % per_case_cell_count == 0, "must be even");
 63.1114      return (alen / per_case_cell_count);
 63.1115    }
 63.1116  
 63.1117 -  uint default_count() {
 63.1118 +  uint default_count() const {
 63.1119      return array_uint_at(default_count_off_set);
 63.1120    }
 63.1121 -  int default_displacement() {
 63.1122 +  int default_displacement() const {
 63.1123      return array_int_at(default_disaplacement_off_set);
 63.1124    }
 63.1125  
 63.1126 -  uint count_at(int index) {
 63.1127 +  uint count_at(int index) const {
 63.1128      return array_uint_at(case_array_start +
 63.1129                           index * per_case_cell_count +
 63.1130                           relative_count_off_set);
 63.1131    }
 63.1132 -  int displacement_at(int index) {
 63.1133 +  int displacement_at(int index) const {
 63.1134      return array_int_at(case_array_start +
 63.1135                          index * per_case_cell_count +
 63.1136                          relative_displacement_off_set);
 63.1137 @@ -1074,7 +1631,7 @@
 63.1138    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 63.1139  
 63.1140  #ifndef PRODUCT
 63.1141 -  void print_data_on(outputStream* st);
 63.1142 +  void print_data_on(outputStream* st) const;
 63.1143  #endif
 63.1144  };
 63.1145  
 63.1146 @@ -1085,14 +1642,14 @@
 63.1147      assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
 63.1148    }
 63.1149  
 63.1150 -  virtual bool is_ArgInfoData() { return true; }
 63.1151 +  virtual bool is_ArgInfoData() const { return true; }
 63.1152  
 63.1153  
 63.1154 -  int number_of_args() {
 63.1155 +  int number_of_args() const {
 63.1156      return array_len();
 63.1157    }
 63.1158  
 63.1159 -  uint arg_modified(int arg) {
 63.1160 +  uint arg_modified(int arg) const {
 63.1161      return array_uint_at(arg);
 63.1162    }
 63.1163  
 63.1164 @@ -1101,7 +1658,7 @@
 63.1165    }
 63.1166  
 63.1167  #ifndef PRODUCT
 63.1168 -  void print_data_on(outputStream* st);
 63.1169 +  void print_data_on(outputStream* st) const;
 63.1170  #endif
 63.1171  };
 63.1172  
 63.1173 @@ -1271,6 +1828,21 @@
 63.1174    // return the argument info cell
 63.1175    ArgInfoData *arg_info();
 63.1176  
 63.1177 +  enum {
 63.1178 +    no_type_profile = 0,
 63.1179 +    type_profile_jsr292 = 1,
 63.1180 +    type_profile_all = 2
 63.1181 +  };
 63.1182 +
 63.1183 +  static bool profile_jsr292(methodHandle m, int bci);
 63.1184 +  static int profile_arguments_flag();
 63.1185 +  static bool profile_arguments_jsr292_only();
 63.1186 +  static bool profile_all_arguments();
 63.1187 +  static bool profile_arguments_for_invoke(methodHandle m, int bci);
 63.1188 +  static int profile_return_flag();
 63.1189 +  static bool profile_all_return();
 63.1190 +  static bool profile_return_for_invoke(methodHandle m, int bci);
 63.1191 +
 63.1192  public:
 63.1193    static int header_size() {
 63.1194      return sizeof(MethodData)/wordSize;
 63.1195 @@ -1510,6 +2082,10 @@
 63.1196    // verification
 63.1197    void verify_on(outputStream* st);
 63.1198    void verify_data_on(outputStream* st);
 63.1199 +
 63.1200 +  static bool profile_arguments();
 63.1201 +  static bool profile_return();
 63.1202 +  static bool profile_return_jsr292_only();
 63.1203  };
 63.1204  
 63.1205  #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
    64.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Oct 16 11:48:03 2013 -0700
    64.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu Oct 17 10:58:45 2013 -0700
    64.3 @@ -197,6 +197,7 @@
    64.4  // negative filter: should callee NOT be inlined?
    64.5  bool InlineTree::should_not_inline(ciMethod *callee_method,
    64.6                                     ciMethod* caller_method,
    64.7 +                                   JVMState* jvms,
    64.8                                     WarmCallInfo* wci_result) {
    64.9  
   64.10    const char* fail_msg = NULL;
   64.11 @@ -226,7 +227,7 @@
   64.12      // don't inline exception code unless the top method belongs to an
   64.13      // exception class
   64.14      if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   64.15 -      ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
   64.16 +      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
   64.17        if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   64.18          wci_result->set_profit(wci_result->profit() * 0.1);
   64.19        }
   64.20 @@ -328,7 +329,7 @@
   64.21  // return true if ok
   64.22  // Relocated from "InliningClosure::try_to_inline"
   64.23  bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
   64.24 -                               int caller_bci, ciCallProfile& profile,
   64.25 +                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
   64.26                                 WarmCallInfo* wci_result, bool& should_delay) {
   64.27  
   64.28     // Old algorithm had funny accumulating BC-size counters
   64.29 @@ -346,7 +347,7 @@
   64.30                       wci_result)) {
   64.31      return false;
   64.32    }
   64.33 -  if (should_not_inline(callee_method, caller_method, wci_result)) {
   64.34 +  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
   64.35      return false;
   64.36    }
   64.37  
   64.38 @@ -397,24 +398,35 @@
   64.39    }
   64.40  
   64.41    // detect direct and indirect recursive inlining
   64.42 -  if (!callee_method->is_compiled_lambda_form()) {
   64.43 +  {
   64.44      // count the current method and the callee
   64.45 -    int inline_level = (method() == callee_method) ? 1 : 0;
   64.46 -    if (inline_level > MaxRecursiveInlineLevel) {
   64.47 -      set_msg("recursively inlining too deep");
   64.48 -      return false;
   64.49 +    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
   64.50 +    int inline_level = 0;
   64.51 +    if (!is_compiled_lambda_form) {
   64.52 +      if (method() == callee_method) {
   64.53 +        inline_level++;
   64.54 +      }
   64.55      }
   64.56      // count callers of current method and callee
   64.57 -    JVMState* jvms = caller_jvms();
   64.58 -    while (jvms != NULL && jvms->has_method()) {
   64.59 -      if (jvms->method() == callee_method) {
   64.60 -        inline_level++;
   64.61 -        if (inline_level > MaxRecursiveInlineLevel) {
   64.62 -          set_msg("recursively inlining too deep");
   64.63 -          return false;
   64.64 +    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
   64.65 +    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
   64.66 +      if (j->method() == callee_method) {
   64.67 +        if (is_compiled_lambda_form) {
    64.68 +          // Since compiled lambda forms are heavily reused, we allow recursive inlining.  If it is truly
    64.69 +          // a recursion (using the same "receiver") we limit inlining; otherwise we can easily blow the
    64.70 +          // compiler stack.
   64.71 +          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
   64.72 +          if (caller_argument0 == callee_argument0) {
   64.73 +            inline_level++;
   64.74 +          }
   64.75 +        } else {
   64.76 +          inline_level++;
   64.77          }
   64.78        }
   64.79 -      jvms = jvms->caller();
   64.80 +    }
   64.81 +    if (inline_level > MaxRecursiveInlineLevel) {
   64.82 +      set_msg("recursive inlining is too deep");
   64.83 +      return false;
   64.84      }
   64.85    }
   64.86  
   64.87 @@ -536,7 +548,7 @@
   64.88    // Check if inlining policy says no.
   64.89    WarmCallInfo wci = *(initial_wci);
   64.90    bool success = try_to_inline(callee_method, caller_method, caller_bci,
   64.91 -                               profile, &wci, should_delay);
   64.92 +                               jvms, profile, &wci, should_delay);
   64.93  
   64.94  #ifndef PRODUCT
   64.95    if (UseOldInlining && InlineWarmCalls
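
The rewritten recursion check packs several ideas together, so here is a minimal standalone model of it; everything below is illustrative (JVMState reduced to a caller chain, identities to raw pointers), with only the names and the receiver-identity test taken from the hunk above.

    #include <cstddef>

    struct JVMState {
      JVMState*   caller;     // stands in for jvms->caller()
      const void* method;     // stands in for jvms->method()
      const void* argument0;  // stands in for map()->argument(jvms, 0)->uncast()
    };

    const int MaxRecursiveInlineLevel = 1;  // HotSpot's default for the flag

    bool recursion_too_deep(const JVMState* jvms, const void* callee,
                            bool is_compiled_lambda_form, const void* self) {
      int inline_level = 0;
      // Count the current method itself, except for compiled lambda forms.
      if (!is_compiled_lambda_form && self == callee) {
        inline_level++;
      }
      const void* callee_arg0 = is_compiled_lambda_form ? jvms->argument0 : NULL;
      for (const JVMState* j = jvms->caller; j != NULL; j = j->caller) {
        if (j->method == callee) {
          // Lambda forms are heavily shared: treat a caller frame as
          // recursion only when its receiver (argument 0) is the same.
          if (!is_compiled_lambda_form || j->argument0 == callee_arg0) {
            inline_level++;
          }
        }
      }
      return inline_level > MaxRecursiveInlineLevel;
    }
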
    65.1 --- a/src/share/vm/opto/c2compiler.cpp	Wed Oct 16 11:48:03 2013 -0700
    65.2 +++ b/src/share/vm/opto/c2compiler.cpp	Thu Oct 17 10:58:45 2013 -0700
    65.3 @@ -44,9 +44,6 @@
    65.4  # include "adfiles/ad_ppc.hpp"
    65.5  #endif
    65.6  
    65.7 -
    65.8 -volatile int C2Compiler::_runtimes = uninitialized;
    65.9 -
   65.10  // register information defined by ADLC
   65.11  extern const char register_save_policy[];
   65.12  extern const int  register_save_type[];
   65.13 @@ -57,7 +54,7 @@
   65.14  const char* C2Compiler::retry_no_escape_analysis() {
   65.15    return "retry without escape analysis";
   65.16  }
   65.17 -void C2Compiler::initialize_runtime() {
   65.18 +bool C2Compiler::init_c2_runtime() {
   65.19  
   65.20    // Check assumptions used while running ADLC
   65.21    Compile::adlc_verification();
   65.22 @@ -90,41 +87,31 @@
   65.23  
   65.24    CompilerThread* thread = CompilerThread::current();
   65.25  
   65.26 -  HandleMark  handle_mark(thread);
   65.27 -
   65.28 -  OptoRuntime::generate(thread->env());
   65.29 -
   65.30 +  HandleMark handle_mark(thread);
   65.31 +  return OptoRuntime::generate(thread->env());
   65.32  }
   65.33  
   65.34  
   65.35  void C2Compiler::initialize() {
   65.36 -
   65.37 -  // This method can only be called once per C2Compiler object
   65.38    // The first compiler thread that gets here will initialize the
   65.39 -  // small amount of global state (and runtime stubs) that c2 needs.
   65.40 +  // small amount of global state (and runtime stubs) that C2 needs.
   65.41  
    65.42    // There is a race possible once at startup, and then we're fine
   65.43  
    65.44    // Note that this is being called from a compiler thread, not the
   65.45    // main startup thread.
   65.46 -
   65.47 -  if (_runtimes != initialized) {
   65.48 -    initialize_runtimes( initialize_runtime, &_runtimes);
   65.49 +  if (should_perform_init()) {
   65.50 +    bool successful = C2Compiler::init_c2_runtime();
   65.51 +    int new_state = (successful) ? initialized : failed;
   65.52 +    set_state(new_state);
   65.53    }
   65.54 -
   65.55 -  // Mark this compiler object as ready to roll
   65.56 -  mark_initialized();
   65.57  }
   65.58  
   65.59 -void C2Compiler::compile_method(ciEnv* env,
   65.60 -                                ciMethod* target,
   65.61 -                                int entry_bci) {
   65.62 -  if (!is_initialized()) {
   65.63 -    initialize();
   65.64 -  }
   65.65 +void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   65.66 +  assert(is_initialized(), "Compiler thread must be initialized");
   65.67 +
   65.68    bool subsume_loads = SubsumeLoads;
   65.69 -  bool do_escape_analysis = DoEscapeAnalysis &&
   65.70 -    !env->jvmti_can_access_local_variables();
   65.71 +  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   65.72    bool eliminate_boxing = EliminateAutoBox;
   65.73    while (!env->failing()) {
   65.74      // Attempt to compile while subsuming loads into machine instructions.
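
The new initialize() relies on a first-thread-wins handshake inherited from AbstractCompiler. The sketch below models that handshake with an atomic CAS; the real should_perform_init()/set_state() are assumed to be lock-based, so treat this as a behavioral model, not HotSpot code.

    #include <atomic>

    enum CompilerState { uninitialized, initializing, initialized, failed };
    static std::atomic<CompilerState> g_state(uninitialized);

    static bool should_perform_init() {
      CompilerState expected = uninitialized;
      // Exactly one thread wins the transition to `initializing`.
      return g_state.compare_exchange_strong(expected, initializing);
    }

    static void set_state(CompilerState s) { g_state.store(s); }

    static bool init_c2_runtime_model() { /* generate stubs ... */ return true; }

    void initialize_model() {
      if (should_perform_init()) {
        // Stub generation can now fail (e.g. out of code cache); record it.
        set_state(init_c2_runtime_model() ? initialized : failed);
      }
    }
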
    66.1 --- a/src/share/vm/opto/c2compiler.hpp	Wed Oct 16 11:48:03 2013 -0700
    66.2 +++ b/src/share/vm/opto/c2compiler.hpp	Thu Oct 17 10:58:45 2013 -0700
    66.3 @@ -28,24 +28,17 @@
    66.4  #include "compiler/abstractCompiler.hpp"
    66.5  
    66.6  class C2Compiler : public AbstractCompiler {
    66.7 -private:
    66.8 -
    66.9 -  static void initialize_runtime();
   66.10 + private:
   66.11 +  static bool init_c2_runtime();
   66.12  
   66.13  public:
   66.14    // Name
   66.15    const char *name() { return "C2"; }
   66.16  
   66.17 -  static volatile int _runtimes;
   66.18 -
   66.19  #ifdef TIERED
   66.20    virtual bool is_c2() { return true; };
   66.21  #endif // TIERED
   66.22  
   66.23 -  // Customization
   66.24 -  bool needs_adapters         () { return true; }
   66.25 -  bool needs_stubs            () { return true; }
   66.26 -
   66.27    void initialize();
   66.28  
   66.29    // Compilation entry point for methods
    67.1 --- a/src/share/vm/opto/chaitin.hpp	Wed Oct 16 11:48:03 2013 -0700
    67.2 +++ b/src/share/vm/opto/chaitin.hpp	Thu Oct 17 10:58:45 2013 -0700
    67.3 @@ -52,6 +52,7 @@
    67.4  class LRG : public ResourceObj {
    67.5    friend class VMStructs;
    67.6  public:
     67.7 +  static const uint AllStack_size = 0xFFFFF; // This mask size indicates that the mask of this LRG supports stack positions
    67.8    enum { SPILL_REG=29999 };     // Register number of a spilled LRG
    67.9  
   67.10    double _cost;                 // 2 for loads/1 for stores times block freq
   67.11 @@ -80,14 +81,21 @@
   67.12  private:
   67.13    uint _eff_degree;             // Effective degree: Sum of neighbors _num_regs
   67.14  public:
   67.15 -  int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
   67.16 +  int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
   67.17    // Degree starts not valid and any change to the IFG neighbor
   67.18    // set makes it not valid.
   67.19 -  void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
   67.20 +  void set_degree( uint degree ) {
   67.21 +    _eff_degree = degree;
   67.22 +    debug_only(_degree_valid = 1;)
   67.23 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   67.24 +  }
   67.25    // Made a change that hammered degree
   67.26    void invalid_degree() { debug_only(_degree_valid=0;) }
   67.27    // Incrementally modify degree.  If it was correct, it should remain correct
   67.28 -  void inc_degree( uint mod ) { _eff_degree += mod; }
   67.29 +  void inc_degree( uint mod ) {
   67.30 +    _eff_degree += mod;
   67.31 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   67.32 +  }
   67.33    // Compute the degree between 2 live ranges
   67.34    int compute_degree( LRG &l ) const;
   67.35  
   67.36 @@ -95,9 +103,9 @@
   67.37    RegMask _mask;                // Allowed registers for this LRG
   67.38    uint _mask_size;              // cache of _mask.Size();
   67.39  public:
   67.40 -  int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
   67.41 +  int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
   67.42    void set_mask_size( int size ) {
   67.43 -    assert((size == 65535) || (size == (int)_mask.Size()), "");
   67.44 +    assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
   67.45      _mask_size = size;
   67.46  #ifdef ASSERT
   67.47      _msize_valid=1;
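
Why a sentinel at all: the cached mask size feeds the register allocator's "degrees of freedom" colorability test, and a live range whose mask admits stack positions should look almost inexhaustibly colorable without overflowing the degree arithmetic. A standalone model (degrees_of_freedom() and lo_degree() are assumptions about the existing LRG accessors, distilled for illustration):

    #include <stdint.h>

    const uint32_t AllStack_size = 0xFFFFF;  // sentinel from the hunk above

    struct LRGModel {
      uint32_t eff_degree;  // sum of neighbors' register pressure
      uint32_t num_regs;    // registers this live range occupies
      uint32_t mask_size;   // real register count, or AllStack_size sentinel

      uint32_t degrees_of_freedom() const { return mask_size - num_regs; }
      // Stack-capable live ranges stay "low degree" (trivially colorable)
      // until eff_degree approaches AllStack_size - num_regs, which is the
      // bound the new asserts in set_degree()/inc_degree() enforce.
      bool lo_degree() const { return eff_degree <= degrees_of_freedom(); }
    };
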
    68.1 --- a/src/share/vm/opto/compile.cpp	Wed Oct 16 11:48:03 2013 -0700
    68.2 +++ b/src/share/vm/opto/compile.cpp	Thu Oct 17 10:58:45 2013 -0700
    68.3 @@ -47,6 +47,7 @@
    68.4  #include "opto/machnode.hpp"
    68.5  #include "opto/macro.hpp"
    68.6  #include "opto/matcher.hpp"
    68.7 +#include "opto/mathexactnode.hpp"
    68.8  #include "opto/memnode.hpp"
    68.9  #include "opto/mulnode.hpp"
   68.10  #include "opto/node.hpp"
   68.11 @@ -2986,6 +2987,32 @@
   68.12        n->set_req(MemBarNode::Precedent, top());
   68.13      }
   68.14      break;
    68.15 +    // Must set a control edge on all nodes that produce a FlagsProj
    68.16 +    // so they can't escape the block that consumes the flags.
    68.17 +    // Must also set the non-throwing branch as the control
    68.18 +    // for all nodes that depend on the result, unless the node
    68.19 +    // already has a control that isn't the control of the
    68.20 +    // flag producer.
   68.21 +  case Op_FlagsProj:
   68.22 +    {
   68.23 +      MathExactNode* math = (MathExactNode*)  n->in(0);
   68.24 +      Node* ctrl = math->control_node();
   68.25 +      Node* non_throwing = math->non_throwing_branch();
   68.26 +      math->set_req(0, ctrl);
   68.27 +
   68.28 +      Node* result = math->result_node();
   68.29 +      if (result != NULL) {
   68.30 +        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
   68.31 +          Node* out = result->fast_out(j);
   68.32 +          if (out->in(0) == NULL) {
   68.33 +            out->set_req(0, non_throwing);
   68.34 +          } else if (out->in(0) == ctrl) {
   68.35 +            out->set_req(0, non_throwing);
   68.36 +          }
   68.37 +        }
   68.38 +      }
   68.39 +    }
   68.40 +    break;
   68.41    default:
   68.42      assert( !n->is_Call(), "" );
   68.43      assert( !n->is_Mem(), "" );
    69.1 --- a/src/share/vm/opto/escape.cpp	Wed Oct 16 11:48:03 2013 -0700
    69.2 +++ b/src/share/vm/opto/escape.cpp	Thu Oct 17 10:58:45 2013 -0700
    69.3 @@ -780,6 +780,7 @@
    69.4        }
    69.5      } else {  // Allocate instance
    69.6        if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
    69.7 +          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
    69.8           !cik->is_instance_klass() || // StressReflectiveCode
    69.9            cik->as_instance_klass()->has_finalizer()) {
   69.10          es = PointsToNode::GlobalEscape;
    70.1 --- a/src/share/vm/opto/graphKit.cpp	Wed Oct 16 11:48:03 2013 -0700
    70.2 +++ b/src/share/vm/opto/graphKit.cpp	Thu Oct 17 10:58:45 2013 -0700
    70.3 @@ -2122,7 +2122,7 @@
    70.4  // Null check oop.  Set null-path control into Region in slot 3.
    70.5  // Make a cast-not-nullness use the other not-null control.  Return cast.
    70.6  Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
    70.7 -                               bool never_see_null) {
    70.8 +                               bool never_see_null, bool safe_for_replace) {
    70.9    // Initial NULL check taken path
   70.10    (*null_control) = top();
   70.11    Node* cast = null_check_common(value, T_OBJECT, false, null_control);
   70.12 @@ -2140,6 +2140,9 @@
   70.13                    Deoptimization::Action_make_not_entrant);
   70.14      (*null_control) = top();    // NULL path is dead
   70.15    }
   70.16 +  if ((*null_control) == top() && safe_for_replace) {
   70.17 +    replace_in_map(value, cast);
   70.18 +  }
   70.19  
   70.20    // Cast away null-ness on the result
   70.21    return cast;
   70.22 @@ -2634,15 +2637,17 @@
   70.23    C->set_has_split_ifs(true); // Has chance for split-if optimization
   70.24  
   70.25    ciProfileData* data = NULL;
   70.26 +  bool safe_for_replace = false;
   70.27    if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
   70.28      data = method()->method_data()->bci_to_data(bci());
   70.29 +    safe_for_replace = true;
   70.30    }
   70.31    bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
   70.32                           && seems_never_null(obj, data));
   70.33  
   70.34    // Null check; get casted pointer; set region slot 3
   70.35    Node* null_ctl = top();
   70.36 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   70.37 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   70.38  
   70.39    // If not_null_obj is dead, only null-path is taken
   70.40    if (stopped()) {              // Doing instance-of on a NULL?
   70.41 @@ -2723,11 +2728,13 @@
   70.42    }
   70.43  
   70.44    ciProfileData* data = NULL;
   70.45 +  bool safe_for_replace = false;
   70.46    if (failure_control == NULL) {        // use MDO in regular case only
   70.47      assert(java_bc() == Bytecodes::_aastore ||
   70.48             java_bc() == Bytecodes::_checkcast,
   70.49             "interpreter profiles type checks only for these BCs");
   70.50      data = method()->method_data()->bci_to_data(bci());
   70.51 +    safe_for_replace = true;
   70.52    }
   70.53  
   70.54    // Make the merge point
   70.55 @@ -2742,7 +2749,7 @@
   70.56  
   70.57    // Null check; get casted pointer; set region slot 3
   70.58    Node* null_ctl = top();
   70.59 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   70.60 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   70.61  
   70.62    // If not_null_obj is dead, only null-path is taken
   70.63    if (stopped()) {              // Doing instance-of on a NULL?
   70.64 @@ -3608,7 +3615,7 @@
   70.65    Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
   70.66  
   70.67    // if (!marking)
   70.68 -  __ if_then(marking, BoolTest::ne, zero); {
   70.69 +  __ if_then(marking, BoolTest::ne, zero, unlikely); {
   70.70      BasicType index_bt = TypeX_X->basic_type();
   70.71      assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
   70.72      Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
    71.1 --- a/src/share/vm/opto/graphKit.hpp	Wed Oct 16 11:48:03 2013 -0700
    71.2 +++ b/src/share/vm/opto/graphKit.hpp	Thu Oct 17 10:58:45 2013 -0700
    71.3 @@ -378,8 +378,10 @@
    71.4    // Return a cast-not-null node which depends on the not-null control.
    71.5    // If never_see_null, use an uncommon trap (*null_control sees a top).
    71.6    // The cast is not valid along the null path; keep a copy of the original.
    71.7 +  // If safe_for_replace, then we can replace the value with the cast
    71.8 +  // in the parsing map (the cast is guaranteed to dominate the map)
    71.9    Node* null_check_oop(Node* value, Node* *null_control,
   71.10 -                       bool never_see_null = false);
   71.11 +                       bool never_see_null = false, bool safe_for_replace = false);
   71.12  
   71.13    // Check the null_seen bit.
   71.14    bool seems_never_null(Node* obj, ciProfileData* data);
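
Seen from the caller's side, the new parameter is a promise: the value dominates the current parsing map, so once the null path is dead the parser may substitute the not-null cast for the original value everywhere. A hedged sketch of the resulting call pattern (names from the hunks above, the surrounding parser method elided):

    // Inside a parser routine such as the instanceof expansion:
    bool safe_for_replace = (java_bc() == Bytecodes::_instanceof);  // profiled BCs only
    Node* null_ctl = top();
    Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
    // If safe_for_replace was set and the null path died, null_check_oop()
    // has already called replace_in_map(obj, cast): every later use of obj
    // in this parse sees the not-null cast and its sharper type.
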
    72.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Wed Oct 16 11:48:03 2013 -0700
    72.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Thu Oct 17 10:58:45 2013 -0700
    72.3 @@ -616,7 +616,11 @@
    72.4        buffer[0] = 0;
    72.5        _chaitin->dump_register(node, buffer);
    72.6        print_prop("reg", buffer);
    72.7 -      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
    72.8 +      uint lrg_id = 0;
    72.9 +      if (node->_idx < _chaitin->_lrg_map.size()) {
   72.10 +        lrg_id = _chaitin->_lrg_map.live_range_id(node);
   72.11 +      }
   72.12 +      print_prop("lrg", lrg_id);
   72.13      }
   72.14  
   72.15      node->_in_dump_cnt--;
    73.1 --- a/src/share/vm/opto/ifg.cpp	Wed Oct 16 11:48:03 2013 -0700
    73.2 +++ b/src/share/vm/opto/ifg.cpp	Thu Oct 17 10:58:45 2013 -0700
    73.3 @@ -677,7 +677,7 @@
    73.4              } else {            // Common case: size 1 bound removal
    73.5                if( lrg.mask().Member(r_reg) ) {
    73.6                  lrg.Remove(r_reg);
    73.7 -                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
    73.8 +                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
    73.9                }
   73.10              }
   73.11              // If 'l' goes completely dry, it must spill.
    74.1 --- a/src/share/vm/opto/ifnode.cpp	Wed Oct 16 11:48:03 2013 -0700
    74.2 +++ b/src/share/vm/opto/ifnode.cpp	Thu Oct 17 10:58:45 2013 -0700
    74.3 @@ -689,6 +689,7 @@
    74.4          ctrl->in(0)->in(1)->is_Bool() &&
    74.5          ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
    74.6          ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
    74.7 +        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
    74.8          ctrl->in(0)->in(1)->in(1)->in(1) == n) {
    74.9        IfNode* dom_iff = ctrl->in(0)->as_If();
   74.10        Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
    75.1 --- a/src/share/vm/opto/mathexactnode.cpp	Wed Oct 16 11:48:03 2013 -0700
    75.2 +++ b/src/share/vm/opto/mathexactnode.cpp	Thu Oct 17 10:58:45 2013 -0700
    75.3 @@ -25,9 +25,10 @@
    75.4  #include "precompiled.hpp"
    75.5  #include "memory/allocation.inline.hpp"
    75.6  #include "opto/addnode.hpp"
    75.7 +#include "opto/cfgnode.hpp"
    75.8  #include "opto/machnode.hpp"
    75.9 +#include "opto/matcher.hpp"
   75.10  #include "opto/mathexactnode.hpp"
   75.11 -#include "opto/matcher.hpp"
   75.12  #include "opto/subnode.hpp"
   75.13  
   75.14  MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) {
   75.15 @@ -36,6 +37,33 @@
   75.16    init_req(2, n2);
   75.17  }
   75.18  
   75.19 +BoolNode* MathExactNode::bool_node() const {
   75.20 +  Node* flags = flags_node();
   75.21 +  BoolNode* boolnode = flags->unique_out()->as_Bool();
   75.22 +  assert(boolnode != NULL, "must have BoolNode");
   75.23 +  return boolnode;
   75.24 +}
   75.25 +
   75.26 +IfNode* MathExactNode::if_node() const {
   75.27 +  BoolNode* boolnode = bool_node();
   75.28 +  IfNode* ifnode = boolnode->unique_out()->as_If();
   75.29 +  assert(ifnode != NULL, "must have IfNode");
   75.30 +  return ifnode;
   75.31 +}
   75.32 +
   75.33 +Node* MathExactNode::control_node() const {
   75.34 +  IfNode* ifnode = if_node();
   75.35 +  return ifnode->in(0);
   75.36 +}
   75.37 +
   75.38 +Node* MathExactNode::non_throwing_branch() const {
   75.39 +  IfNode* ifnode = if_node();
   75.40 +  if (bool_node()->_test._test == BoolTest::overflow) {
   75.41 +    return ifnode->proj_out(0);
   75.42 +  }
   75.43 +  return ifnode->proj_out(1);
   75.44 +}
   75.45 +
   75.46  Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) {
   75.47    uint ideal_reg = proj->ideal_reg();
   75.48    RegMask rm;
   75.49 @@ -62,15 +90,15 @@
   75.50      }
   75.51  
   75.52      if (flags != NULL) {
   75.53 -      BoolNode* bolnode = (BoolNode *) flags->unique_out();
   75.54 -      switch (bolnode->_test._test) {
   75.55 +      BoolNode* boolnode = bool_node();
   75.56 +      switch (boolnode->_test._test) {
   75.57          case BoolTest::overflow:
   75.58            // if the check is for overflow - never taken
   75.59 -          igvn->replace_node(bolnode, phase->intcon(0));
   75.60 +          igvn->replace_node(boolnode, phase->intcon(0));
   75.61            break;
   75.62          case BoolTest::no_overflow:
   75.63            // if the check is for no overflow - always taken
   75.64 -          igvn->replace_node(bolnode, phase->intcon(1));
   75.65 +          igvn->replace_node(boolnode, phase->intcon(1));
   75.66            break;
   75.67          default:
   75.68            fatal("Unexpected value of BoolTest");
    76.1 --- a/src/share/vm/opto/mathexactnode.hpp	Wed Oct 16 11:48:03 2013 -0700
    76.2 +++ b/src/share/vm/opto/mathexactnode.hpp	Thu Oct 17 10:58:45 2013 -0700
    76.3 @@ -27,8 +27,11 @@
    76.4  
    76.5  #include "opto/multnode.hpp"
    76.6  #include "opto/node.hpp"
    76.7 +#include "opto/subnode.hpp"
    76.8  #include "opto/type.hpp"
    76.9  
   76.10 +class BoolNode;
   76.11 +class IfNode;
   76.12  class Node;
   76.13  
   76.14  class PhaseGVN;
   76.15 @@ -49,9 +52,13 @@
   76.16    virtual bool is_CFG() const { return false; }
   76.17    virtual uint ideal_reg() const { return NotAMachineReg; }
   76.18  
   76.19 -  ProjNode* result_node() { return proj_out(result_proj_node); }
   76.20 -  ProjNode* flags_node() { return proj_out(flags_proj_node); }
   76.21 +  ProjNode* result_node() const { return proj_out(result_proj_node); }
   76.22 +  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
   76.23 +  Node* control_node() const;
   76.24 +  Node* non_throwing_branch() const;
   76.25  protected:
   76.26 +  IfNode* if_node() const;
   76.27 +  BoolNode* bool_node() const;
   76.28    Node* no_overflow(PhaseGVN *phase, Node* new_result);
   76.29  };
   76.30  
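
Taken together, the new accessors walk one fixed graph shape. Roughly, for int r = Math.addExact(a, b) the expected ideal-graph pattern is (a sketch, not compiler output):

    // AddExactINode(ctrl, a, b)                 // a MathExactNode
    //   |- ProjNode   <- result_node()          // the value of r
    //   |    \- uses of r (re-pinned to the non-throwing branch by the
    //   |       new Op_FlagsProj case in Compile::final_graph_reshaping)
    //   \- FlagsProj  <- flags_node()
    //        \- BoolNode <- bool_node()         // unique_out of the flags
    //             \- IfNode <- if_node()        // unique_out of the bool
    //                  |- overflow projection   -> uncommon trap (deoptimize)
    //                  \- the other projection  <- non_throwing_branch()
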
    77.1 --- a/src/share/vm/opto/parse.hpp	Wed Oct 16 11:48:03 2013 -0700
    77.2 +++ b/src/share/vm/opto/parse.hpp	Thu Oct 17 10:58:45 2013 -0700
    77.3 @@ -73,6 +73,7 @@
    77.4    bool        try_to_inline(ciMethod* callee_method,
    77.5                              ciMethod* caller_method,
    77.6                              int caller_bci,
    77.7 +                            JVMState* jvms,
    77.8                              ciCallProfile& profile,
    77.9                              WarmCallInfo* wci_result,
   77.10                              bool& should_delay);
   77.11 @@ -83,6 +84,7 @@
   77.12                              WarmCallInfo* wci_result);
   77.13    bool        should_not_inline(ciMethod* callee_method,
   77.14                                  ciMethod* caller_method,
   77.15 +                                JVMState* jvms,
   77.16                                  WarmCallInfo* wci_result);
   77.17    void        print_inlining(ciMethod* callee_method, int caller_bci,
   77.18                               bool success) const;
    78.1 --- a/src/share/vm/opto/parse2.cpp	Wed Oct 16 11:48:03 2013 -0700
    78.2 +++ b/src/share/vm/opto/parse2.cpp	Thu Oct 17 10:58:45 2013 -0700
    78.3 @@ -268,7 +268,7 @@
    78.4      return adjoinRange(value, value, dest, table_index);
    78.5    }
    78.6  
    78.7 -  void print(ciEnv* env) {
    78.8 +  void print() {
    78.9      if (is_singleton())
   78.10        tty->print(" {%d}=>%d", lo(), dest());
   78.11      else if (lo() == min_jint)
   78.12 @@ -471,8 +471,8 @@
   78.13    // These are the switch destinations hanging off the jumpnode
   78.14    int i = 0;
   78.15    for (SwitchRange* r = lo; r <= hi; r++) {
   78.16 -    for (int j = r->lo(); j <= r->hi(); j++, i++) {
   78.17 -      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
    78.18 +    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
   78.19 +      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
   78.20        {
   78.21          PreserveJVMState pjvms(this);
   78.22          set_control(input);
   78.23 @@ -632,7 +632,7 @@
   78.24      }
   78.25      tty->print("   ");
   78.26      for( r = lo; r <= hi; r++ ) {
   78.27 -      r->print(env());
   78.28 +      r->print();
   78.29      }
   78.30      tty->print_cr("");
   78.31    }
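
The switch to a 64-bit loop index fixes a termination bug: when a SwitchRange ends at max_jint, a 32-bit j can never exceed r->hi(), so the increment overflows instead of ending the loop. A standalone illustration:

    #include <stdint.h>
    #include <limits.h>

    void walk_range(int lo, int hi) {
      // With `int j`, hi == INT_MAX makes `j <= hi` always true and the
      // final j++ overflow (undefined behavior; wraps in practice).
      for (int64_t j = lo; j <= hi; j++) {
        // ... emit one JumpProjNode for (int)(j - lo) ...
      }
    }
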
    79.1 --- a/src/share/vm/opto/parseHelper.cpp	Wed Oct 16 11:48:03 2013 -0700
    79.2 +++ b/src/share/vm/opto/parseHelper.cpp	Thu Oct 17 10:58:45 2013 -0700
    79.3 @@ -343,10 +343,14 @@
    79.4  
    79.5    // Get the Method* node.
    79.6    ciMethod* m = method();
    79.7 -  address counters_adr = m->ensure_method_counters();
    79.8 +  MethodCounters* counters_adr = m->ensure_method_counters();
    79.9 +  if (counters_adr == NULL) {
   79.10 +    C->record_failure("method counters allocation failed");
   79.11 +    return;
   79.12 +  }
   79.13  
   79.14    Node* ctrl = control();
   79.15 -  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
   79.16 +  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   79.17    Node *counters_node = makecon(adr_type);
   79.18    Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
   79.19      MethodCounters::interpreter_invocation_counter_offset_in_bytes());
    80.1 --- a/src/share/vm/opto/reg_split.cpp	Wed Oct 16 11:48:03 2013 -0700
    80.2 +++ b/src/share/vm/opto/reg_split.cpp	Thu Oct 17 10:58:45 2013 -0700
    80.3 @@ -375,6 +375,7 @@
    80.4        }
    80.5  
    80.6        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
    80.7 +        assert(Reachblock != NULL, "Reachblock must be non-NULL");
    80.8          Node *rdef = Reachblock[lrg2reach[lidx]];
    80.9          if (rdef) {
   80.10            spill->set_req(i, rdef);
   80.11 @@ -1336,7 +1337,8 @@
   80.12                 _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
   80.13            insert--;
   80.14          }
   80.15 -        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
    80.16 +        // since the def cannot contain any live range input, we can pass in NULL as the Reachblock parameter
   80.17 +        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
   80.18          if (!def) {
   80.19            return 0;    // Bail out
   80.20          }
    81.1 --- a/src/share/vm/opto/runtime.cpp	Wed Oct 16 11:48:03 2013 -0700
    81.2 +++ b/src/share/vm/opto/runtime.cpp	Thu Oct 17 10:58:45 2013 -0700
    81.3 @@ -138,9 +138,10 @@
    81.4  
    81.5  
    81.6  #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
    81.7 -  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
    81.8 +  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
    81.9 +  if (var == NULL) { return false; }
   81.10  
   81.11 -void OptoRuntime::generate(ciEnv* env) {
   81.12 +bool OptoRuntime::generate(ciEnv* env) {
   81.13  
   81.14    generate_exception_blob();
   81.15  
   81.16 @@ -158,7 +159,7 @@
   81.17    gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   81.18    gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   81.19    gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
   81.20 -  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
   81.21 +  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   81.22    gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
   81.23  
   81.24    gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
   81.25 @@ -168,7 +169,7 @@
   81.26    gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   81.27    gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
   81.28  # endif
   81.29 -
   81.30 +  return true;
   81.31  }
   81.32  
   81.33  #undef gen
   81.34 @@ -976,30 +977,36 @@
   81.35    address handler_address = NULL;
   81.36  
   81.37    Handle exception(thread, thread->exception_oop());
   81.38 +  address pc = thread->exception_pc();
   81.39 +
   81.40 +  // Clear out the exception oop and pc since looking up an
   81.41 +  // exception handler can cause class loading, which might throw an
    81.42 +  // exception; those fields are expected to be clear during
   81.43 +  // normal bytecode execution.
   81.44 +  thread->clear_exception_oop_and_pc();
   81.45  
   81.46    if (TraceExceptions) {
   81.47 -    trace_exception(exception(), thread->exception_pc(), "");
   81.48 +    trace_exception(exception(), pc, "");
   81.49    }
   81.50 +
   81.51    // for AbortVMOnException flag
   81.52    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
   81.53  
   81.54 -  #ifdef ASSERT
   81.55 -    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   81.56 -      // should throw an exception here
   81.57 -      ShouldNotReachHere();
   81.58 -    }
   81.59 -  #endif
   81.60 -
   81.61 +#ifdef ASSERT
   81.62 +  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   81.63 +    // should throw an exception here
   81.64 +    ShouldNotReachHere();
   81.65 +  }
   81.66 +#endif
   81.67  
   81.68    // new exception handling: this method is entered only from adapters
   81.69    // exceptions from compiled java methods are handled in compiled code
   81.70    // using rethrow node
   81.71  
   81.72 -  address pc = thread->exception_pc();
   81.73    nm = CodeCache::find_nmethod(pc);
   81.74    assert(nm != NULL, "No NMethod found");
   81.75    if (nm->is_native_method()) {
   81.76 -    fatal("Native mathod should not have path to exception handling");
   81.77 +    fatal("Native method should not have path to exception handling");
   81.78    } else {
   81.79      // we are switching to old paradigm: search for exception handler in caller_frame
   81.80      // instead in exception handler of caller_frame.sender()
   81.81 @@ -1346,7 +1353,8 @@
   81.82    tty->print(" in ");
   81.83    CodeBlob* blob = CodeCache::find_blob(exception_pc);
   81.84    if (blob->is_nmethod()) {
   81.85 -    ((nmethod*)blob)->method()->print_value();
   81.86 +    nmethod* nm = blob->as_nmethod_or_null();
   81.87 +    nm->method()->print_value();
   81.88    } else if (blob->is_runtime_stub()) {
   81.89      tty->print("<runtime-stub>");
   81.90    } else {
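
The reordering in the exception entry point follows one rule: capture the pending exception state into locals (the oop in a Handle so GC can update it), then clear the thread-local fields before any step that might run Java-level code. Condensed from the hunk above:

    Handle exception(thread, thread->exception_oop());  // GC-safe reference
    address pc = thread->exception_pc();
    thread->clear_exception_oop_and_pc();  // handler lookup may load classes,
                                           // which may throw and expects these
                                           // fields to be empty
    nmethod* nm = CodeCache::find_nmethod(pc);
    // ... locate the handler, then re-publish the exception for it ...
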
    82.1 --- a/src/share/vm/opto/runtime.hpp	Wed Oct 16 11:48:03 2013 -0700
    82.2 +++ b/src/share/vm/opto/runtime.hpp	Thu Oct 17 10:58:45 2013 -0700
    82.3 @@ -203,8 +203,10 @@
    82.4  
    82.5    static bool is_callee_saved_register(MachRegisterNumbers reg);
    82.6  
    82.7 -  // One time only generate runtime code stubs
    82.8 -  static void generate(ciEnv* env);
    82.9 +  // One time only generate runtime code stubs. Returns true
   82.10 +  // when runtime stubs have been generated successfully and
   82.11 +  // false otherwise.
   82.12 +  static bool generate(ciEnv* env);
   82.13  
   82.14    // Returns the name of a stub
   82.15    static const char* stub_name(address entry);
    83.1 --- a/src/share/vm/opto/stringopts.cpp	Wed Oct 16 11:48:03 2013 -0700
    83.2 +++ b/src/share/vm/opto/stringopts.cpp	Thu Oct 17 10:58:45 2013 -0700
    83.3 @@ -1,5 +1,5 @@
    83.4  /*
    83.5 - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
    83.6 + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
    83.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.8   *
    83.9   * This code is free software; you can redistribute it and/or modify it
   83.10 @@ -50,10 +50,11 @@
   83.11    Node*               _arguments;      // The list of arguments to be concatenated
   83.12    GrowableArray<int>  _mode;           // into a String along with a mode flag
   83.13                                         // indicating how to treat the value.
   83.14 -
   83.15 +  Node_List           _constructors;   // List of constructors (many in case of stacked concat)
   83.16    Node_List           _control;        // List of control nodes that will be deleted
    83.17    Node_List           _uncommon_traps; // Uncommon traps that need to be rewritten
   83.18                                         // to restart at the initial JVMState.
   83.19 +
   83.20   public:
   83.21    // Mode for converting arguments to Strings
   83.22    enum {
   83.23 @@ -73,6 +74,7 @@
   83.24      _arguments->del_req(0);
   83.25    }
   83.26  
   83.27 +  bool validate_mem_flow();
   83.28    bool validate_control_flow();
   83.29  
   83.30    void merge_add() {
   83.31 @@ -189,6 +191,10 @@
   83.32      assert(!_control.contains(ctrl), "only push once");
   83.33      _control.push(ctrl);
   83.34    }
   83.35 +  void add_constructor(Node* init) {
   83.36 +    assert(!_constructors.contains(init), "only push once");
   83.37 +    _constructors.push(init);
   83.38 +  }
   83.39    CallStaticJavaNode* end() { return _end; }
   83.40    AllocateNode* begin() { return _begin; }
   83.41    Node* string_alloc() { return _string_alloc; }
   83.42 @@ -301,6 +307,12 @@
   83.43      }
   83.44    }
   83.45    result->set_allocation(other->_begin);
   83.46 +  for (uint i = 0; i < _constructors.size(); i++) {
   83.47 +    result->add_constructor(_constructors.at(i));
   83.48 +  }
   83.49 +  for (uint i = 0; i < other->_constructors.size(); i++) {
   83.50 +    result->add_constructor(other->_constructors.at(i));
   83.51 +  }
   83.52    result->_multiple = true;
   83.53    return result;
   83.54  }
   83.55 @@ -510,7 +522,8 @@
   83.56        sc->add_control(constructor);
   83.57        sc->add_control(alloc);
   83.58        sc->set_allocation(alloc);
   83.59 -      if (sc->validate_control_flow()) {
   83.60 +      sc->add_constructor(constructor);
   83.61 +      if (sc->validate_control_flow() && sc->validate_mem_flow()) {
   83.62          return sc;
   83.63        } else {
   83.64          return NULL;
   83.65 @@ -620,7 +633,7 @@
   83.66  #endif
   83.67  
   83.68              StringConcat* merged = sc->merge(other, arg);
   83.69 -            if (merged->validate_control_flow()) {
   83.70 +            if (merged->validate_control_flow() && merged->validate_mem_flow()) {
   83.71  #ifndef PRODUCT
   83.72                if (PrintOptimizeStringConcat) {
   83.73                  tty->print_cr("stacking would succeed");
   83.74 @@ -708,6 +721,139 @@
   83.75  }
   83.76  
   83.77  
   83.78 +bool StringConcat::validate_mem_flow() {
   83.79 +  Compile* C = _stringopts->C;
   83.80 +
   83.81 +  for (uint i = 0; i < _control.size(); i++) {
   83.82 +#ifndef PRODUCT
   83.83 +    Node_List path;
   83.84 +#endif
   83.85 +    Node* curr = _control.at(i);
   83.86 +    if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation
   83.87 +      // Now here's the main invariant in our case:
    83.88 +      // For memory between the constructor, the appends, and toString we should see only bottom memory,
   83.89 +      // produced by the previous call we know about.
   83.90 +      if (!_constructors.contains(curr)) {
   83.91 +        NOT_PRODUCT(path.push(curr);)
   83.92 +        Node* mem = curr->in(TypeFunc::Memory);
   83.93 +        assert(mem != NULL, "calls should have memory edge");
   83.94 +        assert(!mem->is_Phi(), "should be handled by control flow validation");
   83.95 +        NOT_PRODUCT(path.push(mem);)
   83.96 +        while (mem->is_MergeMem()) {
   83.97 +          for (uint i = 1; i < mem->req(); i++) {
   83.98 +            if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) {
   83.99 +#ifndef PRODUCT
  83.100 +              if (PrintOptimizeStringConcat) {
  83.101 +                tty->print("fusion has incorrect memory flow (side effects) for ");
  83.102 +                _begin->jvms()->dump_spec(tty); tty->cr();
  83.103 +                path.dump();
  83.104 +              }
  83.105 +#endif
  83.106 +              return false;
  83.107 +            }
  83.108 +          }
  83.109 +          // skip through a potential MergeMem chain, linked through Bot
  83.110 +          mem = mem->in(Compile::AliasIdxBot);
  83.111 +          NOT_PRODUCT(path.push(mem);)
  83.112 +        }
  83.113 +        // now let it fall through, and see if we have a projection
  83.114 +        if (mem->is_Proj()) {
  83.115 +          // Should point to a previous known call
  83.116 +          Node *prev = mem->in(0);
  83.117 +          NOT_PRODUCT(path.push(prev);)
  83.118 +          if (!prev->is_Call() || !_control.contains(prev)) {
  83.119 +#ifndef PRODUCT
  83.120 +            if (PrintOptimizeStringConcat) {
  83.121 +              tty->print("fusion has incorrect memory flow (unknown call) for ");
  83.122 +              _begin->jvms()->dump_spec(tty); tty->cr();
  83.123 +              path.dump();
  83.124 +            }
  83.125 +#endif
  83.126 +            return false;
  83.127 +          }
  83.128 +        } else {
  83.129 +          assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name()));
  83.130 +#ifndef PRODUCT
  83.131 +          if (PrintOptimizeStringConcat) {
  83.132 +            tty->print("fusion has incorrect memory flow (unexpected source) for ");
  83.133 +            _begin->jvms()->dump_spec(tty); tty->cr();
  83.134 +            path.dump();
  83.135 +          }
  83.136 +#endif
  83.137 +          return false;
  83.138 +        }
  83.139 +      } else {
  83.140 +        // For memory that feeds into constructors it's more complicated.
   83.141 +        // However, the advantage is that any side effect that happens between the Allocate/Initialize and
  83.142 +        // the constructor will have to be control-dependent on Initialize.
  83.143 +        // So we actually don't have to do anything, since it's going to be caught by the control flow
  83.144 +        // analysis.
  83.145 +#ifdef ASSERT
  83.146 +        // Do a quick verification of the control pattern between the constructor and the initialize node
  83.147 +        assert(curr->is_Call(), "constructor should be a call");
  83.148 +        // Go up the control starting from the constructor call
  83.149 +        Node* ctrl = curr->in(0);
  83.150 +        IfNode* iff = NULL;
  83.151 +        RegionNode* copy = NULL;
  83.152 +
  83.153 +        while (true) {
  83.154 +          // skip known check patterns
  83.155 +          if (ctrl->is_Region()) {
  83.156 +            if (ctrl->as_Region()->is_copy()) {
  83.157 +              copy = ctrl->as_Region();
  83.158 +              ctrl = copy->is_copy();
  83.159 +            } else { // a cast
  83.160 +              assert(ctrl->req() == 3 &&
  83.161 +                     ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
  83.162 +                     ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
  83.163 +                     ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
  83.164 +                     ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
  83.165 +                     "must be a simple diamond");
  83.166 +              Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
  83.167 +              for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
  83.168 +                Node* use = i.get();
  83.169 +                assert(use == ctrl || use->is_ConstraintCast(),
  83.170 +                       err_msg_res("unexpected user: %s", use->Name()));
  83.171 +              }
  83.172 +
  83.173 +              iff = ctrl->in(1)->in(0)->as_If();
  83.174 +              ctrl = iff->in(0);
  83.175 +            }
  83.176 +          } else if (ctrl->is_IfTrue()) { // null checks, class checks
  83.177 +            iff = ctrl->in(0)->as_If();
  83.178 +            assert(iff->is_If(), "must be if");
  83.179 +            // Verify that the other arm is an uncommon trap
  83.180 +            Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con);
  83.181 +            CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
   83.182 +            assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommon trap");
  83.183 +            ctrl = iff->in(0);
  83.184 +          } else {
  83.185 +            break;
  83.186 +          }
  83.187 +        }
  83.188 +
  83.189 +        assert(ctrl->is_Proj(), "must be a projection");
  83.190 +        assert(ctrl->in(0)->is_Initialize(), "should be initialize");
  83.191 +        for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) {
  83.192 +          Node* use = i.get();
  83.193 +          assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(),
  83.194 +                 err_msg_res("unexpected user: %s", use->Name()));
  83.195 +        }
  83.196 +#endif // ASSERT
  83.197 +      }
  83.198 +    }
  83.199 +  }
  83.200 +
  83.201 +#ifndef PRODUCT
  83.202 +  if (PrintOptimizeStringConcat) {
  83.203 +    tty->print("fusion has correct memory flow for ");
  83.204 +    _begin->jvms()->dump_spec(tty); tty->cr();
  83.205 +    tty->cr();
  83.206 +  }
  83.207 +#endif
  83.208 +  return true;
  83.209 +}
  83.210 +
  83.211  bool StringConcat::validate_control_flow() {
  83.212    // We found all the calls and arguments now lets see if it's
  83.213    // safe to transform the graph as we would expect.
  83.214 @@ -753,7 +899,7 @@
  83.215      }
  83.216    }
  83.217  
  83.218 -  // Skip backwards through the control checking for unexpected contro flow
  83.219 +  // Skip backwards through the control checking for unexpected control flow
  83.220    Node* ptr = _end;
  83.221    bool fail = false;
  83.222    while (ptr != _begin) {
  83.223 @@ -936,7 +1082,7 @@
  83.224    if (PrintOptimizeStringConcat && !fail) {
  83.225      ttyLocker ttyl;
  83.226      tty->cr();
  83.227 -    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
  83.228 +    tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size());
  83.229      _begin->jvms()->dump_spec(tty); tty->cr();
  83.230      for (int i = 0; i < num_arguments(); i++) {
  83.231        argument(i)->dump();
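
The core of validate_mem_flow() is a walk down the bottom-memory slice. Distilled to its shape for a non-constructor call curr (PRODUCT tracing and the Store/LoadStore acceptance dropped for brevity):

    Node* mem = curr->in(TypeFunc::Memory);
    while (mem->is_MergeMem()) {
      for (uint idx = 1; idx < mem->req(); idx++) {
        // Any live slice besides bottom memory is an interleaved side
        // effect the fused concatenation could not reproduce.
        if (idx != Compile::AliasIdxBot && mem->in(idx) != C->top()) {
          return false;
        }
      }
      mem = mem->in(Compile::AliasIdxBot);  // hop down the MergeMem chain
    }
    // What remains must be memory produced by a call this concat already
    // knows about (the full version also accepts the stores that feed a
    // constructor).
    return mem->is_Proj() && mem->in(0)->is_Call() && _control.contains(mem->in(0));
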
    84.1 --- a/src/share/vm/runtime/globals.hpp	Wed Oct 16 11:48:03 2013 -0700
    84.2 +++ b/src/share/vm/runtime/globals.hpp	Thu Oct 17 10:58:45 2013 -0700
    84.3 @@ -2677,6 +2677,14 @@
    84.4    product(bool, AggressiveOpts, false,                                      \
    84.5            "Enable aggressive optimizations - see arguments.cpp")            \
    84.6                                                                              \
    84.7 +  product_pd(uintx, TypeProfileLevel,                                       \
    84.8 +          "=XY, with Y, Type profiling of arguments at call"                \
    84.9 +          "          X, Type profiling of return value at call"             \
    84.10 +          "X and Y in 0->off ; 1->jsr292 only; 2->all methods")             \
   84.11 +                                                                            \
   84.12 +  product(intx, TypeProfileArgsLimit,     2,                                \
   84.13 +          "max number of call arguments to consider for type profiling")    \
   84.14 +                                                                            \
   84.15    /* statistics */                                                          \
   84.16    develop(bool, CountCompiledCalls, false,                                  \
   84.17            "Count method invocations")                                       \
   84.18 @@ -3823,7 +3831,6 @@
   84.19    product(bool, UseLockedTracing, false,                                    \
   84.20            "Use locked-tracing when doing event-based tracing")
   84.21  
   84.22 -
   84.23  /*
   84.24   *  Macros for factoring of globals
   84.25   */
    85.1 --- a/src/share/vm/runtime/java.cpp	Wed Oct 16 11:48:03 2013 -0700
    85.2 +++ b/src/share/vm/runtime/java.cpp	Thu Oct 17 10:58:45 2013 -0700
    85.3 @@ -183,6 +183,7 @@
    85.4    collected_profiled_methods->sort(&compare_methods);
    85.5  
    85.6    int count = collected_profiled_methods->length();
    85.7 +  int total_size = 0;
    85.8    if (count > 0) {
    85.9      for (int index = 0; index < count; index++) {
   85.10        Method* m = collected_profiled_methods->at(index);
   85.11 @@ -190,10 +191,13 @@
   85.12        tty->print_cr("------------------------------------------------------------------------");
   85.13        //m->print_name(tty);
   85.14        m->print_invocation_count();
   85.15 +      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
   85.16        tty->cr();
   85.17        m->print_codes();
   85.18 +      total_size += m->method_data()->size_in_bytes();
   85.19      }
   85.20      tty->print_cr("------------------------------------------------------------------------");
   85.21 +    tty->print_cr("Total MDO size: %d bytes", total_size);
   85.22    }
   85.23  }
   85.24  
    86.1 --- a/src/share/vm/runtime/signature.cpp	Wed Oct 16 11:48:03 2013 -0700
    86.2 +++ b/src/share/vm/runtime/signature.cpp	Thu Oct 17 10:58:45 2013 -0700
    86.3 @@ -378,6 +378,16 @@
    86.4    return result;
    86.5  }
    86.6  
    86.7 +int SignatureStream::reference_parameter_count() {
    86.8 +  int args_count = 0;
    86.9 +  for ( ; !at_return_type(); next()) {
   86.10 +    if (is_object()) {
   86.11 +      args_count++;
   86.12 +    }
   86.13 +  }
   86.14 +  return args_count;
   86.15 +}
   86.16 +
   86.17  bool SignatureVerifier::is_valid_signature(Symbol* sig) {
   86.18    const char* signature = (const char*)sig->bytes();
   86.19    ssize_t len = sig->utf8_length();
    87.1 --- a/src/share/vm/runtime/signature.hpp	Wed Oct 16 11:48:03 2013 -0700
    87.2 +++ b/src/share/vm/runtime/signature.hpp	Thu Oct 17 10:58:45 2013 -0700
    87.3 @@ -401,6 +401,9 @@
    87.4  
    87.5    // return same as_symbol except allocation of new symbols is avoided.
    87.6    Symbol* as_symbol_or_null();
    87.7 +
    87.8 +  // count the number of references in the signature
    87.9 +  int reference_parameter_count();
   87.10  };
   87.11  
   87.12  class SignatureVerifier : public StackObj {
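
A hedged usage sketch for the new helper; the consumer shown is an assumption (argument type profiling has to size its per-call records), and only reference_parameter_count() itself is from this changeset:

    SignatureStream ss(method->signature());        // method: a Method* being profiled
    int ref_args = ss.reference_parameter_count();  // objects and arrays only
    // Primitive parameters carry no class profile; the new flag caps how
    // many reference arguments get a record.
    int profiled = MIN2(ref_args, (int)TypeProfileArgsLimit);
    // Note: counting advances the stream to the return type as a side effect.
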
    88.1 --- a/src/share/vm/runtime/thread.cpp	Wed Oct 16 11:48:03 2013 -0700
    88.2 +++ b/src/share/vm/runtime/thread.cpp	Thu Oct 17 10:58:45 2013 -0700
    88.3 @@ -1454,7 +1454,6 @@
    88.4    _interp_only_mode    = 0;
    88.5    _special_runtime_exit_condition = _no_async_condition;
    88.6    _pending_async_exception = NULL;
    88.7 -  _is_compiling = false;
    88.8    _thread_stat = NULL;
    88.9    _thread_stat = new ThreadStatistics();
   88.10    _blocked_on_compilation = false;
   88.11 @@ -1815,7 +1814,8 @@
   88.12      // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
   88.13      // the execution of the method. If that is not enough, then we don't really care. Thread.stop
   88.14      // is deprecated anyhow.
   88.15 -    { int count = 3;
   88.16 +    if (!is_Compiler_thread()) {
   88.17 +      int count = 3;
   88.18        while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
   88.19          EXCEPTION_MARK;
   88.20          JavaValue result(T_VOID);
   88.21 @@ -1828,7 +1828,6 @@
   88.22          CLEAR_PENDING_EXCEPTION;
   88.23        }
   88.24      }
   88.25 -
   88.26      // notify JVMTI
   88.27      if (JvmtiExport::should_post_thread_life()) {
   88.28        JvmtiExport::post_thread_end(this);
   88.29 @@ -3239,6 +3238,7 @@
   88.30    _counters = counters;
   88.31    _buffer_blob = NULL;
   88.32    _scanned_nmethod = NULL;
   88.33 +  _compiler = NULL;
   88.34  
   88.35  #ifndef PRODUCT
   88.36    _ideal_graph_printer = NULL;
   88.37 @@ -3255,6 +3255,7 @@
   88.38    }
   88.39  }
   88.40  
   88.41 +
   88.42  // ======= Threads ========
   88.43  
   88.44  // The Threads class links together all active threads, and provides
   88.45 @@ -3275,8 +3276,6 @@
   88.46  // All JavaThreads
   88.47  #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
   88.48  
   88.49 -void os_stream();
   88.50 -
   88.51  // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
   88.52  void Threads::threads_do(ThreadClosure* tc) {
   88.53    assert_locked_or_safepoint(Threads_lock);
    89.1 --- a/src/share/vm/runtime/thread.hpp	Wed Oct 16 11:48:03 2013 -0700
    89.2 +++ b/src/share/vm/runtime/thread.hpp	Thu Oct 17 10:58:45 2013 -0700
    89.3 @@ -923,9 +923,6 @@
    89.4    volatile address _exception_handler_pc;        // PC for handler of exception
    89.5    volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
    89.6  
    89.7 -  // support for compilation
    89.8 -  bool    _is_compiling;                         // is true if a compilation is active inthis thread (one compilation per thread possible)
    89.9 -
   89.10    // support for JNI critical regions
   89.11    jint    _jni_active_critical;                  // count of entries into JNI critical region
   89.12  
   89.13 @@ -1005,10 +1002,6 @@
   89.14    // Testers
   89.15    virtual bool is_Java_thread() const            { return true;  }
   89.16  
   89.17 -  // compilation
   89.18 -  void set_is_compiling(bool f)                  { _is_compiling = f; }
   89.19 -  bool is_compiling() const                      { return _is_compiling; }
   89.20 -
   89.21    // Thread chain operations
   89.22    JavaThread* next() const                       { return _next; }
   89.23    void set_next(JavaThread* p)                   { _next = p; }
   89.24 @@ -1283,6 +1276,11 @@
   89.25    void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   89.26    void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
   89.27  
   89.28 +  void clear_exception_oop_and_pc() {
   89.29 +    set_exception_oop(NULL);
   89.30 +    set_exception_pc(NULL);
   89.31 +  }
   89.32 +
   89.33    // Stack overflow support
   89.34    inline size_t stack_available(address cur_sp);
   89.35    address stack_yellow_zone_base()
   89.36 @@ -1811,13 +1809,14 @@
   89.37   private:
   89.38    CompilerCounters* _counters;
   89.39  
   89.40 -  ciEnv*        _env;
   89.41 -  CompileLog*   _log;
   89.42 -  CompileTask*  _task;
   89.43 -  CompileQueue* _queue;
   89.44 -  BufferBlob*   _buffer_blob;
   89.45 +  ciEnv*            _env;
   89.46 +  CompileLog*       _log;
   89.47 +  CompileTask*      _task;
   89.48 +  CompileQueue*     _queue;
   89.49 +  BufferBlob*       _buffer_blob;
   89.50  
   89.51 -  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
   89.52 +  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
   89.53 +  AbstractCompiler* _compiler;
   89.54  
   89.55   public:
   89.56  
   89.57 @@ -1829,14 +1828,17 @@
   89.58    // Hide this compiler thread from external view.
   89.59    bool is_hidden_from_external_view() const      { return true; }
   89.60  
   89.61 -  CompileQueue* queue()                          { return _queue; }
   89.62 -  CompilerCounters* counters()                   { return _counters; }
   89.63 +  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
   89.64 +  AbstractCompiler* compiler() const             { return _compiler; }
   89.65 +
   89.66 +  CompileQueue* queue()        const             { return _queue; }
   89.67 +  CompilerCounters* counters() const             { return _counters; }
   89.68  
   89.69    // Get/set the thread's compilation environment.
   89.70    ciEnv*        env()                            { return _env; }
   89.71    void          set_env(ciEnv* env)              { _env = env; }
   89.72  
   89.73 -  BufferBlob*   get_buffer_blob()                { return _buffer_blob; }
   89.74 +  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
   89.75    void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
   89.76  
   89.77    // Get/set the thread's logging information
    90.1 --- a/src/share/vm/runtime/vmStructs.cpp	Wed Oct 16 11:48:03 2013 -0700
    90.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Thu Oct 17 10:58:45 2013 -0700
    90.3 @@ -917,7 +917,6 @@
    90.4    volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
    90.5    volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
    90.6    volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
    90.7 -  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
    90.8    nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
    90.9    nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
   90.10     volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
    91.1 --- a/src/share/vm/shark/sharkCompiler.cpp	Wed Oct 16 11:48:03 2013 -0700
    91.2 +++ b/src/share/vm/shark/sharkCompiler.cpp	Thu Oct 17 10:58:45 2013 -0700
    91.3 @@ -133,11 +133,10 @@
    91.4      exit(1);
    91.5    }
    91.6  
    91.7 -  execution_engine()->addModule(
    91.8 -    _native_context->module());
    91.9 +  execution_engine()->addModule(_native_context->module());
   91.10  
   91.11    // All done
   91.12 -  mark_initialized();
   91.13 +  set_state(initialized);
   91.14  }
   91.15  
   91.16  void SharkCompiler::initialize() {
    92.1 --- a/src/share/vm/shark/sharkCompiler.hpp	Wed Oct 16 11:48:03 2013 -0700
    92.2 +++ b/src/share/vm/shark/sharkCompiler.hpp	Thu Oct 17 10:58:45 2013 -0700
    92.3 @@ -50,10 +50,6 @@
    92.4      return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
    92.5    }
    92.6  
    92.7 -  // Customization
    92.8 -  bool needs_adapters()  { return false; }
    92.9 -  bool needs_stubs()     { return false; }
   92.10 -
   92.11    // Initialization
   92.12    void initialize();
   92.13  
    93.1 --- a/src/share/vm/utilities/ostream.cpp	Wed Oct 16 11:48:03 2013 -0700
    93.2 +++ b/src/share/vm/utilities/ostream.cpp	Thu Oct 17 10:58:45 2013 -0700
    93.3 @@ -465,7 +465,7 @@
    93.4  }
    93.5  
    93.6  // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
    93.7 -// in log_name, %p => pipd1234 and
    93.8 +// in log_name, %p => pid1234 and
    93.9  //              %t => YYYY-MM-DD_HH-MM-SS
   93.10  static const char* make_log_name(const char* log_name, const char* force_directory) {
   93.11    char timestr[32];
   93.12 @@ -792,7 +792,7 @@
   93.13  
   93.14  void defaultStream::init_log() {
   93.15    // %%% Need a MutexLocker?
   93.16 -  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
   93.17 +  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
   93.18    const char* try_name = make_log_name(log_name, NULL);
   93.19    fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
   93.20    if (!file->is_open()) {
    94.1 --- a/src/share/vm/utilities/vmError.cpp	Wed Oct 16 11:48:03 2013 -0700
    94.2 +++ b/src/share/vm/utilities/vmError.cpp	Thu Oct 17 10:58:45 2013 -0700
    94.3 @@ -1050,7 +1050,7 @@
    94.4          FILE* replay_data_file = os::open(fd, "w");
    94.5          if (replay_data_file != NULL) {
    94.6            fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
    94.7 -          env->dump_replay_data(&replay_data_stream);
    94.8 +          env->dump_replay_data_unsafe(&replay_data_stream);
    94.9            out.print_raw("#\n# Compiler replay data is saved as:\n# ");
   94.10            out.print_raw_cr(buffer);
   94.11          } else {
    95.1 --- a/test/compiler/8013496/Test8013496.sh	Wed Oct 16 11:48:03 2013 -0700
    95.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    95.3 @@ -1,55 +0,0 @@
    95.4 -#!/bin/sh
    95.5 -# 
    95.6 -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    95.7 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    95.8 -# 
    95.9 -# This code is free software; you can redistribute it and/or modify it
   95.10 -# under the terms of the GNU General Public License version 2 only, as
   95.11 -# published by the Free Software Foundation.
   95.12 -# 
   95.13 -# This code is distributed in the hope that it will be useful, but WITHOUT
   95.14 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   95.15 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   95.16 -# version 2 for more details (a copy is included in the LICENSE file that
   95.17 -# accompanied this code).
   95.18 -# 
   95.19 -# You should have received a copy of the GNU General Public License version
   95.20 -# 2 along with this work; if not, write to the Free Software Foundation,
   95.21 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   95.22 -# 
   95.23 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   95.24 -# or visit www.oracle.com if you need additional information or have any
   95.25 -# questions.
   95.26 -# 
   95.27 -#
   95.28 -# @test
   95.29 -# @bug 8013496
   95.30 -# @summary Test checks that the order in which ReversedCodeCacheSize and 
   95.31 -#          InitialCodeCacheSize are passed to the VM is irrelevant.  
   95.32 -# @run shell Test8013496.sh
   95.33 -#
   95.34 -#
   95.35 -## some tests require path to find test source dir
   95.36 -if [ "${TESTSRC}" = "" ]
   95.37 -then
   95.38 -  TESTSRC=${PWD}
   95.39 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
   95.40 -fi
   95.41 -echo "TESTSRC=${TESTSRC}"
   95.42 -## Adding common setup Variables for running shell tests.
   95.43 -. ${TESTSRC}/../../test_env.sh
   95.44 -set -x
   95.45 -
   95.46 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
   95.47 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
   95.48 -
   95.49 -diff 1.out 2.out
   95.50 -
   95.51 -result=$?
   95.52 -if [ $result -eq 0 ] ; then  
   95.53 -  echo "Test Passed"
   95.54 -  exit 0
   95.55 -else
   95.56 -  echo "Test Failed"
   95.57 -  exit 1
   95.58 -fi
    96.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    96.2 +++ b/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java	Thu Oct 17 10:58:45 2013 -0700
    96.3 @@ -0,0 +1,53 @@
    96.4 +/*
    96.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    96.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.7 + *
    96.8 + * This code is free software; you can redistribute it and/or modify it
    96.9 + * under the terms of the GNU General Public License version 2 only, as
   96.10 + * published by the Free Software Foundation.
   96.11 + *
   96.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   96.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   96.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   96.15 + * version 2 for more details (a copy is included in the LICENSE file that
   96.16 + * accompanied this code).
   96.17 + *
   96.18 + * You should have received a copy of the GNU General Public License version
   96.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   96.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   96.21 + *
   96.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   96.23 + * or visit www.oracle.com if you need additional information or have any
   96.24 + * questions.
   96.25 + */
   96.26 +
   96.27 +/*
   96.28 + * @test
   96.29 + * @bug 8013496
    96.30 + * @summary Test checks that the order in which ReservedCodeCacheSize and
   96.31 + *          InitialCodeCacheSize are passed to the VM is irrelevant.
   96.32 + * @library /testlibrary
   96.33 + *
   96.34 + */
   96.35 +import com.oracle.java.testlibrary.*;
   96.36 +
   96.37 +public class CheckReservedInitialCodeCacheSizeArgOrder {
   96.38 +  public static void main(String[] args) throws Exception {
   96.39 +    ProcessBuilder pb1,  pb2;
   96.40 +    OutputAnalyzer out1, out2;
   96.41 +
   96.42 +    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
   96.43 +    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
   96.44 +
   96.45 +    out1 = new OutputAnalyzer(pb1.start());
   96.46 +    out2 = new OutputAnalyzer(pb2.start());
   96.47 +
   96.48 +    // Check that the outputs are equal
   96.49 +    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
   96.50 +      throw new RuntimeException("Test failed");
   96.51 +    }
   96.52 +
   96.53 +    out1.shouldHaveExitValue(0);
   96.54 +    out2.shouldHaveExitValue(0);
   96.55 +  }
   96.56 +}
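
If flag processing were order-sensitive, one of the two runs could, for
example, warn that InitialCodeCacheSize exceeds a not-yet-parsed
ReservedCodeCacheSize while the other stayed silent. Identical stdout from
both orderings, each with exit value 0, is therefore enough to show that the
resulting code cache ergonomics do not depend on argument order.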
    97.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    97.2 +++ b/test/compiler/intrinsics/mathexact/RepeatTest.java	Thu Oct 17 10:58:45 2013 -0700
    97.3 @@ -0,0 +1,107 @@
    97.4 +/*
    97.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    97.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    97.7 + *
    97.8 + * This code is free software; you can redistribute it and/or modify it
    97.9 + * under the terms of the GNU General Public License version 2 only, as
   97.10 + * published by the Free Software Foundation.
   97.11 + *
   97.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   97.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   97.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   97.15 + * version 2 for more details (a copy is included in the LICENSE file that
   97.16 + * accompanied this code).
   97.17 + *
   97.18 + * You should have received a copy of the GNU General Public License version
   97.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   97.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   97.21 + *
   97.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   97.23 + * or visit www.oracle.com if you need additional information or have any
   97.24 + * questions.
   97.25 + */
   97.26 +
   97.27 +/*
   97.28 + * @test
   97.29 + * @bug 8025657
   97.30 + * @summary Test repeating addExact
   97.31 + * @compile RepeatTest.java
   97.32 + * @run main RepeatTest
   97.33 + *
   97.34 + */
   97.35 +
   97.36 +import java.lang.ArithmeticException;
   97.37 +
   97.38 +public class RepeatTest {
   97.39 +  public static void main(String[] args) {
   97.40 +    java.util.Random rnd = new java.util.Random();
   97.41 +    for (int i = 0; i < 50000; ++i) {
   97.42 +      int x = Integer.MAX_VALUE - 10;
    97.43 +      int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); // close enough to MAX_VALUE that x + y always overflows
   97.44 +
   97.45 +      int c = rnd.nextInt() / 2;
   97.46 +      int d = rnd.nextInt() / 2;
   97.47 +
   97.48 +      int a = addExact(x, y);
   97.49 +
   97.50 +      if (a != 36) {
    97.51 +          throw new RuntimeException("a != 36 : " + a);
   97.52 +      }
   97.53 +
   97.54 +      int b = nonExact(c, d);
   97.55 +      int n = addExact2(c, d);
   97.56 +
   97.57 +
   97.58 +      if (n != b) {
   97.59 +        throw new RuntimeException("n != b : " + n + " != " + b);
   97.60 +      }
   97.61 +    }
   97.62 +  }
   97.63 +
   97.64 +  public static int addExact2(int x, int y) {
   97.65 +      int result = 0;
   97.66 +      result += java.lang.Math.addExact(x, y);
   97.67 +      result += java.lang.Math.addExact(x, y);
   97.68 +      result += java.lang.Math.addExact(x, y);
   97.69 +      result += java.lang.Math.addExact(x, y);
   97.70 +      return result;
   97.71 +  }
   97.72 +
   97.73 +  public static int addExact(int x, int y) {
   97.74 +    int result = 0;
   97.75 +    try {
   97.76 +        result += 5;
   97.77 +        result = java.lang.Math.addExact(x, y);
   97.78 +    } catch (ArithmeticException e) {
   97.79 +        result += 1;
   97.80 +    }
   97.81 +    try {
   97.82 +        result += 6;
   97.83 +
   97.84 +        result += java.lang.Math.addExact(x, y);
   97.85 +    } catch (ArithmeticException e) {
   97.86 +        result += 2;
   97.87 +    }
   97.88 +    try {
   97.89 +        result += 7;
   97.90 +        result += java.lang.Math.addExact(x, y);
   97.91 +    } catch (ArithmeticException e) {
   97.92 +        result += 3;
   97.93 +    }
   97.94 +    try {
   97.95 +        result += 8;
   97.96 +        result += java.lang.Math.addExact(x, y);
   97.97 +    } catch (ArithmeticException e) {
   97.98 +        result += 4;
   97.99 +    }
  97.100 +    return result;
  97.101 +  }
  97.102 +
  97.103 +  public static int nonExact(int x, int y) {
  97.104 +    int result = x + y;
  97.105 +    result += x + y;
  97.106 +    result += x + y;
  97.107 +    result += x + y;
  97.108 +    return result;
  97.109 +  }
  97.110 +}
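
In addExact() above, x and y both sit within ten of Integer.MAX_VALUE, so
every Math.addExact(x, y) call overflows and control reaches the catch block
each time. The expected value is just the sum of the constants accumulated
along the way:

    (5 + 1) + (6 + 2) + (7 + 3) + (8 + 4) = 36

which is what the main loop asserts. In the addExact2()/nonExact() pair, c
and d are halved random ints, so c + d always fits in an int and
Math.addExact(c, d) never throws; both methods then perform the same
(possibly wrapping) accumulation and must agree.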
    98.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    98.2 +++ b/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java	Thu Oct 17 10:58:45 2013 -0700
    98.3 @@ -0,0 +1,40 @@
    98.4 +/*
    98.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    98.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    98.7 + *
    98.8 + * This code is free software; you can redistribute it and/or modify it
    98.9 + * under the terms of the GNU General Public License version 2 only, as
   98.10 + * published by the Free Software Foundation.
   98.11 + *
   98.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   98.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   98.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   98.15 + * version 2 for more details (a copy is included in the LICENSE file that
   98.16 + * accompanied this code).
   98.17 + *
   98.18 + * You should have received a copy of the GNU General Public License version
   98.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   98.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   98.21 + *
   98.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   98.23 + * or visit www.oracle.com if you need additional information or have any
   98.24 + * questions.
   98.25 + *
   98.26 + */
   98.27 +
   98.28 +/**
   98.29 + * @test
   98.30 + * @bug 8026124
    98.31 + * @summary JavaScript file provoked assertion failure in linkResolver.cpp
   98.32 + *
   98.33 + * @run main/othervm CreatesInterfaceDotEqualsCallInfo
   98.34 + */
   98.35 +
   98.36 +public class CreatesInterfaceDotEqualsCallInfo {
   98.37 +  public static void main(String[] args) throws java.io.IOException {
   98.38 +    String[] jsargs = { System.getProperty("test.src", ".") +
   98.39 +                        "/createsInterfaceDotEqualsCallInfo.js" };
   98.40 +    jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs);
    98.41 +    System.out.println("PASS, did not crash running JavaScript");
   98.42 +  }
   98.43 +}
    99.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    99.2 +++ b/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js	Thu Oct 17 10:58:45 2013 -0700
    99.3 @@ -0,0 +1,26 @@
    99.4 +/*
    99.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    99.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.7 + *
    99.8 + * This code is free software; you can redistribute it and/or modify it
    99.9 + * under the terms of the GNU General Public License version 2 only, as
   99.10 + * published by the Free Software Foundation.
   99.11 + *
   99.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   99.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   99.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   99.15 + * version 2 for more details (a copy is included in the LICENSE file that
   99.16 + * accompanied this code).
   99.17 + *
   99.18 + * You should have received a copy of the GNU General Public License version
   99.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   99.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   99.21 + *
   99.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   99.23 + * or visit www.oracle.com if you need additional information or have any
   99.24 + * questions.
   99.25 + *
   99.26 + */
   99.27 +
   99.28 +var path = new java.io.File("/Users/someone").toPath();
   99.29 +path.toString();
   100.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   100.2 +++ b/test/compiler/startup/SmallCodeCacheStartup.java	Thu Oct 17 10:58:45 2013 -0700
   100.3 @@ -0,0 +1,43 @@
   100.4 +/*
   100.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   100.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   100.7 + *
   100.8 + * This code is free software; you can redistribute it and/or modify it
   100.9 + * under the terms of the GNU General Public License version 2 only, as
  100.10 + * published by the Free Software Foundation.
  100.11 + *
  100.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  100.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  100.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  100.15 + * version 2 for more details (a copy is included in the LICENSE file that
  100.16 + * accompanied this code).
  100.17 + *
  100.18 + * You should have received a copy of the GNU General Public License version
  100.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  100.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  100.21 + *
  100.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  100.23 + * or visit www.oracle.com if you need additional information or have any
  100.24 + * questions.
  100.25 + */
  100.26 +
  100.27 +/*
  100.28 + * @test
  100.29 + * @bug 8023014
  100.30 + * @summary Test ensures that there is no crash when compiler initialization fails
  100.31 + * @library /testlibrary
  100.32 + *
  100.33 + */
  100.34 +import com.oracle.java.testlibrary.*;
  100.35 +
  100.36 +public class SmallCodeCacheStartup {
  100.37 +  public static void main(String[] args) throws Exception {
  100.38 +    ProcessBuilder pb;
  100.39 +    OutputAnalyzer out;
  100.40 +
  100.41 +    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
  100.42 +    out = new OutputAnalyzer(pb.start());
  100.43 +    out.shouldContain("no space to run compiler");
  100.44 +    out.shouldHaveExitValue(0);
  100.45 +  }
  100.46 +}
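
The flag combination above deliberately starves the compiler: a 3m reserved
code cache cannot hold the buffers needed by 64 compiler threads, so
compiler initialization fails during startup. The assertions capture the
intent of 8023014: the VM should report "no space to run compiler" yet still
complete -version with exit value 0 (presumably continuing in
interpreter-only mode) rather than crash.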
