Mon, 21 Oct 2013 17:34:27 -0700
Merge
1.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon Oct 21 14:38:11 2013 -0700 1.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon Oct 21 17:34:27 2013 -0700 1.3 @@ -2565,7 +2565,7 @@ 1.4 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 1.5 mdo_offset_bias); 1.6 __ ld_ptr(receiver_addr, tmp1); 1.7 - __ verify_oop(tmp1); 1.8 + __ verify_klass_ptr(tmp1); 1.9 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); 1.10 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 1.11 mdo_offset_bias);
2.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Mon Oct 21 14:38:11 2013 -0700 2.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Mon Oct 21 17:34:27 2013 -0700 2.3 @@ -404,7 +404,9 @@ 2.4 if (id == fast_new_instance_init_check_id) { 2.5 // make sure the klass is initialized 2.6 __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1); 2.7 - __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path); 2.8 + __ cmp(G3_t1, InstanceKlass::fully_initialized); 2.9 + __ br(Assembler::notEqual, false, Assembler::pn, slow_path); 2.10 + __ delayed()->nop(); 2.11 } 2.12 #ifdef ASSERT 2.13 // assert object can be fast path allocated 2.14 @@ -515,7 +517,9 @@ 2.15 2.16 // check that array length is small enough for fast path 2.17 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1); 2.18 - __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path); 2.19 + __ cmp(G4_length, G3_t1); 2.20 + __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path); 2.21 + __ delayed()->nop(); 2.22 2.23 // if we got here then the TLAB allocation failed, so try 2.24 // refilling the TLAB or allocating directly from eden.
3.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Mon Oct 21 14:38:11 2013 -0700 3.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Mon Oct 21 17:34:27 2013 -0700 3.3 @@ -3333,7 +3333,8 @@ 3.4 3.5 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 3.6 // No allocation in the shared eden. 3.7 - ba_short(slow_case); 3.8 + ba(slow_case); 3.9 + delayed()->nop(); 3.10 } 3.11 3.12 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); 3.13 @@ -3358,7 +3359,8 @@ 3.14 add(t2, 1, t2); 3.15 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); 3.16 } 3.17 - ba_short(try_eden); 3.18 + ba(try_eden); 3.19 + delayed()->nop(); 3.20 3.21 bind(discard_tlab); 3.22 if (TLABStats) { 3.23 @@ -3420,7 +3422,8 @@ 3.24 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); 3.25 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); 3.26 verify_tlab(); 3.27 - ba_short(retry); 3.28 + ba(retry); 3.29 + delayed()->nop(); 3.30 } 3.31 3.32 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
4.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Oct 21 14:38:11 2013 -0700 4.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Oct 21 17:34:27 2013 -0700 4.3 @@ -1206,6 +1206,10 @@ 4.4 LIR_Address* addr = src->as_address_ptr(); 4.5 Address from_addr = as_Address(addr); 4.6 4.7 + if (addr->base()->type() == T_OBJECT) { 4.8 + __ verify_oop(addr->base()->as_pointer_register()); 4.9 + } 4.10 + 4.11 switch (type) { 4.12 case T_BOOLEAN: // fall through 4.13 case T_BYTE: // fall through
5.1 --- a/src/cpu/x86/vm/templateInterpreter_x86.hpp Mon Oct 21 14:38:11 2013 -0700 5.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp Mon Oct 21 17:34:27 2013 -0700 5.3 @@ -34,9 +34,9 @@ 5.4 // Run with +PrintInterpreter to get the VM to print out the size. 5.5 // Max size with JVMTI 5.6 #ifdef AMD64 5.7 - const static int InterpreterCodeSize = 208 * 1024; 5.8 + const static int InterpreterCodeSize = 256 * 1024; 5.9 #else 5.10 - const static int InterpreterCodeSize = 176 * 1024; 5.11 + const static int InterpreterCodeSize = 224 * 1024; 5.12 #endif // AMD64 5.13 5.14 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
6.1 --- a/src/share/vm/adlc/formssel.cpp Mon Oct 21 14:38:11 2013 -0700 6.2 +++ b/src/share/vm/adlc/formssel.cpp Mon Oct 21 17:34:27 2013 -0700 6.3 @@ -536,12 +536,6 @@ 6.4 if( data_type != Form::none ) 6.5 rematerialize = true; 6.6 6.7 - // Ugly: until a better fix is implemented, disable rematerialization for 6.8 - // negD nodes because they are proved to be problematic. 6.9 - if (is_ideal_negD()) { 6.10 - return false; 6.11 - } 6.12 - 6.13 // Constants 6.14 if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) 6.15 rematerialize = true;
7.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Mon Oct 21 14:38:11 2013 -0700 7.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Mon Oct 21 17:34:27 2013 -0700 7.3 @@ -1265,6 +1265,7 @@ 7.4 7.5 LIRItem rcvr(x->argument_at(0), this); 7.6 rcvr.load_item(); 7.7 + LIR_Opr temp = new_register(T_METADATA); 7.8 LIR_Opr result = rlock_result(x); 7.9 7.10 // need to perform the null check on the rcvr 7.11 @@ -1272,8 +1273,11 @@ 7.12 if (x->needs_null_check()) { 7.13 info = state_for(x); 7.14 } 7.15 - __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info); 7.16 - __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); 7.17 + 7.18 + // FIXME T_ADDRESS should actually be T_METADATA but it can't because the 7.19 + // meaning of these two is mixed up (see JDK-8026837). 7.20 + __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info); 7.21 + __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); 7.22 } 7.23 7.24
8.1 --- a/src/share/vm/opto/c2_globals.hpp Mon Oct 21 14:38:11 2013 -0700 8.2 +++ b/src/share/vm/opto/c2_globals.hpp Mon Oct 21 17:34:27 2013 -0700 8.3 @@ -638,7 +638,10 @@ 8.4 "Find best control for expensive operations") \ 8.5 \ 8.6 product(bool, UseMathExactIntrinsics, true, \ 8.7 - "Enables intrinsification of various java.lang.Math funcitons") 8.8 + "Enables intrinsification of various java.lang.Math functions") \ 8.9 + \ 8.10 + experimental(bool, ReplaceInParentMaps, false, \ 8.11 + "Propagate type improvements in callers of inlinee if possible") 8.12 8.13 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG) 8.14
9.1 --- a/src/share/vm/opto/callGenerator.cpp Mon Oct 21 14:38:11 2013 -0700 9.2 +++ b/src/share/vm/opto/callGenerator.cpp Mon Oct 21 17:34:27 2013 -0700 9.3 @@ -63,12 +63,12 @@ 9.4 } 9.5 9.6 virtual bool is_parse() const { return true; } 9.7 - virtual JVMState* generate(JVMState* jvms); 9.8 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.9 int is_osr() { return _is_osr; } 9.10 9.11 }; 9.12 9.13 -JVMState* ParseGenerator::generate(JVMState* jvms) { 9.14 +JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.15 Compile* C = Compile::current(); 9.16 9.17 if (is_osr()) { 9.18 @@ -80,7 +80,7 @@ 9.19 return NULL; // bailing out of the compile; do not try to parse 9.20 } 9.21 9.22 - Parse parser(jvms, method(), _expected_uses); 9.23 + Parse parser(jvms, method(), _expected_uses, parent_parser); 9.24 // Grab signature for matching/allocation 9.25 #ifdef ASSERT 9.26 if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { 9.27 @@ -119,12 +119,12 @@ 9.28 _separate_io_proj(separate_io_proj) 9.29 { 9.30 } 9.31 - virtual JVMState* generate(JVMState* jvms); 9.32 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.33 9.34 CallStaticJavaNode* call_node() const { return _call_node; } 9.35 }; 9.36 9.37 -JVMState* DirectCallGenerator::generate(JVMState* jvms) { 9.38 +JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.39 GraphKit kit(jvms); 9.40 bool is_static = method()->is_static(); 9.41 address target = is_static ? 
SharedRuntime::get_resolve_static_call_stub() 9.42 @@ -171,10 +171,10 @@ 9.43 vtable_index >= 0, "either invalid or usable"); 9.44 } 9.45 virtual bool is_virtual() const { return true; } 9.46 - virtual JVMState* generate(JVMState* jvms); 9.47 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.48 }; 9.49 9.50 -JVMState* VirtualCallGenerator::generate(JVMState* jvms) { 9.51 +JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.52 GraphKit kit(jvms); 9.53 Node* receiver = kit.argument(0); 9.54 9.55 @@ -276,7 +276,7 @@ 9.56 // Convert the CallStaticJava into an inline 9.57 virtual void do_late_inline(); 9.58 9.59 - virtual JVMState* generate(JVMState* jvms) { 9.60 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { 9.61 Compile *C = Compile::current(); 9.62 C->print_inlining_skip(this); 9.63 9.64 @@ -290,7 +290,7 @@ 9.65 // that the late inlining logic can distinguish between fall 9.66 // through and exceptional uses of the memory and io projections 9.67 // as is done for allocations and macro expansion. 
9.68 - return DirectCallGenerator::generate(jvms); 9.69 + return DirectCallGenerator::generate(jvms, parent_parser); 9.70 } 9.71 9.72 virtual void print_inlining_late(const char* msg) { 9.73 @@ -389,7 +389,7 @@ 9.74 } 9.75 9.76 // Now perform the inling using the synthesized JVMState 9.77 - JVMState* new_jvms = _inline_cg->generate(jvms); 9.78 + JVMState* new_jvms = _inline_cg->generate(jvms, NULL); 9.79 if (new_jvms == NULL) return; // no change 9.80 if (C->failing()) return; 9.81 9.82 @@ -429,8 +429,8 @@ 9.83 9.84 virtual bool is_mh_late_inline() const { return true; } 9.85 9.86 - virtual JVMState* generate(JVMState* jvms) { 9.87 - JVMState* new_jvms = LateInlineCallGenerator::generate(jvms); 9.88 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { 9.89 + JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser); 9.90 if (_input_not_const) { 9.91 // inlining won't be possible so no need to enqueue right now. 9.92 call_node()->set_generator(this); 9.93 @@ -477,13 +477,13 @@ 9.94 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : 9.95 LateInlineCallGenerator(method, inline_cg) {} 9.96 9.97 - virtual JVMState* generate(JVMState* jvms) { 9.98 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { 9.99 Compile *C = Compile::current(); 9.100 C->print_inlining_skip(this); 9.101 9.102 C->add_string_late_inline(this); 9.103 9.104 - JVMState* new_jvms = DirectCallGenerator::generate(jvms); 9.105 + JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); 9.106 return new_jvms; 9.107 } 9.108 }; 9.109 @@ -498,13 +498,13 @@ 9.110 LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : 9.111 LateInlineCallGenerator(method, inline_cg) {} 9.112 9.113 - virtual JVMState* generate(JVMState* jvms) { 9.114 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { 9.115 Compile *C = Compile::current(); 9.116 C->print_inlining_skip(this); 9.117 9.118 
C->add_boxing_late_inline(this); 9.119 9.120 - JVMState* new_jvms = DirectCallGenerator::generate(jvms); 9.121 + JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); 9.122 return new_jvms; 9.123 } 9.124 }; 9.125 @@ -540,7 +540,7 @@ 9.126 virtual bool is_virtual() const { return _is_virtual; } 9.127 virtual bool is_deferred() const { return true; } 9.128 9.129 - virtual JVMState* generate(JVMState* jvms); 9.130 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.131 }; 9.132 9.133 9.134 @@ -550,12 +550,12 @@ 9.135 return new WarmCallGenerator(ci, if_cold, if_hot); 9.136 } 9.137 9.138 -JVMState* WarmCallGenerator::generate(JVMState* jvms) { 9.139 +JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.140 Compile* C = Compile::current(); 9.141 if (C->log() != NULL) { 9.142 C->log()->elem("warm_call bci='%d'", jvms->bci()); 9.143 } 9.144 - jvms = _if_cold->generate(jvms); 9.145 + jvms = _if_cold->generate(jvms, parent_parser); 9.146 if (jvms != NULL) { 9.147 Node* m = jvms->map()->control(); 9.148 if (m->is_CatchProj()) m = m->in(0); else m = C->top(); 9.149 @@ -616,7 +616,7 @@ 9.150 virtual bool is_inline() const { return _if_hit->is_inline(); } 9.151 virtual bool is_deferred() const { return _if_hit->is_deferred(); } 9.152 9.153 - virtual JVMState* generate(JVMState* jvms); 9.154 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.155 }; 9.156 9.157 9.158 @@ -628,7 +628,7 @@ 9.159 } 9.160 9.161 9.162 -JVMState* PredictedCallGenerator::generate(JVMState* jvms) { 9.163 +JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.164 GraphKit kit(jvms); 9.165 PhaseGVN& gvn = kit.gvn(); 9.166 // We need an explicit receiver null_check before checking its type. 
9.167 @@ -656,7 +656,7 @@ 9.168 { PreserveJVMState pjvms(&kit); 9.169 kit.set_control(slow_ctl); 9.170 if (!kit.stopped()) { 9.171 - slow_jvms = _if_missed->generate(kit.sync_jvms()); 9.172 + slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser); 9.173 if (kit.failing()) 9.174 return NULL; // might happen because of NodeCountInliningCutoff 9.175 assert(slow_jvms != NULL, "must be"); 9.176 @@ -677,12 +677,12 @@ 9.177 kit.replace_in_map(receiver, exact_receiver); 9.178 9.179 // Make the hot call: 9.180 - JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); 9.181 + JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser); 9.182 if (new_jvms == NULL) { 9.183 // Inline failed, so make a direct call. 9.184 assert(_if_hit->is_inline(), "must have been a failed inline"); 9.185 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); 9.186 - new_jvms = cg->generate(kit.sync_jvms()); 9.187 + new_jvms = cg->generate(kit.sync_jvms(), parent_parser); 9.188 } 9.189 kit.add_exception_states_from(new_jvms); 9.190 kit.set_jvms(new_jvms); 9.191 @@ -874,7 +874,7 @@ 9.192 virtual bool is_inlined() const { return true; } 9.193 virtual bool is_intrinsic() const { return true; } 9.194 9.195 - virtual JVMState* generate(JVMState* jvms); 9.196 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.197 }; 9.198 9.199 9.200 @@ -884,7 +884,7 @@ 9.201 } 9.202 9.203 9.204 -JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) { 9.205 +JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.206 GraphKit kit(jvms); 9.207 PhaseGVN& gvn = kit.gvn(); 9.208 9.209 @@ -904,7 +904,7 @@ 9.210 PreserveJVMState pjvms(&kit); 9.211 kit.set_control(slow_ctl); 9.212 if (!kit.stopped()) { 9.213 - slow_jvms = _cg->generate(kit.sync_jvms()); 9.214 + slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser); 9.215 if (kit.failing()) 9.216 return NULL; // might happen because of NodeCountInliningCutoff 9.217 
assert(slow_jvms != NULL, "must be"); 9.218 @@ -922,12 +922,12 @@ 9.219 } 9.220 9.221 // Generate intrinsic code: 9.222 - JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); 9.223 + JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser); 9.224 if (new_jvms == NULL) { 9.225 // Intrinsic failed, so use slow code or make a direct call. 9.226 if (slow_map == NULL) { 9.227 CallGenerator* cg = CallGenerator::for_direct_call(method()); 9.228 - new_jvms = cg->generate(kit.sync_jvms()); 9.229 + new_jvms = cg->generate(kit.sync_jvms(), parent_parser); 9.230 } else { 9.231 kit.set_jvms(slow_jvms); 9.232 return kit.transfer_exceptions_into_jvms(); 9.233 @@ -997,7 +997,7 @@ 9.234 virtual bool is_virtual() const { ShouldNotReachHere(); return false; } 9.235 virtual bool is_trap() const { return true; } 9.236 9.237 - virtual JVMState* generate(JVMState* jvms); 9.238 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 9.239 }; 9.240 9.241 9.242 @@ -1009,7 +1009,7 @@ 9.243 } 9.244 9.245 9.246 -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { 9.247 +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { 9.248 GraphKit kit(jvms); 9.249 // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). 9.250 int nargs = method()->arg_size();
10.1 --- a/src/share/vm/opto/callGenerator.hpp Mon Oct 21 14:38:11 2013 -0700 10.2 +++ b/src/share/vm/opto/callGenerator.hpp Mon Oct 21 17:34:27 2013 -0700 10.3 @@ -31,6 +31,8 @@ 10.4 #include "opto/type.hpp" 10.5 #include "runtime/deoptimization.hpp" 10.6 10.7 +class Parse; 10.8 + 10.9 //---------------------------CallGenerator------------------------------------- 10.10 // The subclasses of this class handle generation of ideal nodes for 10.11 // call sites and method entry points. 10.12 @@ -108,7 +110,7 @@ 10.13 // 10.14 // If the result is NULL, it means that this CallGenerator was unable 10.15 // to handle the given call, and another CallGenerator should be consulted. 10.16 - virtual JVMState* generate(JVMState* jvms) = 0; 10.17 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0; 10.18 10.19 // How to generate a call site that is inlined: 10.20 static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
11.1 --- a/src/share/vm/opto/compile.cpp Mon Oct 21 14:38:11 2013 -0700 11.2 +++ b/src/share/vm/opto/compile.cpp Mon Oct 21 17:34:27 2013 -0700 11.3 @@ -655,7 +655,8 @@ 11.4 _inlining_progress(false), 11.5 _inlining_incrementally(false), 11.6 _print_inlining_list(NULL), 11.7 - _print_inlining_idx(0) { 11.8 + _print_inlining_idx(0), 11.9 + _preserve_jvm_state(0) { 11.10 C = this; 11.11 11.12 CompileWrapper cw(this); 11.13 @@ -763,7 +764,7 @@ 11.14 return; 11.15 } 11.16 JVMState* jvms = build_start_state(start(), tf()); 11.17 - if ((jvms = cg->generate(jvms)) == NULL) { 11.18 + if ((jvms = cg->generate(jvms, NULL)) == NULL) { 11.19 record_method_not_compilable("method parse failed"); 11.20 return; 11.21 } 11.22 @@ -940,7 +941,8 @@ 11.23 _inlining_progress(false), 11.24 _inlining_incrementally(false), 11.25 _print_inlining_list(NULL), 11.26 - _print_inlining_idx(0) { 11.27 + _print_inlining_idx(0), 11.28 + _preserve_jvm_state(0) { 11.29 C = this; 11.30 11.31 #ifndef PRODUCT
12.1 --- a/src/share/vm/opto/compile.hpp Mon Oct 21 14:38:11 2013 -0700 12.2 +++ b/src/share/vm/opto/compile.hpp Mon Oct 21 17:34:27 2013 -0700 12.3 @@ -425,6 +425,9 @@ 12.4 // Expensive nodes list already sorted? 12.5 bool expensive_nodes_sorted() const; 12.6 12.7 + // Are we within a PreserveJVMState block? 12.8 + int _preserve_jvm_state; 12.9 + 12.10 public: 12.11 12.12 outputStream* print_inlining_stream() const { 12.13 @@ -820,7 +823,9 @@ 12.14 12.15 // Decide how to build a call. 12.16 // The profile factor is a discount to apply to this site's interp. profile. 12.17 - CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false); 12.18 + CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, 12.19 + JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, 12.20 + bool delayed_forbidden = false); 12.21 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { 12.22 return should_delay_string_inlining(call_method, jvms) || 12.23 should_delay_boxing_inlining(call_method, jvms); 12.24 @@ -1156,6 +1161,21 @@ 12.25 12.26 // Auxiliary method for randomized fuzzing/stressing 12.27 static bool randomized_select(int count); 12.28 + 12.29 + // enter a PreserveJVMState block 12.30 + void inc_preserve_jvm_state() { 12.31 + _preserve_jvm_state++; 12.32 + } 12.33 + 12.34 + // exit a PreserveJVMState block 12.35 + void dec_preserve_jvm_state() { 12.36 + _preserve_jvm_state--; 12.37 + assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative"); 12.38 + } 12.39 + 12.40 + bool has_preserve_jvm_state() const { 12.41 + return _preserve_jvm_state > 0; 12.42 + } 12.43 }; 12.44 12.45 #endif // SHARE_VM_OPTO_COMPILE_HPP
13.1 --- a/src/share/vm/opto/doCall.cpp Mon Oct 21 14:38:11 2013 -0700 13.2 +++ b/src/share/vm/opto/doCall.cpp Mon Oct 21 17:34:27 2013 -0700 13.3 @@ -495,7 +495,7 @@ 13.4 // because exceptions don't return to the call site.) 13.5 profile_call(receiver); 13.6 13.7 - JVMState* new_jvms = cg->generate(jvms); 13.8 + JVMState* new_jvms = cg->generate(jvms, this); 13.9 if (new_jvms == NULL) { 13.10 // When inlining attempt fails (e.g., too many arguments), 13.11 // it may contaminate the current compile state, making it 13.12 @@ -509,7 +509,7 @@ 13.13 // intrinsic was expecting to optimize. Should always be possible to 13.14 // get a normal java call that may inline in that case 13.15 cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false); 13.16 - if ((new_jvms = cg->generate(jvms)) == NULL) { 13.17 + if ((new_jvms = cg->generate(jvms, this)) == NULL) { 13.18 guarantee(failing(), "call failed to generate: calls should work"); 13.19 return; 13.20 }
14.1 --- a/src/share/vm/opto/graphKit.cpp Mon Oct 21 14:38:11 2013 -0700 14.2 +++ b/src/share/vm/opto/graphKit.cpp Mon Oct 21 17:34:27 2013 -0700 14.3 @@ -639,6 +639,7 @@ 14.4 _map = kit->map(); // preserve the map 14.5 _sp = kit->sp(); 14.6 kit->set_map(clone_map ? kit->clone_map() : NULL); 14.7 + Compile::current()->inc_preserve_jvm_state(); 14.8 #ifdef ASSERT 14.9 _bci = kit->bci(); 14.10 Parse* parser = kit->is_Parse(); 14.11 @@ -656,6 +657,7 @@ 14.12 #endif 14.13 kit->set_map(_map); 14.14 kit->set_sp(_sp); 14.15 + Compile::current()->dec_preserve_jvm_state(); 14.16 } 14.17 14.18 14.19 @@ -1373,17 +1375,70 @@ 14.20 14.21 //--------------------------replace_in_map------------------------------------- 14.22 void GraphKit::replace_in_map(Node* old, Node* neww) { 14.23 - this->map()->replace_edge(old, neww); 14.24 + if (old == neww) { 14.25 + return; 14.26 + } 14.27 + 14.28 + map()->replace_edge(old, neww); 14.29 14.30 // Note: This operation potentially replaces any edge 14.31 // on the map. This includes locals, stack, and monitors 14.32 // of the current (innermost) JVM state. 14.33 14.34 - // We can consider replacing in caller maps. 14.35 - // The idea would be that an inlined function's null checks 14.36 - // can be shared with the entire inlining tree. 14.37 - // The expense of doing this is that the PreserveJVMState class 14.38 - // would have to preserve caller states too, with a deep copy. 14.39 + if (!ReplaceInParentMaps) { 14.40 + return; 14.41 + } 14.42 + 14.43 + // PreserveJVMState doesn't do a deep copy so we can't modify 14.44 + // parents 14.45 + if (Compile::current()->has_preserve_jvm_state()) { 14.46 + return; 14.47 + } 14.48 + 14.49 + Parse* parser = is_Parse(); 14.50 + bool progress = true; 14.51 + Node* ctrl = map()->in(0); 14.52 + // Follow the chain of parsers and see whether the update can be 14.53 + // done in the map of callers. We can do the replace for a caller if 14.54 + // the current control post dominates the control of a caller. 
14.55 + while (parser != NULL && parser->caller() != NULL && progress) { 14.56 + progress = false; 14.57 + Node* parent_map = parser->caller()->map(); 14.58 + assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch"); 14.59 + 14.60 + Node* parent_ctrl = parent_map->in(0); 14.61 + 14.62 + while (parent_ctrl->is_Region()) { 14.63 + Node* n = parent_ctrl->as_Region()->is_copy(); 14.64 + if (n == NULL) { 14.65 + break; 14.66 + } 14.67 + parent_ctrl = n; 14.68 + } 14.69 + 14.70 + for (;;) { 14.71 + if (ctrl == parent_ctrl) { 14.72 + // update the map of the exits which is the one that will be 14.73 + // used when compilation resume after inlining 14.74 + parser->exits().map()->replace_edge(old, neww); 14.75 + progress = true; 14.76 + break; 14.77 + } 14.78 + if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { 14.79 + ctrl = ctrl->in(0)->in(0); 14.80 + } else if (ctrl->is_Region()) { 14.81 + Node* n = ctrl->as_Region()->is_copy(); 14.82 + if (n == NULL) { 14.83 + break; 14.84 + } 14.85 + ctrl = n; 14.86 + } else { 14.87 + break; 14.88 + } 14.89 + } 14.90 + 14.91 + parser = parser->parent_parser(); 14.92 + } 14.93 } 14.94 14.95
15.1 --- a/src/share/vm/opto/ifnode.cpp Mon Oct 21 14:38:11 2013 -0700 15.2 +++ b/src/share/vm/opto/ifnode.cpp Mon Oct 21 17:34:27 2013 -0700 15.3 @@ -1019,7 +1019,7 @@ 15.4 // be skipped. For example, range check predicate has two checks 15.5 // for lower and upper bounds. 15.6 ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); 15.7 - if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate)) 15.8 + if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) 15.9 prev_dom = idom; 15.10 15.11 // Now walk the current IfNode's projections.
16.1 --- a/src/share/vm/opto/library_call.cpp Mon Oct 21 14:38:11 2013 -0700 16.2 +++ b/src/share/vm/opto/library_call.cpp Mon Oct 21 17:34:27 2013 -0700 16.3 @@ -63,7 +63,7 @@ 16.4 virtual bool is_virtual() const { return _is_virtual; } 16.5 virtual bool is_predicted() const { return _is_predicted; } 16.6 virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; } 16.7 - virtual JVMState* generate(JVMState* jvms); 16.8 + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); 16.9 virtual Node* generate_predicate(JVMState* jvms); 16.10 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; } 16.11 }; 16.12 @@ -556,7 +556,7 @@ 16.13 // Nothing to do here. 16.14 } 16.15 16.16 -JVMState* LibraryIntrinsic::generate(JVMState* jvms) { 16.17 +JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) { 16.18 LibraryCallKit kit(jvms, this); 16.19 Compile* C = kit.C; 16.20 int nodes = C->unique();
17.1 --- a/src/share/vm/opto/loopPredicate.cpp Mon Oct 21 14:38:11 2013 -0700 17.2 +++ b/src/share/vm/opto/loopPredicate.cpp Mon Oct 21 17:34:27 2013 -0700 17.3 @@ -41,63 +41,6 @@ 17.4 * checks (such as null checks). 17.5 */ 17.6 17.7 -//-------------------------------is_uncommon_trap_proj---------------------------- 17.8 -// Return true if proj is the form of "proj->[region->..]call_uct" 17.9 -bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) { 17.10 - int path_limit = 10; 17.11 - assert(proj, "invalid argument"); 17.12 - Node* out = proj; 17.13 - for (int ct = 0; ct < path_limit; ct++) { 17.14 - out = out->unique_ctrl_out(); 17.15 - if (out == NULL) 17.16 - return false; 17.17 - if (out->is_CallStaticJava()) { 17.18 - int req = out->as_CallStaticJava()->uncommon_trap_request(); 17.19 - if (req != 0) { 17.20 - Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); 17.21 - if (trap_reason == reason || reason == Deoptimization::Reason_none) { 17.22 - return true; 17.23 - } 17.24 - } 17.25 - return false; // don't do further after call 17.26 - } 17.27 - if (out->Opcode() != Op_Region) 17.28 - return false; 17.29 - } 17.30 - return false; 17.31 -} 17.32 - 17.33 -//-------------------------------is_uncommon_trap_if_pattern------------------------- 17.34 -// Return true for "if(test)-> proj -> ... 17.35 -// | 17.36 -// V 17.37 -// other_proj->[region->..]call_uct" 17.38 -// 17.39 -// "must_reason_predicate" means the uct reason must be Reason_predicate 17.40 -bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) { 17.41 - Node *in0 = proj->in(0); 17.42 - if (!in0->is_If()) return false; 17.43 - // Variation of a dead If node. 
17.44 - if (in0->outcnt() < 2) return false; 17.45 - IfNode* iff = in0->as_If(); 17.46 - 17.47 - // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate 17.48 - if (reason != Deoptimization::Reason_none) { 17.49 - if (iff->in(1)->Opcode() != Op_Conv2B || 17.50 - iff->in(1)->in(1)->Opcode() != Op_Opaque1) { 17.51 - return false; 17.52 - } 17.53 - } 17.54 - 17.55 - ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); 17.56 - if (is_uncommon_trap_proj(other_proj, reason)) { 17.57 - assert(reason == Deoptimization::Reason_none || 17.58 - Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); 17.59 - return true; 17.60 - } 17.61 - return false; 17.62 -} 17.63 - 17.64 //-------------------------------register_control------------------------- 17.65 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { 17.66 assert(n->is_CFG(), "must be control node"); 17.67 @@ -147,7 +90,7 @@ 17.68 // This code is also used to clone predicates to clonned loops. 
17.69 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, 17.70 Deoptimization::DeoptReason reason) { 17.71 - assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); 17.72 + assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); 17.73 IfNode* iff = cont_proj->in(0)->as_If(); 17.74 17.75 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); 17.76 @@ -235,7 +178,7 @@ 17.77 ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, 17.78 Deoptimization::DeoptReason reason) { 17.79 assert(new_entry != 0, "only used for clone predicate"); 17.80 - assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); 17.81 + assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); 17.82 IfNode* iff = cont_proj->in(0)->as_If(); 17.83 17.84 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); 17.85 @@ -422,7 +365,7 @@ 17.86 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { 17.87 if (start_c == NULL || !start_c->is_Proj()) 17.88 return NULL; 17.89 - if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) { 17.90 + if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) { 17.91 return start_c->as_Proj(); 17.92 } 17.93 return NULL; 17.94 @@ -773,7 +716,7 @@ 17.95 ProjNode* proj = if_proj_list.pop()->as_Proj(); 17.96 IfNode* iff = proj->in(0)->as_If(); 17.97 17.98 - if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) { 17.99 + if (!proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { 17.100 if (loop->is_loop_exit(iff)) { 17.101 // stop processing the remaining projs in the list because the execution of them 17.102 // depends on the condition of "iff" (iff->in(1)).
18.1 --- a/src/share/vm/opto/loopnode.cpp Mon Oct 21 14:38:11 2013 -0700 18.2 +++ b/src/share/vm/opto/loopnode.cpp Mon Oct 21 17:34:27 2013 -0700 18.3 @@ -167,7 +167,7 @@ 18.4 // expensive nodes will notice the loop and skip over it to try to 18.5 // move the node further up. 18.6 if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) { 18.7 - if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) { 18.8 + if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { 18.9 break; 18.10 } 18.11 next = idom(ctl->in(1)->in(0)); 18.12 @@ -181,7 +181,7 @@ 18.13 } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) { 18.14 next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control(); 18.15 } else if (parent_ctl->is_If()) { 18.16 - if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) { 18.17 + if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { 18.18 break; 18.19 } 18.20 assert(idom(ctl) == parent_ctl, "strange");
19.1 --- a/src/share/vm/opto/loopnode.hpp Mon Oct 21 14:38:11 2013 -0700 19.2 +++ b/src/share/vm/opto/loopnode.hpp Mon Oct 21 17:34:27 2013 -0700 19.3 @@ -876,13 +876,6 @@ 19.4 // Return true if exp is a scaled induction var plus (or minus) constant 19.5 bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); 19.6 19.7 - // Return true if proj is for "proj->[region->..]call_uct" 19.8 - static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason); 19.9 - // Return true for "if(test)-> proj -> ... 19.10 - // | 19.11 - // V 19.12 - // other_proj->[region->..]call_uct" 19.13 - static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason); 19.14 // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted 19.15 ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, 19.16 Deoptimization::DeoptReason reason);
20.1 --- a/src/share/vm/opto/loopopts.cpp Mon Oct 21 14:38:11 2013 -0700 20.2 +++ b/src/share/vm/opto/loopopts.cpp Mon Oct 21 17:34:27 2013 -0700 20.3 @@ -238,7 +238,7 @@ 20.4 ProjNode* dp_proj = dp->as_Proj(); 20.5 ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); 20.6 if (exclude_loop_predicate && 20.7 - is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate)) 20.8 + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) 20.9 return; // Let IGVN transformation change control dependence. 20.10 20.11 IdealLoopTree *old_loop = get_loop(dp);
21.1 --- a/src/share/vm/opto/multnode.cpp Mon Oct 21 14:38:11 2013 -0700 21.2 +++ b/src/share/vm/opto/multnode.cpp Mon Oct 21 17:34:27 2013 -0700 21.3 @@ -24,6 +24,7 @@ 21.4 21.5 #include "precompiled.hpp" 21.6 #include "opto/callnode.hpp" 21.7 +#include "opto/cfgnode.hpp" 21.8 #include "opto/matcher.hpp" 21.9 #include "opto/mathexactnode.hpp" 21.10 #include "opto/multnode.hpp" 21.11 @@ -150,3 +151,59 @@ 21.12 uint ProjNode::ideal_reg() const { 21.13 return bottom_type()->ideal_reg(); 21.14 } 21.15 + 21.16 +//-------------------------------is_uncommon_trap_proj---------------------------- 21.17 +// Return true if proj is the form of "proj->[region->..]call_uct" 21.18 +bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) { 21.19 + int path_limit = 10; 21.20 + Node* out = this; 21.21 + for (int ct = 0; ct < path_limit; ct++) { 21.22 + out = out->unique_ctrl_out(); 21.23 + if (out == NULL) 21.24 + return false; 21.25 + if (out->is_CallStaticJava()) { 21.26 + int req = out->as_CallStaticJava()->uncommon_trap_request(); 21.27 + if (req != 0) { 21.28 + Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); 21.29 + if (trap_reason == reason || reason == Deoptimization::Reason_none) { 21.30 + return true; 21.31 + } 21.32 + } 21.33 + return false; // don't do further after call 21.34 + } 21.35 + if (out->Opcode() != Op_Region) 21.36 + return false; 21.37 + } 21.38 + return false; 21.39 +} 21.40 + 21.41 +//-------------------------------is_uncommon_trap_if_pattern------------------------- 21.42 +// Return true for "if(test)-> proj -> ... 21.43 +// | 21.44 +// V 21.45 +// other_proj->[region->..]call_uct" 21.46 +// 21.47 +// "must_reason_predicate" means the uct reason must be Reason_predicate 21.48 +bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) { 21.49 + Node *in0 = in(0); 21.50 + if (!in0->is_If()) return false; 21.51 + // Variation of a dead If node. 
21.52 + if (in0->outcnt() < 2) return false; 21.53 + IfNode* iff = in0->as_If(); 21.54 + 21.55 + // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate 21.56 + if (reason != Deoptimization::Reason_none) { 21.57 + if (iff->in(1)->Opcode() != Op_Conv2B || 21.58 + iff->in(1)->in(1)->Opcode() != Op_Opaque1) { 21.59 + return false; 21.60 + } 21.61 + } 21.62 + 21.63 + ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj(); 21.64 + if (other_proj->is_uncommon_trap_proj(reason)) { 21.65 + assert(reason == Deoptimization::Reason_none || 21.66 + Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); 21.67 + return true; 21.68 + } 21.69 + return false; 21.70 +}
22.1 --- a/src/share/vm/opto/multnode.hpp Mon Oct 21 14:38:11 2013 -0700 22.2 +++ b/src/share/vm/opto/multnode.hpp Mon Oct 21 17:34:27 2013 -0700 22.3 @@ -88,6 +88,14 @@ 22.4 #ifndef PRODUCT 22.5 virtual void dump_spec(outputStream *st) const; 22.6 #endif 22.7 + 22.8 + // Return true if proj is for "proj->[region->..]call_uct" 22.9 + bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason); 22.10 + // Return true for "if(test)-> proj -> ... 22.11 + // | 22.12 + // V 22.13 + // other_proj->[region->..]call_uct" 22.14 + bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason); 22.15 }; 22.16 22.17 #endif // SHARE_VM_OPTO_MULTNODE_HPP
23.1 --- a/src/share/vm/opto/parse.hpp Mon Oct 21 14:38:11 2013 -0700 23.2 +++ b/src/share/vm/opto/parse.hpp Mon Oct 21 17:34:27 2013 -0700 23.3 @@ -349,13 +349,15 @@ 23.4 int _est_switch_depth; // Debugging SwitchRanges. 23.5 #endif 23.6 23.7 + // parser for the caller of the method of this object 23.8 + Parse* const _parent; 23.9 + 23.10 public: 23.11 // Constructor 23.12 - Parse(JVMState* caller, ciMethod* parse_method, float expected_uses); 23.13 + Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent); 23.14 23.15 virtual Parse* is_Parse() const { return (Parse*)this; } 23.16 23.17 - public: 23.18 // Accessors. 23.19 JVMState* caller() const { return _caller; } 23.20 float expected_uses() const { return _expected_uses; } 23.21 @@ -407,6 +409,8 @@ 23.22 return block()->successor_for_bci(bci); 23.23 } 23.24 23.25 + Parse* parent_parser() const { return _parent; } 23.26 + 23.27 private: 23.28 // Create a JVMS & map for the initial state of this method. 23.29 SafePointNode* create_entry_map();
24.1 --- a/src/share/vm/opto/parse1.cpp Mon Oct 21 14:38:11 2013 -0700 24.2 +++ b/src/share/vm/opto/parse1.cpp Mon Oct 21 17:34:27 2013 -0700 24.3 @@ -381,8 +381,8 @@ 24.4 24.5 //------------------------------Parse------------------------------------------ 24.6 // Main parser constructor. 24.7 -Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) 24.8 - : _exits(caller) 24.9 +Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent) 24.10 + : _exits(caller), _parent(parent) 24.11 { 24.12 // Init some variables 24.13 _caller = caller;
25.1 --- a/src/share/vm/opto/reg_split.cpp Mon Oct 21 14:38:11 2013 -0700 25.2 +++ b/src/share/vm/opto/reg_split.cpp Mon Oct 21 17:34:27 2013 -0700 25.3 @@ -51,15 +51,6 @@ 25.4 25.5 static const char out_of_nodes[] = "out of nodes during split"; 25.6 25.7 -static bool contains_no_live_range_input(const Node* def) { 25.8 - for (uint i = 1; i < def->req(); ++i) { 25.9 - if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) { 25.10 - return false; 25.11 - } 25.12 - } 25.13 - return true; 25.14 -} 25.15 - 25.16 //------------------------------get_spillcopy_wide----------------------------- 25.17 // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the 25.18 // wide ideal-register spill-mask if possible. If the 'wide-mask' does 25.19 @@ -326,12 +317,11 @@ 25.20 if( def->req() > 1 ) { 25.21 for( uint i = 1; i < def->req(); i++ ) { 25.22 Node *in = def->in(i); 25.23 - // Check for single-def (LRG cannot redefined) 25.24 uint lidx = _lrg_map.live_range_id(in); 25.25 - if (lidx >= _lrg_map.max_lrg_id()) { 25.26 - continue; // Value is a recent spill-copy 25.27 - } 25.28 - if (lrgs(lidx).is_singledef()) { 25.29 + // We do not need this for live ranges that are only defined once. 25.30 + // However, this is not true for spill copies that are added in this 25.31 + // Split() pass, since they might get coalesced later on in this pass. 25.32 + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) { 25.33 continue; 25.34 } 25.35 25.36 @@ -1327,7 +1317,7 @@ 25.37 Node *def = Reaches[pidx][slidx]; 25.38 assert( def, "must have reaching def" ); 25.39 // If input up/down sense and reg-pressure DISagree 25.40 - if (def->rematerialize() && contains_no_live_range_input(def)) { 25.41 + if (def->rematerialize()) { 25.42 // Place the rematerialized node above any MSCs created during 25.43 // phi node splitting. end_idx points at the insertion point 25.44 // so look at the node before it.
26.1 --- a/test/compiler/print/PrintInlining.java Mon Oct 21 14:38:11 2013 -0700 26.2 +++ b/test/compiler/print/PrintInlining.java Mon Oct 21 17:34:27 2013 -0700 26.3 @@ -25,7 +25,7 @@ 26.4 * @test 26.5 * @bug 8022585 26.6 * @summary VM crashes when ran with -XX:+PrintInlining 26.7 - * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining 26.8 + * @run main/othervm -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining PrintInlining 26.9 * 26.10 */ 26.11
27.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 27.2 +++ b/test/compiler/tiered/CompLevelsTest.java Mon Oct 21 17:34:27 2013 -0700 27.3 @@ -0,0 +1,75 @@ 27.4 +/* 27.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 27.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 27.7 + * 27.8 + * This code is free software; you can redistribute it and/or modify it 27.9 + * under the terms of the GNU General Public License version 2 only, as 27.10 + * published by the Free Software Foundation. 27.11 + * 27.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 27.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 27.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 27.15 + * version 2 for more details (a copy is included in the LICENSE file that 27.16 + * accompanied this code). 27.17 + * 27.18 + * You should have received a copy of the GNU General Public License version 27.19 + * 2 along with this work; if not, write to the Free Software Foundation, 27.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 27.21 + * 27.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 27.23 + * or visit www.oracle.com if you need additional information or have any 27.24 + * questions. 27.25 + */ 27.26 + 27.27 +/** 27.28 + * Abstract class for testing of used compilation levels correctness. 27.29 + * 27.30 + * @author igor.ignatyev@oracle.com 27.31 + */ 27.32 +public abstract class CompLevelsTest extends CompilerWhiteBoxTest { 27.33 + protected CompLevelsTest(TestCase testCase) { 27.34 + super(testCase); 27.35 + // to prevent inlining of #method 27.36 + WHITE_BOX.testSetDontInlineMethod(method, true); 27.37 + } 27.38 + 27.39 + /** 27.40 + * Checks that level is available. 
27.41 + * @param compLevel level to check 27.42 + */ 27.43 + protected void testAvailableLevel(int compLevel, int bci) { 27.44 + if (IS_VERBOSE) { 27.45 + System.out.printf("testAvailableLevel(level = %d, bci = %d)%n", 27.46 + compLevel, bci); 27.47 + } 27.48 + WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci); 27.49 + checkCompiled(); 27.50 + checkLevel(compLevel, getCompLevel()); 27.51 + deoptimize(); 27.52 + } 27.53 + 27.54 + /** 27.55 + * Checks that level is unavailable. 27.56 + * @param compLevel level to check 27.57 + */ 27.58 + protected void testUnavailableLevel(int compLevel, int bci) { 27.59 + if (IS_VERBOSE) { 27.60 + System.out.printf("testUnavailableLevel(level = %d, bci = %d)%n", 27.61 + compLevel, bci); 27.62 + } 27.63 + WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci); 27.64 + checkNotCompiled(); 27.65 + } 27.66 + 27.67 + /** 27.68 + * Checks validity of compilation level. 27.69 + * @param expected expected level 27.70 + * @param actual actual level 27.71 + */ 27.72 + protected void checkLevel(int expected, int actual) { 27.73 + if (expected != actual) { 27.74 + throw new RuntimeException("expected[" + expected + "] != actual[" 27.75 + + actual + "]"); 27.76 + } 27.77 + } 27.78 +}
28.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 28.2 +++ b/test/compiler/tiered/NonTieredLevelsTest.java Mon Oct 21 17:34:27 2013 -0700 28.3 @@ -0,0 +1,96 @@ 28.4 +/* 28.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 28.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 28.7 + * 28.8 + * This code is free software; you can redistribute it and/or modify it 28.9 + * under the terms of the GNU General Public License version 2 only, as 28.10 + * published by the Free Software Foundation. 28.11 + * 28.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 28.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 28.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 28.15 + * version 2 for more details (a copy is included in the LICENSE file that 28.16 + * accompanied this code). 28.17 + * 28.18 + * You should have received a copy of the GNU General Public License version 28.19 + * 2 along with this work; if not, write to the Free Software Foundation, 28.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 28.21 + * 28.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 28.23 + * or visit www.oracle.com if you need additional information or have any 28.24 + * questions. 28.25 + */ 28.26 + 28.27 +import java.util.function.IntPredicate; 28.28 + 28.29 +/** 28.30 + * @test NonTieredLevelsTest 28.31 + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox 28.32 + * @build NonTieredLevelsTest 28.33 + * @run main ClassFileInstaller sun.hotspot.WhiteBox 28.34 + * @run main/othervm -Xbootclasspath/a:. 
-XX:-TieredCompilation 28.35 + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI 28.36 + * -XX:CompileCommand=compileonly,TestCase$Helper::* 28.37 + * NonTieredLevelsTest 28.38 + * @summary Verify that only one level can be used 28.39 + * @author igor.ignatyev@oracle.com 28.40 + */ 28.41 +public class NonTieredLevelsTest extends CompLevelsTest { 28.42 + private static final int AVAILABLE_COMP_LEVEL; 28.43 + private static final IntPredicate IS_AVAILABLE_COMPLEVEL; 28.44 + static { 28.45 + String vmName = System.getProperty("java.vm.name"); 28.46 + if (vmName.endsWith(" Server VM")) { 28.47 + AVAILABLE_COMP_LEVEL = COMP_LEVEL_FULL_OPTIMIZATION; 28.48 + IS_AVAILABLE_COMPLEVEL = x -> x == COMP_LEVEL_FULL_OPTIMIZATION; 28.49 + } else if (vmName.endsWith(" Client VM") 28.50 + || vmName.endsWith(" Minimal VM")) { 28.51 + AVAILABLE_COMP_LEVEL = COMP_LEVEL_SIMPLE; 28.52 + IS_AVAILABLE_COMPLEVEL = x -> x >= COMP_LEVEL_SIMPLE 28.53 + && x <= COMP_LEVEL_FULL_PROFILE; 28.54 + } else { 28.55 + throw new RuntimeException("Unknown VM: " + vmName); 28.56 + } 28.57 + 28.58 + } 28.59 + public static void main(String[] args) throws Exception { 28.60 + if (TIERED_COMPILATION) { 28.61 + System.err.println("Test isn't applicable w/ enabled " 28.62 + + "TieredCompilation. 
Skip test."); 28.63 + return; 28.64 + } 28.65 + for (TestCase test : TestCase.values()) { 28.66 + new NonTieredLevelsTest(test).runTest(); 28.67 + } 28.68 + } 28.69 + 28.70 + private NonTieredLevelsTest(TestCase testCase) { 28.71 + super(testCase); 28.72 + // to prevent inlining of #method 28.73 + WHITE_BOX.testSetDontInlineMethod(method, true); 28.74 + } 28.75 + 28.76 + @Override 28.77 + protected void test() throws Exception { 28.78 + checkNotCompiled(); 28.79 + compile(); 28.80 + checkCompiled(); 28.81 + 28.82 + int compLevel = getCompLevel(); 28.83 + checkLevel(AVAILABLE_COMP_LEVEL, compLevel); 28.84 + int bci = WHITE_BOX.getMethodEntryBci(method); 28.85 + deoptimize(); 28.86 + if (!testCase.isOsr) { 28.87 + for (int level = 1; level <= COMP_LEVEL_MAX; ++level) { 28.88 + if (IS_AVAILABLE_COMPLEVEL.test(level)) { 28.89 + testAvailableLevel(level, bci); 28.90 + } else { 28.91 + testUnavailableLevel(level, bci); 28.92 + } 28.93 + } 28.94 + } else { 28.95 + System.out.println("skip other levels testing in OSR"); 28.96 + testAvailableLevel(AVAILABLE_COMP_LEVEL, bci); 28.97 + } 28.98 + } 28.99 +}
29.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 29.2 +++ b/test/compiler/tiered/TieredLevelsTest.java Mon Oct 21 17:34:27 2013 -0700 29.3 @@ -0,0 +1,88 @@ 29.4 +/* 29.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 29.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.7 + * 29.8 + * This code is free software; you can redistribute it and/or modify it 29.9 + * under the terms of the GNU General Public License version 2 only, as 29.10 + * published by the Free Software Foundation. 29.11 + * 29.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 29.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 29.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 29.15 + * version 2 for more details (a copy is included in the LICENSE file that 29.16 + * accompanied this code). 29.17 + * 29.18 + * You should have received a copy of the GNU General Public License version 29.19 + * 2 along with this work; if not, write to the Free Software Foundation, 29.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 29.21 + * 29.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 29.23 + * or visit www.oracle.com if you need additional information or have any 29.24 + * questions. 29.25 + */ 29.26 + 29.27 +/** 29.28 + * @test TieredLevelsTest 29.29 + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox 29.30 + * @build TieredLevelsTest 29.31 + * @run main ClassFileInstaller sun.hotspot.WhiteBox 29.32 + * @run main/othervm -Xbootclasspath/a:. 
-XX:+TieredCompilation 29.33 + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI 29.34 + * -XX:CompileCommand=compileonly,TestCase$Helper::* 29.35 + * TieredLevelsTest 29.36 + * @summary Verify that all levels < 'TieredStopAtLevel' can be used 29.37 + * @author igor.ignatyev@oracle.com 29.38 + */ 29.39 +public class TieredLevelsTest extends CompLevelsTest { 29.40 + public static void main(String[] args) throws Exception { 29.41 + if (!TIERED_COMPILATION) { 29.42 + System.err.println("Test isn't applicable w/ disabled " 29.43 + + "TieredCompilation. Skip test."); 29.44 + return; 29.45 + } 29.46 + for (TestCase test : TestCase.values()) { 29.47 + new TieredLevelsTest(test).runTest(); 29.48 + } 29.49 + } 29.50 + 29.51 + private TieredLevelsTest(TestCase testCase) { 29.52 + super(testCase); 29.53 + // to prevent inlining of #method 29.54 + WHITE_BOX.testSetDontInlineMethod(method, true); 29.55 + } 29.56 + 29.57 + @Override 29.58 + protected void test() throws Exception { 29.59 + checkNotCompiled(); 29.60 + compile(); 29.61 + checkCompiled(); 29.62 + 29.63 + int compLevel = getCompLevel(); 29.64 + if (compLevel > TIERED_STOP_AT_LEVEL) { 29.65 + throw new RuntimeException("method.compLevel[" + compLevel 29.66 + + "] > TieredStopAtLevel [" + TIERED_STOP_AT_LEVEL + "]"); 29.67 + } 29.68 + int bci = WHITE_BOX.getMethodEntryBci(method); 29.69 + deoptimize(); 29.70 + 29.71 + for (int testedTier = 1; testedTier <= TIERED_STOP_AT_LEVEL; 29.72 + ++testedTier) { 29.73 + testAvailableLevel(testedTier, bci); 29.74 + } 29.75 + for (int testedTier = TIERED_STOP_AT_LEVEL + 1; 29.76 + testedTier <= COMP_LEVEL_MAX; ++testedTier) { 29.77 + testUnavailableLevel(testedTier, bci); 29.78 + } 29.79 + } 29.80 + 29.81 + 29.82 + @Override 29.83 + protected void checkLevel(int expected, int actual) { 29.84 + if (expected == COMP_LEVEL_FULL_PROFILE 29.85 + && actual == COMP_LEVEL_LIMITED_PROFILE) { 29.86 + // for simple method full_profile may be replaced by limited_profile 29.87 + return; 29.88 + 
} 29.89 + super.checkLevel(expected, actual); 29.90 + } 29.91 +}
30.1 --- a/test/compiler/whitebox/CompilerWhiteBoxTest.java Mon Oct 21 14:38:11 2013 -0700 30.2 +++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java Mon Oct 21 17:34:27 2013 -0700 30.3 @@ -80,8 +80,7 @@ 30.4 30.5 static { 30.6 if (TIERED_COMPILATION) { 30.7 - THRESHOLD = 150000; 30.8 - BACKEDGE_THRESHOLD = 0xFFFFFFFFL; 30.9 + BACKEDGE_THRESHOLD = THRESHOLD = 150000; 30.10 } else { 30.11 THRESHOLD = COMPILE_THRESHOLD; 30.12 BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption( 30.13 @@ -364,7 +363,7 @@ 30.14 /** OSR constructor test case */ 30.15 OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR, 30.16 Helper.OSR_CONSTRUCTOR_CALLABLE, true), 30.17 - /** OSR method test case */ 30.18 + /** OSR method test case */ 30.19 OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true), 30.20 /** OSR static method test case */ 30.21 OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true); 30.22 @@ -373,7 +372,7 @@ 30.23 final Executable executable; 30.24 /** object to invoke {@linkplain #executable} */ 30.25 final Callable<Integer> callable; 30.26 - /** flag for OSR test case */ 30.27 + /** flag for OSR test case */ 30.28 final boolean isOsr; 30.29 30.30 private TestCase(Executable executable, Callable<Integer> callable,