Thu, 03 Aug 2017 08:07:17 -0700
Merge
1.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Tue Jul 25 07:47:13 2017 -0700 1.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Aug 03 08:07:17 2017 -0700 1.3 @@ -98,7 +98,7 @@ 1.4 } 1.5 pop(rax); 1.6 #endif 1.7 - reset_last_Java_frame(thread, true, align_stack); 1.8 + reset_last_Java_frame(thread, true); 1.9 1.10 // discard thread and arguments 1.11 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); 1.12 @@ -882,7 +882,7 @@ 1.13 } 1.14 __ pop(rax); 1.15 #endif 1.16 - __ reset_last_Java_frame(thread, true, false); 1.17 + __ reset_last_Java_frame(thread, true); 1.18 #ifndef _LP64 1.19 __ pop(rcx); // discard thread arg 1.20 __ pop(rcx); // discard dummy
2.1 --- a/src/cpu/x86/vm/frame_x86.cpp Tue Jul 25 07:47:13 2017 -0700 2.2 +++ b/src/cpu/x86/vm/frame_x86.cpp Thu Aug 03 08:07:17 2017 -0700 2.3 @@ -370,13 +370,16 @@ 2.4 JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); 2.5 assert(!entry_frame_is_first(), "next Java fp must be non zero"); 2.6 assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); 2.7 + // Since we are walking the stack now this nested anchor is obviously walkable 2.8 + // even if it wasn't when it was stacked. 2.9 + if (!jfa->walkable()) { 2.10 + // Capture _last_Java_pc (if needed) and mark anchor walkable. 2.11 + jfa->capture_last_Java_pc(); 2.12 + } 2.13 map->clear(); 2.14 assert(map->include_argument_oops(), "should be set by clear"); 2.15 - if (jfa->last_Java_pc() != NULL ) { 2.16 - frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); 2.17 - return fr; 2.18 - } 2.19 - frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); 2.20 + assert(jfa->last_Java_pc() != NULL, "not walkable"); 2.21 + frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); 2.22 return fr; 2.23 } 2.24 2.25 @@ -714,3 +717,21 @@ 2.26 init((intptr_t*)sp, (intptr_t*)fp, (address)pc); 2.27 } 2.28 #endif 2.29 + 2.30 +void JavaFrameAnchor::make_walkable(JavaThread* thread) { 2.31 + // last frame set? 2.32 + if (last_Java_sp() == NULL) return; 2.33 + // already walkable? 2.34 + if (walkable()) return; 2.35 + assert(Thread::current() == (Thread*)thread, "not current thread"); 2.36 + assert(last_Java_sp() != NULL, "not called from Java code?"); 2.37 + assert(last_Java_pc() == NULL, "already walkable"); 2.38 + capture_last_Java_pc(); 2.39 + assert(walkable(), "something went wrong"); 2.40 +} 2.41 + 2.42 +void JavaFrameAnchor::capture_last_Java_pc() { 2.43 + assert(_last_Java_sp != NULL, "no last frame set"); 2.44 + assert(_last_Java_pc == NULL, "already walkable"); 2.45 + _last_Java_pc = (address)_last_Java_sp[-1]; 2.46 +}
3.1 --- a/src/cpu/x86/vm/frame_x86.inline.hpp Tue Jul 25 07:47:13 2017 -0700 3.2 +++ b/src/cpu/x86/vm/frame_x86.inline.hpp Thu Aug 03 08:07:17 2017 -0700 3.3 @@ -96,6 +96,7 @@ 3.4 // call a specialized frame constructor instead of this one. 3.5 // Then we could use the assert below. However this assert is of somewhat dubious 3.6 // value. 3.7 + // UPDATE: this constructor is only used by trace_method_handle_stub() now. 3.8 // assert(_pc != NULL, "no pc?"); 3.9 3.10 _cb = CodeCache::find_blob(_pc);
4.1 --- a/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Tue Jul 25 07:47:13 2017 -0700 4.2 +++ b/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Thu Aug 03 08:07:17 2017 -0700 4.3 @@ -62,10 +62,9 @@ 4.4 _last_Java_sp = src->_last_Java_sp; 4.5 } 4.6 4.7 - // Always walkable 4.8 - bool walkable(void) { return true; } 4.9 - // Never any thing to do since we are always walkable and can find address of return addresses 4.10 - void make_walkable(JavaThread* thread) { } 4.11 + bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } 4.12 + void make_walkable(JavaThread* thread); 4.13 + void capture_last_Java_pc(void); 4.14 4.15 intptr_t* last_Java_sp(void) const { return _last_Java_sp; } 4.16
5.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Tue Jul 25 07:47:13 2017 -0700 5.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Aug 03 08:07:17 2017 -0700 5.3 @@ -748,8 +748,7 @@ 5.4 } 5.5 } 5.6 5.7 -void MacroAssembler::reset_last_Java_frame(bool clear_fp, 5.8 - bool clear_pc) { 5.9 +void MacroAssembler::reset_last_Java_frame(bool clear_fp) { 5.10 // we must set sp to zero to clear frame 5.11 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 5.12 // must clear fp, so that compiled frames are not confused; it is 5.13 @@ -758,9 +757,8 @@ 5.14 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 5.15 } 5.16 5.17 - if (clear_pc) { 5.18 - movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 5.19 - } 5.20 + // Always clear the pc because it could have been set by make_walkable() 5.21 + movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 5.22 } 5.23 5.24 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 5.25 @@ -2561,7 +2559,7 @@ 5.26 } 5.27 // reset last Java frame 5.28 // Only interpreter should have to clear fp 5.29 - reset_last_Java_frame(java_thread, true, false); 5.30 + reset_last_Java_frame(java_thread, true); 5.31 5.32 #ifndef CC_INTERP 5.33 // C++ interp handles this in the interpreter 5.34 @@ -3808,7 +3806,7 @@ 5.35 pusha(); 5.36 } 5.37 5.38 -void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { 5.39 +void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { 5.40 // determine java_thread register 5.41 if (!java_thread->is_valid()) { 5.42 java_thread = rdi; 5.43 @@ -3820,8 +3818,8 @@ 5.44 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 5.45 } 5.46 5.47 - if (clear_pc) 5.48 - movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 5.49 + // Always clear the pc because it could have been set by make_walkable() 5.50 + movptr(Address(java_thread, 
JavaThread::last_Java_pc_offset()), NULL_WORD); 5.51 5.52 } 5.53
6.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jul 25 07:47:13 2017 -0700 6.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Thu Aug 03 08:07:17 2017 -0700 6.3 @@ -289,10 +289,10 @@ 6.4 Register last_java_fp, 6.5 address last_java_pc); 6.6 6.7 - void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); 6.8 + void reset_last_Java_frame(Register thread, bool clear_fp); 6.9 6.10 // thread in the default location (r15_thread on 64bit) 6.11 - void reset_last_Java_frame(bool clear_fp, bool clear_pc); 6.12 + void reset_last_Java_frame(bool clear_fp); 6.13 6.14 // Stores 6.15 void store_check(Register obj); // store check for obj - register is destroyed afterwards
7.1 --- a/src/cpu/x86/vm/runtime_x86_32.cpp Tue Jul 25 07:47:13 2017 -0700 7.2 +++ b/src/cpu/x86/vm/runtime_x86_32.cpp Thu Aug 03 08:07:17 2017 -0700 7.3 @@ -116,7 +116,7 @@ 7.4 // No registers to map, rbp is known implicitly 7.5 oop_maps->add_gc_map( __ pc() - start, new OopMap( framesize, 0 )); 7.6 __ get_thread(rcx); 7.7 - __ reset_last_Java_frame(rcx, false, false); 7.8 + __ reset_last_Java_frame(rcx, false); 7.9 7.10 // Restore callee-saved registers 7.11 __ movptr(rbp, Address(rsp, rbp_off * wordSize));
8.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Jul 25 07:47:13 2017 -0700 8.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Thu Aug 03 08:07:17 2017 -0700 8.3 @@ -1333,7 +1333,7 @@ 8.4 __ increment(rsp, wordSize); 8.5 8.6 __ get_thread(thread); 8.7 - __ reset_last_Java_frame(thread, false, true); 8.8 + __ reset_last_Java_frame(thread, false); 8.9 8.10 save_or_restore_arguments(masm, stack_slots, total_in_args, 8.11 arg_save_area, NULL, in_regs, in_sig_bt); 8.12 @@ -2251,7 +2251,7 @@ 8.13 8.14 // We can finally stop using that last_Java_frame we setup ages ago 8.15 8.16 - __ reset_last_Java_frame(thread, false, true); 8.17 + __ reset_last_Java_frame(thread, false); 8.18 8.19 // Unpack oop result 8.20 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 8.21 @@ -2951,7 +2951,7 @@ 8.22 __ pop(rcx); 8.23 8.24 __ get_thread(rcx); 8.25 - __ reset_last_Java_frame(rcx, false, false); 8.26 + __ reset_last_Java_frame(rcx, false); 8.27 8.28 // Load UnrollBlock into EDI 8.29 __ mov(rdi, rax); 8.30 @@ -3117,7 +3117,7 @@ 8.31 __ push(rax); 8.32 8.33 __ get_thread(rcx); 8.34 - __ reset_last_Java_frame(rcx, false, false); 8.35 + __ reset_last_Java_frame(rcx, false); 8.36 8.37 // Collect return values 8.38 __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize)); 8.39 @@ -3219,7 +3219,7 @@ 8.40 8.41 __ get_thread(rcx); 8.42 8.43 - __ reset_last_Java_frame(rcx, false, false); 8.44 + __ reset_last_Java_frame(rcx, false); 8.45 8.46 // Load UnrollBlock into EDI 8.47 __ movptr(rdi, rax); 8.48 @@ -3331,7 +3331,7 @@ 8.49 oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) ); 8.50 8.51 __ get_thread(rdi); 8.52 - __ reset_last_Java_frame(rdi, true, false); 8.53 + __ reset_last_Java_frame(rdi, true); 8.54 8.55 // Pop self-frame. 8.56 __ leave(); // Epilog! 
8.57 @@ -3426,7 +3426,7 @@ 8.58 8.59 // Clear last_Java_sp again 8.60 __ get_thread(java_thread); 8.61 - __ reset_last_Java_frame(java_thread, false, false); 8.62 + __ reset_last_Java_frame(java_thread, false); 8.63 8.64 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 8.65 __ jcc(Assembler::equal, noException); 8.66 @@ -3501,7 +3501,7 @@ 8.67 __ addptr(rsp, wordSize); 8.68 8.69 // clear last_Java_sp 8.70 - __ reset_last_Java_frame(thread, true, false); 8.71 + __ reset_last_Java_frame(thread, true); 8.72 // check for pending exceptions 8.73 Label pending; 8.74 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
9.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Jul 25 07:47:13 2017 -0700 9.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Aug 03 08:07:17 2017 -0700 9.3 @@ -1388,7 +1388,7 @@ 9.4 __ mov(rsp, r12); // restore sp 9.5 __ reinit_heapbase(); 9.6 9.7 - __ reset_last_Java_frame(false, true); 9.8 + __ reset_last_Java_frame(false); 9.9 9.10 save_or_restore_arguments(masm, stack_slots, total_in_args, 9.11 arg_save_area, NULL, in_regs, in_sig_bt); 9.12 @@ -2497,7 +2497,7 @@ 9.13 restore_native_result(masm, ret_type, stack_slots); 9.14 } 9.15 9.16 - __ reset_last_Java_frame(false, true); 9.17 + __ reset_last_Java_frame(false); 9.18 9.19 // Unpack oop result 9.20 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 9.21 @@ -3435,7 +3435,7 @@ 9.22 // find any register it might need. 9.23 oop_maps->add_gc_map(__ pc() - start, map); 9.24 9.25 - __ reset_last_Java_frame(false, false); 9.26 + __ reset_last_Java_frame(false); 9.27 9.28 // Load UnrollBlock* into rdi 9.29 __ mov(rdi, rax); 9.30 @@ -3592,7 +3592,7 @@ 9.31 new OopMap( frame_size_in_words, 0 )); 9.32 9.33 // Clear fp AND pc 9.34 - __ reset_last_Java_frame(true, true); 9.35 + __ reset_last_Java_frame(true); 9.36 9.37 // Collect return values 9.38 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes())); 9.39 @@ -3662,7 +3662,7 @@ 9.40 9.41 oop_maps->add_gc_map(__ pc() - start, map); 9.42 9.43 - __ reset_last_Java_frame(false, false); 9.44 + __ reset_last_Java_frame(false); 9.45 9.46 // Load UnrollBlock* into rdi 9.47 __ mov(rdi, rax); 9.48 @@ -3775,7 +3775,7 @@ 9.49 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); 9.50 9.51 // Clear fp AND pc 9.52 - __ reset_last_Java_frame(true, true); 9.53 + __ reset_last_Java_frame(true); 9.54 9.55 // Pop self-frame. 
9.56 __ leave(); // Epilog 9.57 @@ -3858,7 +3858,7 @@ 9.58 9.59 Label noException; 9.60 9.61 - __ reset_last_Java_frame(false, false); 9.62 + __ reset_last_Java_frame(false); 9.63 9.64 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 9.65 __ jcc(Assembler::equal, noException); 9.66 @@ -3928,7 +3928,7 @@ 9.67 // rax contains the address we are going to jump to assuming no exception got installed 9.68 9.69 // clear last_Java_sp 9.70 - __ reset_last_Java_frame(false, false); 9.71 + __ reset_last_Java_frame(false); 9.72 // check for pending exceptions 9.73 Label pending; 9.74 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 9.75 @@ -4309,7 +4309,7 @@ 9.76 9.77 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); 9.78 9.79 - __ reset_last_Java_frame(false, true); 9.80 + __ reset_last_Java_frame(false); 9.81 9.82 // Restore callee-saved registers 9.83
10.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jul 25 07:47:13 2017 -0700 10.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Aug 03 08:07:17 2017 -0700 10.3 @@ -2901,7 +2901,7 @@ 10.4 // however can use the register value directly if it is callee saved. 10.5 __ get_thread(java_thread); 10.6 10.7 - __ reset_last_Java_frame(java_thread, true, false); 10.8 + __ reset_last_Java_frame(java_thread, true); 10.9 10.10 __ leave(); // required for proper stackwalking of RuntimeStub frame 10.11
11.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jul 25 07:47:13 2017 -0700 11.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Aug 03 08:07:17 2017 -0700 11.3 @@ -3923,7 +3923,7 @@ 11.4 11.5 oop_maps->add_gc_map(the_pc - start, map); 11.6 11.7 - __ reset_last_Java_frame(true, true); 11.8 + __ reset_last_Java_frame(true); 11.9 11.10 __ leave(); // required for proper stackwalking of RuntimeStub frame 11.11
12.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jul 25 07:47:13 2017 -0700 12.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Aug 03 08:07:17 2017 -0700 12.3 @@ -1289,7 +1289,7 @@ 12.4 // change thread state 12.5 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 12.6 12.7 - __ reset_last_Java_frame(thread, true, true); 12.8 + __ reset_last_Java_frame(thread, true); 12.9 12.10 // reset handle block 12.11 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 12.12 @@ -1819,7 +1819,7 @@ 12.13 __ set_last_Java_frame(thread, noreg, rbp, __ pc()); 12.14 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx); 12.15 __ get_thread(thread); 12.16 - __ reset_last_Java_frame(thread, true, true); 12.17 + __ reset_last_Java_frame(thread, true); 12.18 // Restore the last_sp and null it out 12.19 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 12.20 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
13.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jul 25 07:47:13 2017 -0700 13.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Aug 03 08:07:17 2017 -0700 13.3 @@ -1262,7 +1262,7 @@ 13.4 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); 13.5 13.6 // reset_last_Java_frame 13.7 - __ reset_last_Java_frame(true, true); 13.8 + __ reset_last_Java_frame(r15_thread, true); 13.9 13.10 // reset handle block 13.11 __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset())); 13.12 @@ -1837,7 +1837,7 @@ 13.13 // PC must point into interpreter here 13.14 __ set_last_Java_frame(noreg, rbp, __ pc()); 13.15 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); 13.16 - __ reset_last_Java_frame(true, true); 13.17 + __ reset_last_Java_frame(r15_thread, true); 13.18 // Restore the last_sp and null it out 13.19 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 13.20 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
14.1 --- a/src/os/linux/vm/os_linux.cpp Tue Jul 25 07:47:13 2017 -0700 14.2 +++ b/src/os/linux/vm/os_linux.cpp Thu Aug 03 08:07:17 2017 -0700 14.3 @@ -2819,11 +2819,8 @@ 14.4 extern "C" JNIEXPORT void numa_error(char *where) { } 14.5 extern "C" JNIEXPORT int fork1() { return fork(); } 14.6 14.7 - 14.8 -// If we are running with libnuma version > 2, then we should 14.9 -// be trying to use symbols with versions 1.1 14.10 -// If we are running with earlier version, which did not have symbol versions, 14.11 -// we should use the base version. 14.12 +// Handle request to load libnuma symbol version 1.1 (API v1). If it fails 14.13 +// load symbol from base version instead. 14.14 void* os::Linux::libnuma_dlsym(void* handle, const char *name) { 14.15 void *f = dlvsym(handle, name, "libnuma_1.1"); 14.16 if (f == NULL) { 14.17 @@ -2832,6 +2829,12 @@ 14.18 return f; 14.19 } 14.20 14.21 +// Handle request to load libnuma symbol version 1.2 (API v2) only. 14.22 +// Return NULL if the symbol is not defined in this particular version. 14.23 +void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) { 14.24 + return dlvsym(handle, name, "libnuma_1.2"); 14.25 +} 14.26 + 14.27 bool os::Linux::libnuma_init() { 14.28 // sched_getcpu() should be in libc. 
14.29 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, 14.30 @@ -2856,6 +2859,8 @@ 14.31 libnuma_dlsym(handle, "numa_tonode_memory"))); 14.32 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, 14.33 libnuma_dlsym(handle, "numa_interleave_memory"))); 14.34 + set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t, 14.35 + libnuma_v2_dlsym(handle, "numa_interleave_memory"))); 14.36 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t, 14.37 libnuma_dlsym(handle, "numa_set_bind_policy"))); 14.38 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t, 14.39 @@ -2975,6 +2980,7 @@ 14.40 os::Linux::numa_available_func_t os::Linux::_numa_available; 14.41 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; 14.42 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; 14.43 +os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2; 14.44 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; 14.45 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset; 14.46 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
15.1 --- a/src/os/linux/vm/os_linux.hpp Tue Jul 25 07:47:13 2017 -0700 15.2 +++ b/src/os/linux/vm/os_linux.hpp Thu Aug 03 08:07:17 2017 -0700 15.3 @@ -190,6 +190,8 @@ 15.4 static void libpthread_init(); 15.5 static bool libnuma_init(); 15.6 static void* libnuma_dlsym(void* handle, const char* name); 15.7 + // libnuma v2 (libnuma_1.2) symbols 15.8 + static void* libnuma_v2_dlsym(void* handle, const char* name); 15.9 // Minimum stack size a thread can be created with (allowing 15.10 // the VM to completely create the thread and enter user code) 15.11 static size_t min_stack_allowed; 15.12 @@ -250,6 +252,8 @@ 15.13 typedef int (*numa_available_func_t)(void); 15.14 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node); 15.15 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); 15.16 + typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask); 15.17 + 15.18 typedef void (*numa_set_bind_policy_func_t)(int policy); 15.19 typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n); 15.20 typedef int (*numa_distance_func_t)(int node1, int node2); 15.21 @@ -261,6 +265,7 @@ 15.22 static numa_available_func_t _numa_available; 15.23 static numa_tonode_memory_func_t _numa_tonode_memory; 15.24 static numa_interleave_memory_func_t _numa_interleave_memory; 15.25 + static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2; 15.26 static numa_set_bind_policy_func_t _numa_set_bind_policy; 15.27 static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset; 15.28 static numa_distance_func_t _numa_distance; 15.29 @@ -275,6 +280,7 @@ 15.30 static void set_numa_available(numa_available_func_t func) { _numa_available = func; } 15.31 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; } 15.32 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; } 15.33 + static 
void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; } 15.34 static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; } 15.35 static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; } 15.36 static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; } 15.37 @@ -296,7 +302,10 @@ 15.38 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1; 15.39 } 15.40 static void numa_interleave_memory(void *start, size_t size) { 15.41 - if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { 15.42 + // Use v2 api if available 15.43 + if (_numa_interleave_memory_v2 != NULL && _numa_all_nodes_ptr != NULL) { 15.44 + _numa_interleave_memory_v2(start, size, _numa_all_nodes_ptr); 15.45 + } else if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { 15.46 _numa_interleave_memory(start, size, _numa_all_nodes); 15.47 } 15.48 }
16.1 --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Tue Jul 25 07:47:13 2017 -0700 16.2 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Thu Aug 03 08:07:17 2017 -0700 16.3 @@ -44,7 +44,7 @@ 16.4 16.5 // If we have a last_Java_frame, then we should use it even if 16.6 // isInJava == true. It should be more reliable than ucontext info. 16.7 - if (jt->has_last_Java_frame()) { 16.8 + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { 16.9 *fr_addr = jt->pd_last_frame(); 16.10 return true; 16.11 }
17.1 --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Tue Jul 25 07:47:13 2017 -0700 17.2 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Thu Aug 03 08:07:17 2017 -0700 17.3 @@ -32,12 +32,8 @@ 17.4 17.5 frame pd_last_frame() { 17.6 assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); 17.7 - if (_anchor.last_Java_pc() != NULL) { 17.8 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 17.9 - } else { 17.10 - // This will pick up pc from sp 17.11 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); 17.12 - } 17.13 + assert(_anchor.last_Java_pc() != NULL, "not walkable"); 17.14 + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 17.15 } 17.16 17.17 public:
18.1 --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Tue Jul 25 07:47:13 2017 -0700 18.2 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Thu Aug 03 08:07:17 2017 -0700 18.3 @@ -45,7 +45,7 @@ 18.4 18.5 // If we have a last_Java_frame, then we should use it even if 18.6 // isInJava == true. It should be more reliable than ucontext info. 18.7 - if (jt->has_last_Java_frame()) { 18.8 + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { 18.9 *fr_addr = jt->pd_last_frame(); 18.10 return true; 18.11 }
19.1 --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Tue Jul 25 07:47:13 2017 -0700 19.2 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Thu Aug 03 08:07:17 2017 -0700 19.3 @@ -32,12 +32,8 @@ 19.4 19.5 frame pd_last_frame() { 19.6 assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); 19.7 - if (_anchor.last_Java_pc() != NULL) { 19.8 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 19.9 - } else { 19.10 - // This will pick up pc from sp 19.11 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); 19.12 - } 19.13 + assert(_anchor.last_Java_pc() != NULL, "not walkable"); 19.14 + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 19.15 } 19.16 19.17 public:
20.1 --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Tue Jul 25 07:47:13 2017 -0700 20.2 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Aug 03 08:07:17 2017 -0700 20.3 @@ -442,7 +442,7 @@ 20.4 // is available to us as well 20.5 Sysinfo cpu_info(SI_CPUBRAND); 20.6 bool use_solaris_12_api = cpu_info.valid(); 20.7 - const char* impl; 20.8 + const char* impl = "unknown"; 20.9 int impl_m = 0; 20.10 if (use_solaris_12_api) { 20.11 impl = cpu_info.value(); 20.12 @@ -477,7 +477,7 @@ 20.13 kstat_close(kc); 20.14 } 20.15 } 20.16 - assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl)); 20.17 + assert(impl_m != 0, err_msg("Unrecognized CPU implementation %s", impl)); 20.18 features |= impl_m; 20.19 20.20 bool is_sun4v = (features & sun4v_m) != 0;
21.1 --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Tue Jul 25 07:47:13 2017 -0700 21.2 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Thu Aug 03 08:07:17 2017 -0700 21.3 @@ -44,9 +44,8 @@ 21.4 assert(this->is_Java_thread(), "must be JavaThread"); 21.5 JavaThread* jt = (JavaThread *)this; 21.6 21.7 - // last_Java_frame is always walkable and safe use it if we have it 21.8 - 21.9 - if (jt->has_last_Java_frame()) { 21.10 + // There is small window where last_Java_frame is not walkable or safe 21.11 + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { 21.12 *fr_addr = jt->pd_last_frame(); 21.13 return true; 21.14 }
22.1 --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Tue Jul 25 07:47:13 2017 -0700 22.2 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Thu Aug 03 08:07:17 2017 -0700 22.3 @@ -30,12 +30,8 @@ 22.4 22.5 frame pd_last_frame() { 22.6 assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); 22.7 - if (_anchor.last_Java_pc() != NULL) { 22.8 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 22.9 - } else { 22.10 - // This will pick up pc from sp 22.11 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); 22.12 - } 22.13 + assert(_anchor.last_Java_pc() != NULL, "not walkable"); 22.14 + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 22.15 } 22.16 22.17 public:
23.1 --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Tue Jul 25 07:47:13 2017 -0700 23.2 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Thu Aug 03 08:07:17 2017 -0700 23.3 @@ -47,7 +47,7 @@ 23.4 23.5 // If we have a last_Java_frame, then we should use it even if 23.6 // isInJava == true. It should be more reliable than CONTEXT info. 23.7 - if (jt->has_last_Java_frame()) { 23.8 + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { 23.9 *fr_addr = jt->pd_last_frame(); 23.10 return true; 23.11 }
24.1 --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Tue Jul 25 07:47:13 2017 -0700 24.2 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Thu Aug 03 08:07:17 2017 -0700 24.3 @@ -32,12 +32,8 @@ 24.4 24.5 frame pd_last_frame() { 24.6 assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); 24.7 - if (_anchor.last_Java_pc() != NULL) { 24.8 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 24.9 - } else { 24.10 - // This will pick up pc from sp 24.11 - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); 24.12 - } 24.13 + assert(_anchor.last_Java_pc() != NULL, "not walkable"); 24.14 + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); 24.15 } 24.16 24.17 public:
25.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jul 25 07:47:13 2017 -0700 25.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Aug 03 08:07:17 2017 -0700 25.3 @@ -1530,7 +1530,7 @@ 25.4 ciMethod* caller = state()->scope()->method(); 25.5 ciMethodData* md = caller->method_data_or_null(); 25.6 ciProfileData* data = md->bci_to_data(invoke_bci); 25.7 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 25.8 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 25.9 bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); 25.10 // May not be true in case of an inlined call through a method handle intrinsic. 25.11 if (has_return) { 25.12 @@ -1747,7 +1747,7 @@ 25.13 start = has_receiver ? 1 : 0; 25.14 if (profile_arguments()) { 25.15 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 25.16 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 25.17 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 25.18 n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); 25.19 } 25.20 } 25.21 @@ -4465,7 +4465,7 @@ 25.22 } 25.23 ciMethodData* md = m->method_data_or_null(); 25.24 ciProfileData* data = md->bci_to_data(invoke_bci); 25.25 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 25.26 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 25.27 append(new ProfileReturnType(m , invoke_bci, callee, ret)); 25.28 } 25.29 }
26.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jul 25 07:47:13 2017 -0700 26.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Thu Aug 03 08:07:17 2017 -0700 26.3 @@ -3185,50 +3185,52 @@ 26.4 int bci = x->bci_of_invoke(); 26.5 ciMethodData* md = x->method()->method_data_or_null(); 26.6 ciProfileData* data = md->bci_to_data(bci); 26.7 - if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || 26.8 - (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { 26.9 - ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); 26.10 - int base_offset = md->byte_offset_of_slot(data, extra); 26.11 - LIR_Opr mdp = LIR_OprFact::illegalOpr; 26.12 - ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); 26.13 - 26.14 - Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 26.15 - int start = 0; 26.16 - int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 26.17 - if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 26.18 - // first argument is not profiled at call (method handle invoke) 26.19 - assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 26.20 - start = 1; 26.21 + if (data != NULL) { 26.22 + if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || 26.23 + (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { 26.24 + ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); 26.25 + int base_offset = md->byte_offset_of_slot(data, extra); 26.26 + LIR_Opr mdp = LIR_OprFact::illegalOpr; 26.27 + ciTypeStackSlotEntries* args = data->is_CallTypeData() ? 
((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); 26.28 + 26.29 + Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 26.30 + int start = 0; 26.31 + int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 26.32 + if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 26.33 + // first argument is not profiled at call (method handle invoke) 26.34 + assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 26.35 + start = 1; 26.36 + } 26.37 + ciSignature* callee_signature = x->callee()->signature(); 26.38 + // method handle call to virtual method 26.39 + bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 26.40 + ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); 26.41 + 26.42 + bool ignored_will_link; 26.43 + ciSignature* signature_at_call = NULL; 26.44 + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 26.45 + ciSignatureStream signature_at_call_stream(signature_at_call); 26.46 + 26.47 + // if called through method handle invoke, some arguments may have been popped 26.48 + for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { 26.49 + int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); 26.50 + ciKlass* exact = profile_type(md, base_offset, off, 26.51 + args->type(i), x->profiled_arg_at(i+start), mdp, 26.52 + !x->arg_needs_null_check(i+start), 26.53 + signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); 26.54 + if (exact != NULL) { 26.55 + md->set_argument_type(bci, i, exact); 26.56 + } 26.57 + } 26.58 + } else { 26.59 +#ifdef ASSERT 26.60 + Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); 26.61 + int n = 
x->nb_profiled_args(); 26.62 + assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() || 26.63 + (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))), 26.64 + "only at JSR292 bytecodes"); 26.65 +#endif 26.66 } 26.67 - ciSignature* callee_signature = x->callee()->signature(); 26.68 - // method handle call to virtual method 26.69 - bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 26.70 - ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); 26.71 - 26.72 - bool ignored_will_link; 26.73 - ciSignature* signature_at_call = NULL; 26.74 - x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 26.75 - ciSignatureStream signature_at_call_stream(signature_at_call); 26.76 - 26.77 - // if called through method handle invoke, some arguments may have been popped 26.78 - for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { 26.79 - int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); 26.80 - ciKlass* exact = profile_type(md, base_offset, off, 26.81 - args->type(i), x->profiled_arg_at(i+start), mdp, 26.82 - !x->arg_needs_null_check(i+start), 26.83 - signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); 26.84 - if (exact != NULL) { 26.85 - md->set_argument_type(bci, i, exact); 26.86 - } 26.87 - } 26.88 - } else { 26.89 -#ifdef ASSERT 26.90 - Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); 26.91 - int n = x->nb_profiled_args(); 26.92 - assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() || 26.93 - (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))), 26.94 - "only at JSR292 bytecodes"); 26.95 -#endif 26.96 } 26.97 } 26.98 } 26.99 @@ -3319,24 
+3321,26 @@ 26.100 int bci = x->bci_of_invoke(); 26.101 ciMethodData* md = x->method()->method_data_or_null(); 26.102 ciProfileData* data = md->bci_to_data(bci); 26.103 - assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); 26.104 - ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); 26.105 - LIR_Opr mdp = LIR_OprFact::illegalOpr; 26.106 - 26.107 - bool ignored_will_link; 26.108 - ciSignature* signature_at_call = NULL; 26.109 - x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 26.110 - 26.111 - // The offset within the MDO of the entry to update may be too large 26.112 - // to be used in load/store instructions on some platforms. So have 26.113 - // profile_type() compute the address of the profile in a register. 26.114 - ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0, 26.115 - ret->type(), x->ret(), mdp, 26.116 - !x->needs_null_check(), 26.117 - signature_at_call->return_type()->as_klass(), 26.118 - x->callee()->signature()->return_type()->as_klass()); 26.119 - if (exact != NULL) { 26.120 - md->set_return_type(bci, exact); 26.121 + if (data != NULL) { 26.122 + assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); 26.123 + ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); 26.124 + LIR_Opr mdp = LIR_OprFact::illegalOpr; 26.125 + 26.126 + bool ignored_will_link; 26.127 + ciSignature* signature_at_call = NULL; 26.128 + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 26.129 + 26.130 + // The offset within the MDO of the entry to update may be too large 26.131 + // to be used in load/store instructions on some platforms. So have 26.132 + // profile_type() compute the address of the profile in a register. 
26.133 + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0, 26.134 + ret->type(), x->ret(), mdp, 26.135 + !x->needs_null_check(), 26.136 + signature_at_call->return_type()->as_klass(), 26.137 + x->callee()->signature()->return_type()->as_klass()); 26.138 + if (exact != NULL) { 26.139 + md->set_return_type(bci, exact); 26.140 + } 26.141 } 26.142 } 26.143
27.1 --- a/src/share/vm/ci/ciMethodData.cpp Tue Jul 25 07:47:13 2017 -0700 27.2 +++ b/src/share/vm/ci/ciMethodData.cpp Thu Aug 03 08:07:17 2017 -0700 27.3 @@ -391,11 +391,13 @@ 27.4 MethodData* mdo = get_MethodData(); 27.5 if (mdo != NULL) { 27.6 ProfileData* data = mdo->bci_to_data(bci); 27.7 - if (data->is_CallTypeData()) { 27.8 - data->as_CallTypeData()->set_argument_type(i, k->get_Klass()); 27.9 - } else { 27.10 - assert(data->is_VirtualCallTypeData(), "no arguments!"); 27.11 - data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass()); 27.12 + if (data != NULL) { 27.13 + if (data->is_CallTypeData()) { 27.14 + data->as_CallTypeData()->set_argument_type(i, k->get_Klass()); 27.15 + } else { 27.16 + assert(data->is_VirtualCallTypeData(), "no arguments!"); 27.17 + data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass()); 27.18 + } 27.19 } 27.20 } 27.21 } 27.22 @@ -413,11 +415,13 @@ 27.23 MethodData* mdo = get_MethodData(); 27.24 if (mdo != NULL) { 27.25 ProfileData* data = mdo->bci_to_data(bci); 27.26 - if (data->is_CallTypeData()) { 27.27 - data->as_CallTypeData()->set_return_type(k->get_Klass()); 27.28 - } else { 27.29 - assert(data->is_VirtualCallTypeData(), "no arguments!"); 27.30 - data->as_VirtualCallTypeData()->set_return_type(k->get_Klass()); 27.31 + if (data != NULL) { 27.32 + if (data->is_CallTypeData()) { 27.33 + data->as_CallTypeData()->set_return_type(k->get_Klass()); 27.34 + } else { 27.35 + assert(data->is_VirtualCallTypeData(), "no arguments!"); 27.36 + data->as_VirtualCallTypeData()->set_return_type(k->get_Klass()); 27.37 + } 27.38 } 27.39 } 27.40 }
28.1 --- a/src/share/vm/opto/memnode.cpp Tue Jul 25 07:47:13 2017 -0700 28.2 +++ b/src/share/vm/opto/memnode.cpp Thu Aug 03 08:07:17 2017 -0700 28.3 @@ -1,5 +1,5 @@ 28.4 /* 28.5 - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 28.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 28.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 28.8 * 28.9 * This code is free software; you can redistribute it and/or modify it 28.10 @@ -55,6 +55,15 @@ 28.11 return calculate_adr_type(adr->bottom_type(), cross_check); 28.12 } 28.13 28.14 +bool MemNode::check_if_adr_maybe_raw(Node* adr) { 28.15 + if (adr != NULL) { 28.16 + if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) { 28.17 + return true; 28.18 + } 28.19 + } 28.20 + return false; 28.21 +} 28.22 + 28.23 #ifndef PRODUCT 28.24 void MemNode::dump_spec(outputStream *st) const { 28.25 if (in(Address) == NULL) return; // node is dead 28.26 @@ -503,6 +512,7 @@ 28.27 if (offset == Type::OffsetBot) 28.28 return NULL; // cannot unalias unless there are precise offsets 28.29 28.30 + const bool adr_maybe_raw = check_if_adr_maybe_raw(adr); 28.31 const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); 28.32 28.33 intptr_t size_in_bytes = memory_size(); 28.34 @@ -519,6 +529,13 @@ 28.35 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); 28.36 if (st_base == NULL) 28.37 break; // inscrutable pointer 28.38 + 28.39 + // For raw accesses it's not enough to prove that constant offsets don't intersect. 28.40 + // We need the bases to be equal in order for the offset check to make sense. 28.41 + if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) { 28.42 + break; 28.43 + } 28.44 + 28.45 if (st_offset != offset && st_offset != Type::OffsetBot) { 28.46 const int MAX_STORE = BytesPerLong; 28.47 if (st_offset >= offset + size_in_bytes ||
29.1 --- a/src/share/vm/opto/memnode.hpp Tue Jul 25 07:47:13 2017 -0700 29.2 +++ b/src/share/vm/opto/memnode.hpp Thu Aug 03 08:07:17 2017 -0700 29.3 @@ -1,5 +1,5 @@ 29.4 /* 29.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 29.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 29.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 * 29.9 * This code is free software; you can redistribute it and/or modify it 29.10 @@ -75,6 +75,8 @@ 29.11 debug_only(_adr_type=at; adr_type();) 29.12 } 29.13 29.14 + static bool check_if_adr_maybe_raw(Node* adr); 29.15 + 29.16 public: 29.17 // Helpers for the optimizer. Documented in memnode.cpp. 29.18 static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
30.1 --- a/src/share/vm/opto/parse2.cpp Tue Jul 25 07:47:13 2017 -0700 30.2 +++ b/src/share/vm/opto/parse2.cpp Thu Aug 03 08:07:17 2017 -0700 30.3 @@ -812,6 +812,9 @@ 30.4 ciMethodData* methodData = method()->method_data(); 30.5 if (!methodData->is_mature()) return PROB_UNKNOWN; 30.6 ciProfileData* data = methodData->bci_to_data(bci()); 30.7 + if (data == NULL) { 30.8 + return PROB_UNKNOWN; 30.9 + } 30.10 if (!data->is_JumpData()) return PROB_UNKNOWN; 30.11 30.12 // get taken and not taken values 30.13 @@ -903,8 +906,8 @@ 30.14 // of the OSR-ed method, and we want to deopt to gather more stats. 30.15 // If you have ANY counts, then this loop is simply 'cold' relative 30.16 // to the OSR loop. 30.17 - if (data->as_BranchData()->taken() + 30.18 - data->as_BranchData()->not_taken() == 0 ) { 30.19 + if (data == NULL || 30.20 + (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 30.21 // This is the only way to return PROB_UNKNOWN: 30.22 return PROB_UNKNOWN; 30.23 }
31.1 --- a/src/share/vm/services/lowMemoryDetector.cpp Tue Jul 25 07:47:13 2017 -0700 31.2 +++ b/src/share/vm/services/lowMemoryDetector.cpp Thu Aug 03 08:07:17 2017 -0700 31.3 @@ -1,5 +1,5 @@ 31.4 /* 31.5 - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. 31.6 + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 31.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 31.8 * 31.9 * This code is free software; you can redistribute it and/or modify it 31.10 @@ -298,19 +298,41 @@ 31.11 Klass* k = Management::sun_management_Sensor_klass(CHECK); 31.12 instanceKlassHandle sensorKlass (THREAD, k); 31.13 Handle sensor_h(THREAD, _sensor_obj); 31.14 - Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, CHECK); 31.15 + 31.16 + Symbol* trigger_method_signature; 31.17 31.18 JavaValue result(T_VOID); 31.19 JavaCallArguments args(sensor_h); 31.20 args.push_int((int) count); 31.21 - args.push_oop(usage_h); 31.22 + 31.23 + Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, THREAD); 31.24 + // Call Sensor::trigger(int, MemoryUsage) to send notification to listeners. 31.25 + // When OOME occurs and fails to allocate MemoryUsage object, call 31.26 + // Sensor::trigger(int) instead. The pending request will be processed 31.27 + // but no notification will be sent. 
31.28 + if (HAS_PENDING_EXCEPTION) { 31.29 + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here"); 31.30 + CLEAR_PENDING_EXCEPTION; 31.31 + trigger_method_signature = vmSymbols::int_void_signature(); 31.32 + } else { 31.33 + trigger_method_signature = vmSymbols::trigger_method_signature(); 31.34 + args.push_oop(usage_h); 31.35 + } 31.36 31.37 JavaCalls::call_virtual(&result, 31.38 sensorKlass, 31.39 vmSymbols::trigger_name(), 31.40 - vmSymbols::trigger_method_signature(), 31.41 + trigger_method_signature, 31.42 &args, 31.43 - CHECK); 31.44 + THREAD); 31.45 + 31.46 + if (HAS_PENDING_EXCEPTION) { 31.47 + // We just clear the OOM pending exception that we might have encountered 31.48 + // in Java's triggerAction(), and continue with updating the counters since 31.49 + // the Java counters have been updated too. 31.50 + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here"); 31.51 + CLEAR_PENDING_EXCEPTION; 31.52 + } 31.53 } 31.54 31.55 {
32.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 32.2 +++ b/test/compiler/unsafe/TestRawAliasing.java Thu Aug 03 08:07:17 2017 -0700 32.3 @@ -0,0 +1,70 @@ 32.4 +/* 32.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 32.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 32.7 + * 32.8 + * This code is free software; you can redistribute it and/or modify it 32.9 + * under the terms of the GNU General Public License version 2 only, as 32.10 + * published by the Free Software Foundation. 32.11 + * 32.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 32.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 32.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 32.15 + * version 2 for more details (a copy is included in the LICENSE file that 32.16 + * accompanied this code). 32.17 + * 32.18 + * You should have received a copy of the GNU General Public License version 32.19 + * 2 along with this work; if not, write to the Free Software Foundation, 32.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 32.21 + * 32.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 32.23 + * or visit www.oracle.com if you need additional information or have any 32.24 + * questions. 
32.25 + */ 32.26 + 32.27 +/* 32.28 + * @test 32.29 + * @bug 8178047 32.30 + * @run main/othervm -XX:CompileCommand=exclude,*.main -XX:-TieredCompilation -XX:-BackgroundCompilation compiler.unsafe.TestRawAliasing 32.31 + */ 32.32 + 32.33 +package compiler.unsafe; 32.34 + 32.35 +import java.lang.reflect.Field; 32.36 + 32.37 +public class TestRawAliasing { 32.38 + static private final sun.misc.Unsafe UNSAFE; 32.39 + static { 32.40 + try { 32.41 + Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe"); 32.42 + f.setAccessible(true); 32.43 + UNSAFE = (sun.misc.Unsafe) f.get(null); 32.44 + } catch (Exception e) { 32.45 + throw new RuntimeException("Unable to get Unsafe instance.", e); 32.46 + } 32.47 + } 32.48 + 32.49 + static private final int OFFSET_X = 50; 32.50 + static private final int OFFSET_Y = 100; 32.51 + 32.52 + private static int test(long base_plus_offset_x, long base_plus_offset_y, int magic_value) { 32.53 + // write 0 to a location 32.54 + UNSAFE.putByte(base_plus_offset_x - OFFSET_X, (byte)0); 32.55 + // write unfoldable value to really the same location with another base 32.56 + UNSAFE.putByte(base_plus_offset_y - OFFSET_Y, (byte)magic_value); 32.57 + // read the value back, should be equal to "magic_value" 32.58 + return UNSAFE.getByte(base_plus_offset_x - OFFSET_X); 32.59 + } 32.60 + 32.61 + private static final int OFF_HEAP_AREA_SIZE = 128; 32.62 + private static final byte MAGIC = 123; 32.63 + 32.64 + // main is excluded from compilation since we don't want the test method to inline and make base values fold 32.65 + public static void main(String... args) { 32.66 + long base = UNSAFE.allocateMemory(OFF_HEAP_AREA_SIZE); 32.67 + for (int i = 0; i < 100_000; i++) { 32.68 + if (test(base + OFFSET_X, base + OFFSET_Y, MAGIC) != MAGIC) { 32.69 + throw new RuntimeException("Unexpected magic value"); 32.70 + } 32.71 + } 32.72 + } 32.73 +}