# HG changeset patch
# User asaha
# Date 1500913960 25200
# Node ID 1e225dabccc538455e02ea2e853aca90a8f4bcdc
# Parent bfa3989eb8d3e927c1507d7aea8f3f60d9fbdac6
# Parent 7245ee7fa44a8855305f04015f5b7cc581e41cb4
Merge

diff -r bfa3989eb8d3 -r 1e225dabccc5 .hgtags
--- a/.hgtags	Fri Jul 21 20:31:41 2017 -0700
+++ b/.hgtags	Mon Jul 24 09:32:40 2017 -0700
@@ -931,6 +931,8 @@
 b28d012a24cab8f4ceeee0c9d3252969757423ed jdk8u112-b15
 e134dc1879b72124e478be01680b0646a2fbf585 jdk8u112-b16
 87440ed4e1de7753a436f957d35555d8b4e26f1d jdk8u112-b31
+ba25f5833a128b8062e597f794efda26b30f095d jdk8u112-b32
+919ffdca10c2721ee0f6f233e704709174556510 jdk8u112-b33
 3b0e5f01891f5ebbf67797b1aae786196f1bb4f6 jdk8u121-b00
 251a2493b1857f2ff4f11eab2dfd8b2fe8ed441b jdk8u121-b01
 70c4a50f576a01ec975d0a02b3642ee33db39ed8 jdk8u121-b02
@@ -945,6 +947,12 @@
 11f91811e4d7e5ddfaf938dcf386ec8fe5bf7b7c jdk8u121-b11
 b132b08b28bf23a26329928cf6b4ffda5857f4d3 jdk8u121-b12
 90f94521c3515e5f27af0ab9b31d036e88bb322a jdk8u121-b13
+351bf1d4ff9a41137f91e2ec97ec59ed29a38d8b jdk8u121-b31
+41daac438a2ac5a80755dc3de88b76e4ac66750a jdk8u121-b32
+eb9e617d6f64d4ad689feac0707b5e4335b00ce2 jdk8u121-b33
+c60b0994e8eee152666252c3ba4105db65c004db jdk8u121-b34
+0612a789929b88612509668bea4b3138613e91e4 jdk8u121-b35
+0ea269e49511a890e6fabfd468638dd1c0ed0be3 jdk8u121-b36
 c0a1ba0df20fda10ddb8599e888eb56ad98b3874 jdk8u131-b00
 0b85ccd6240991e1a501602ff5addec6b88ae0af jdk8u131-b01
 ef90c721a4e59b01ca36f25619010a1afe9ed4d5 jdk8u131-b02
@@ -957,6 +965,10 @@
 56e71d16083904ceddfdd1d66312582a42781646 jdk8u131-b09
 1da23ae49386608550596502d90a381ee6c1dfaa jdk8u131-b10
 829ea9b92cda9545652f1b309f56c57383024ebb jdk8u131-b11
+41e0713bcca27cef5d6a9afd44c7ca4811937713 jdk8u131-b31
+e318654a4fa352a06935dd56eebf88ae387b31f9 jdk8u131-b32
+32998fc932dc58c6bbac185cc17d2752fa6dba4c jdk8u131-b33
+50b3fa6791f46bc582528bdc7f6311b3b6832c51 jdk8u131-b34
 692bc6b674dcab72453de08ee9da0856a7e41c0f jdk8u141-b00
 0cee0db0180b64655751e7058c251103f9660f85 jdk8u141-b01
 82435799636c8b50a090aebcb5af49946afa7bb5 jdk8u141-b02
@@ -973,5 +985,7 @@
 df6af363337eff5b22ae7940b0981231fdf5dfb4 jdk8u141-b13
 3a1543e089c32592be9c201c6e021295fbf5fdc1 jdk8u141-b14
 23f1790147d838ddb1133cc79dc08e7c9ba5ab44 jdk8u141-b15
+9ffa0d7ed932045a0b4ceb095fb52444eed39c1b jdk8u141-b31
+ae8cae699f62b845703c891e0e7633e2089a3ec4 jdk8u141-b32
 eea89df81a8e414813d921eeeeef9b6795f56698 jdk8u144-b00
 db36f4d498b1bde975700a800b5ce732941c04b7 jdk8u144-b01
diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/sparc/vm/macroAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Jul 21 20:31:41 2017 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Jul 24 09:32:40 2017 -0700
@@ -4261,6 +4261,7 @@
   assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
   Register end = count;
   int cache_line_size = VM_Version::prefetch_data_size();
+  assert(cache_line_size > 0, "cache line size should be known for this code");
   // Minimum count when BIS zeroing can be used since
   // it needs membar which is expensive.
int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/sparc/vm/vm_version_sparc.cpp --- a/src/cpu/sparc/vm/vm_version_sparc.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -74,7 +74,7 @@ AllocatePrefetchDistance = AllocatePrefetchStepSize; } - if (AllocatePrefetchStyle == 3 && !has_blk_init()) { + if (AllocatePrefetchStyle == 3 && (!has_blk_init() || cache_line_size <= 0)) { warning("BIS instructions are not available on this CPU"); FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1); } @@ -138,7 +138,7 @@ FLAG_SET_DEFAULT(InteriorEntryAlignment, 4); } if (is_niagara_plus()) { - if (has_blk_init() && UseTLAB && + if (has_blk_init() && (cache_line_size > 0) && UseTLAB && FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { // Use BIS instruction for TLAB allocation prefetch. FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1); @@ -236,7 +236,7 @@ assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size"); char buf[512]; - jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")), (has_hardware_popc() ? ", popc" : ""), (has_vis1() ? ", vis1" : ""), @@ -249,6 +249,7 @@ (has_sha256() ? ", sha256" : ""), (has_sha512() ? ", sha512" : ""), (is_ultra3() ? ", ultra3" : ""), + (has_sparc5_instr() ? ", sparc5" : ""), (is_sun4v() ? ", sun4v" : ""), (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")), (is_sparc64() ? ", sparc64" : ""), @@ -364,6 +365,7 @@ #ifndef PRODUCT if (PrintMiscellaneous && Verbose) { + tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size()); tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size()); tty->print("Allocation"); if (AllocatePrefetchStyle <= 0) { @@ -447,9 +449,10 @@ unsigned int VM_Version::calc_parallel_worker_threads() { unsigned int result; - if (is_M_series()) { - // for now, use same gc thread calculation for M-series as for niagara-plus - // in future, we may want to tweak parameters for nof_parallel_worker_thread + if (is_M_series() || is_S_series()) { + // for now, use same gc thread calculation for M-series and S-series as for + // niagara-plus. In future, we may want to tweak parameters for + // nof_parallel_worker_thread result = nof_parallel_worker_threads(5, 16, 8); } else if (is_niagara_plus()) { result = nof_parallel_worker_threads(5, 16, 8); @@ -458,3 +461,37 @@ } return result; } + + +int VM_Version::parse_features(const char* implementation) { + int features = unknown_m; + // Convert to UPPER case before compare. + char* impl = os::strdup(implementation); + + for (int i = 0; impl[i] != 0; i++) + impl[i] = (char)toupper((uint)impl[i]); + + if (strstr(impl, "SPARC64") != NULL) { + features |= sparc64_family_m; + } else if (strstr(impl, "SPARC-M") != NULL) { + // M-series SPARC is based on T-series. + features |= (M_family_m | T_family_m); + } else if (strstr(impl, "SPARC-S") != NULL) { + // S-series SPARC is based on T-series. 
+ features |= (S_family_m | T_family_m); + } else if (strstr(impl, "SPARC-T") != NULL) { + features |= T_family_m; + if (strstr(impl, "SPARC-T1") != NULL) { + features |= T1_model_m; + } + } else if (strstr(impl, "SUN4V-CPU") != NULL) { + // Generic or migration class LDOM + features |= T_family_m; + } else { +#ifndef PRODUCT + warning("Failed to parse CPU implementation = '%s'", impl); +#endif + } + os::free((void*)impl); + return features; +} diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/sparc/vm/vm_version_sparc.hpp --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -47,13 +47,14 @@ cbcond_instructions = 13, sparc64_family = 14, M_family = 15, - T_family = 16, - T1_model = 17, - sparc5_instructions = 18, - aes_instructions = 19, - sha1_instruction = 20, - sha256_instruction = 21, - sha512_instruction = 22 + S_family = 16, + T_family = 17, + T1_model = 18, + sparc5_instructions = 19, + aes_instructions = 20, + sha1_instruction = 21, + sha256_instruction = 22, + sha512_instruction = 23 }; enum Feature_Flag_Set { @@ -76,6 +77,7 @@ cbcond_instructions_m = 1 << cbcond_instructions, sparc64_family_m = 1 << sparc64_family, M_family_m = 1 << M_family, + S_family_m = 1 << S_family, T_family_m = 1 << T_family, T1_model_m = 1 << T1_model, sparc5_instructions_m = 1 << sparc5_instructions, @@ -105,6 +107,7 @@ // Returns true if the platform is in the niagara line (T series) static bool is_M_family(int features) { return (features & M_family_m) != 0; } + static bool is_S_family(int features) { return (features & S_family_m) != 0; } static bool is_T_family(int features) { return (features & T_family_m) != 0; } static bool is_niagara() { return is_T_family(_features); } #ifdef ASSERT @@ -119,7 +122,7 @@ static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); } static int maximum_niagara1_processor_count() { return 32; } - + static int parse_features(const char* implementation); public: // Initialization static void initialize(); @@ -152,6 +155,7 @@ static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); } static bool is_M_series() { return is_M_family(_features); } + static bool is_S_series() { return is_S_family(_features); } static bool is_T4() { return is_T_family(_features) && has_cbcond(); } static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/c1_Runtime1_x86.cpp --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -98,7 +98,7 @@ } pop(rax); #endif - reset_last_Java_frame(thread, true, align_stack); + reset_last_Java_frame(thread, true); // discard thread and arguments NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); @@ -882,7 +882,7 @@ } __ pop(rax); #endif - __ reset_last_Java_frame(thread, true, false); + __ reset_last_Java_frame(thread, true); #ifndef _LP64 __ pop(rcx); // discard thread arg __ pop(rcx); // discard dummy diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/frame_x86.cpp --- a/src/cpu/x86/vm/frame_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/frame_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -370,13 +370,16 @@ JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); assert(!entry_frame_is_first(), "next Java fp must be non zero"); assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); + // Since 
we are walking the stack now this nested anchor is obviously walkable + // even if it wasn't when it was stacked. + if (!jfa->walkable()) { + // Capture _last_Java_pc (if needed) and mark anchor walkable. + jfa->capture_last_Java_pc(); + } map->clear(); assert(map->include_argument_oops(), "should be set by clear"); - if (jfa->last_Java_pc() != NULL ) { - frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); - return fr; - } - frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); + assert(jfa->last_Java_pc() != NULL, "not walkable"); + frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); return fr; } @@ -714,3 +717,21 @@ init((intptr_t*)sp, (intptr_t*)fp, (address)pc); } #endif + +void JavaFrameAnchor::make_walkable(JavaThread* thread) { + // last frame set? + if (last_Java_sp() == NULL) return; + // already walkable? + if (walkable()) return; + assert(Thread::current() == (Thread*)thread, "not current thread"); + assert(last_Java_sp() != NULL, "not called from Java code?"); + assert(last_Java_pc() == NULL, "already walkable"); + capture_last_Java_pc(); + assert(walkable(), "something went wrong"); +} + +void JavaFrameAnchor::capture_last_Java_pc() { + assert(_last_Java_sp != NULL, "no last frame set"); + assert(_last_Java_pc == NULL, "already walkable"); + _last_Java_pc = (address)_last_Java_sp[-1]; +} diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/frame_x86.inline.hpp --- a/src/cpu/x86/vm/frame_x86.inline.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/frame_x86.inline.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -96,6 +96,7 @@ // call a specialized frame constructor instead of this one. // Then we could use the assert below. However this assert is of somewhat dubious // value. + // UPDATE: this constructor is only used by trace_method_handle_stub() now. 
// assert(_pc != NULL, "no pc?"); _cb = CodeCache::find_blob(_pc); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/javaFrameAnchor_x86.hpp --- a/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -62,10 +62,9 @@ _last_Java_sp = src->_last_Java_sp; } - // Always walkable - bool walkable(void) { return true; } - // Never any thing to do since we are always walkable and can find address of return addresses - void make_walkable(JavaThread* thread) { } + bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } + void make_walkable(JavaThread* thread); + void capture_last_Java_pc(void); intptr_t* last_Java_sp(void) const { return _last_Java_sp; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/macroAssembler_x86.cpp --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -748,8 +748,7 @@ } } -void MacroAssembler::reset_last_Java_frame(bool clear_fp, - bool clear_pc) { +void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // we must set sp to zero to clear frame movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); // must clear fp, so that compiled frames are not confused; it is @@ -758,9 +757,8 @@ movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); } - if (clear_pc) { - movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); - } + // Always clear the pc because it could have been set by make_walkable() + movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); } void MacroAssembler::set_last_Java_frame(Register last_java_sp, @@ -2561,7 +2559,7 @@ } // reset last Java frame // Only interpreter should have to clear fp - reset_last_Java_frame(java_thread, true, false); + reset_last_Java_frame(java_thread, true); #ifndef CC_INTERP // C++ interp handles this in the interpreter @@ -3808,7 +3806,7 @@ pusha(); } -void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { +void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register if (!java_thread->is_valid()) { java_thread = rdi; @@ -3820,8 +3818,8 @@ movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); } - if (clear_pc) - movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); + // Always clear the pc because it could have been set by make_walkable() + movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/macroAssembler_x86.hpp --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -289,10 +289,10 @@ Register last_java_fp, address last_java_pc); - void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); + void reset_last_Java_frame(Register thread, bool clear_fp); // thread in the default location (r15_thread on 64bit) - void reset_last_Java_frame(bool clear_fp, bool clear_pc); + void reset_last_Java_frame(bool clear_fp); // Stores void store_check(Register obj); // store check for obj - register is destroyed afterwards diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/runtime_x86_32.cpp --- a/src/cpu/x86/vm/runtime_x86_32.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/runtime_x86_32.cpp Mon Jul 24 09:32:40 
2017 -0700 @@ -116,7 +116,7 @@ // No registers to map, rbp is known implicitly oop_maps->add_gc_map( __ pc() - start, new OopMap( framesize, 0 )); __ get_thread(rcx); - __ reset_last_Java_frame(rcx, false, false); + __ reset_last_Java_frame(rcx, false); // Restore callee-saved registers __ movptr(rbp, Address(rsp, rbp_off * wordSize)); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/sharedRuntime_x86_32.cpp --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1333,7 +1333,7 @@ __ increment(rsp, wordSize); __ get_thread(thread); - __ reset_last_Java_frame(thread, false, true); + __ reset_last_Java_frame(thread, false); save_or_restore_arguments(masm, stack_slots, total_in_args, arg_save_area, NULL, in_regs, in_sig_bt); @@ -2251,7 +2251,7 @@ // We can finally stop using that last_Java_frame we setup ages ago - __ reset_last_Java_frame(thread, false, true); + __ reset_last_Java_frame(thread, false); // Unpack oop result if (ret_type == T_OBJECT || ret_type == T_ARRAY) { @@ -2951,7 +2951,7 @@ __ pop(rcx); __ get_thread(rcx); - __ reset_last_Java_frame(rcx, false, false); + __ reset_last_Java_frame(rcx, false); // Load UnrollBlock into EDI __ mov(rdi, rax); @@ -3117,7 +3117,7 @@ __ push(rax); __ get_thread(rcx); - __ reset_last_Java_frame(rcx, false, false); + __ reset_last_Java_frame(rcx, false); // Collect return values __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize)); @@ -3219,7 +3219,7 @@ __ get_thread(rcx); - __ reset_last_Java_frame(rcx, false, false); + __ reset_last_Java_frame(rcx, false); // Load UnrollBlock into EDI __ movptr(rdi, rax); @@ -3331,7 +3331,7 @@ oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) ); __ get_thread(rdi); - __ reset_last_Java_frame(rdi, true, false); + __ reset_last_Java_frame(rdi, true); // Pop self-frame. __ leave(); // Epilog! @@ -3426,7 +3426,7 @@ // Clear last_Java_sp again __ get_thread(java_thread); - __ reset_last_Java_frame(java_thread, false, false); + __ reset_last_Java_frame(java_thread, false); __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, noException); @@ -3501,7 +3501,7 @@ __ addptr(rsp, wordSize); // clear last_Java_sp - __ reset_last_Java_frame(thread, true, false); + __ reset_last_Java_frame(thread, true); // check for pending exceptions Label pending; __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/sharedRuntime_x86_64.cpp --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1388,7 +1388,7 @@ __ mov(rsp, r12); // restore sp __ reinit_heapbase(); - __ reset_last_Java_frame(false, true); + __ reset_last_Java_frame(false); save_or_restore_arguments(masm, stack_slots, total_in_args, arg_save_area, NULL, in_regs, in_sig_bt); @@ -2497,7 +2497,7 @@ restore_native_result(masm, ret_type, stack_slots); } - __ reset_last_Java_frame(false, true); + __ reset_last_Java_frame(false); // Unpack oop result if (ret_type == T_OBJECT || ret_type == T_ARRAY) { @@ -3435,7 +3435,7 @@ // find any register it might need. 
oop_maps->add_gc_map(__ pc() - start, map); - __ reset_last_Java_frame(false, false); + __ reset_last_Java_frame(false); // Load UnrollBlock* into rdi __ mov(rdi, rax); @@ -3592,7 +3592,7 @@ new OopMap( frame_size_in_words, 0 )); // Clear fp AND pc - __ reset_last_Java_frame(true, true); + __ reset_last_Java_frame(true); // Collect return values __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes())); @@ -3662,7 +3662,7 @@ oop_maps->add_gc_map(__ pc() - start, map); - __ reset_last_Java_frame(false, false); + __ reset_last_Java_frame(false); // Load UnrollBlock* into rdi __ mov(rdi, rax); @@ -3775,7 +3775,7 @@ oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); // Clear fp AND pc - __ reset_last_Java_frame(true, true); + __ reset_last_Java_frame(true); // Pop self-frame. __ leave(); // Epilog @@ -3858,7 +3858,7 @@ Label noException; - __ reset_last_Java_frame(false, false); + __ reset_last_Java_frame(false); __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, noException); @@ -3928,7 +3928,7 @@ // rax contains the address we are going to jump to assuming no exception got installed // clear last_Java_sp - __ reset_last_Java_frame(false, false); + __ reset_last_Java_frame(false); // check for pending exceptions Label pending; __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); @@ -4309,7 +4309,7 @@ oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); - __ reset_last_Java_frame(false, true); + __ reset_last_Java_frame(false); // Restore callee-saved registers diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -2901,7 +2901,7 @@ // however can use the register value directly if it is callee saved. 
__ get_thread(java_thread); - __ reset_last_Java_frame(java_thread, true, false); + __ reset_last_Java_frame(java_thread, true); __ leave(); // required for proper stackwalking of RuntimeStub frame diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -3923,7 +3923,7 @@ oop_maps->add_gc_map(the_pc - start, map); - __ reset_last_Java_frame(true, true); + __ reset_last_Java_frame(true); __ leave(); // required for proper stackwalking of RuntimeStub frame diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1289,7 +1289,7 @@ // change thread state __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); - __ reset_last_Java_frame(thread, true, true); + __ reset_last_Java_frame(thread, true); // reset handle block __ movptr(t, Address(thread, JavaThread::active_handles_offset())); @@ -1819,7 +1819,7 @@ __ set_last_Java_frame(thread, noreg, rbp, __ pc()); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx); __ get_thread(thread); - __ reset_last_Java_frame(thread, true, true); + __ reset_last_Java_frame(thread, true); // Restore the last_sp and null it out __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1262,7 +1262,7 @@ __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); // reset_last_Java_frame - __ reset_last_Java_frame(true, true); + __ reset_last_Java_frame(r15_thread, true); // reset handle block __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset())); @@ -1837,7 +1837,7 @@ // PC must point into interpreter here __ set_last_Java_frame(noreg, rbp, __ pc()); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); - __ reset_last_Java_frame(true, true); + __ reset_last_Java_frame(r15_thread, true); // Restore the last_sp and null it out __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/vm_version_x86.cpp --- a/src/cpu/x86/vm/vm_version_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -406,6 +406,8 @@ _stepping = 0; _cpuFeatures = 0; _logical_processors_per_package = 1; + // i486 internal cache is both I&D and has a 16-byte line size + _L1_data_cache_line_size = 16; if (!Use486InstrsOnly) { // Get raw processor info @@ -424,6 +426,7 @@ // Logical processors are only available on P4s and above, // and only if hyperthreading is available. 
_logical_processors_per_package = logical_processor_count(); + _L1_data_cache_line_size = L1_line_size(); } } @@ -1034,6 +1037,7 @@ if (PrintMiscellaneous && Verbose) { tty->print_cr("Logical CPUs per core: %u", logical_processors_per_package()); + tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size()); tty->print("UseSSE=%d", (int) UseSSE); if (UseAVX > 0) { tty->print(" UseAVX=%d", (int) UseAVX); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/cpu/x86/vm/vm_version_x86.hpp --- a/src/cpu/x86/vm/vm_version_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -595,7 +595,7 @@ return (result == 0 ? 1 : result); } - static intx prefetch_data_size() { + static intx L1_line_size() { intx result = 0; if (is_intel()) { result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1); @@ -607,6 +607,10 @@ return result; } + static intx prefetch_data_size() { + return L1_line_size(); + } + // // Feature identification // diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os/linux/vm/os_linux.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -2859,7 +2859,7 @@ // in the library. const size_t BitsPerCLong = sizeof(long) * CHAR_BIT; - size_t cpu_num = os::active_processor_count(); + size_t cpu_num = processor_count(); size_t cpu_map_size = NCPUS / BitsPerCLong; size_t cpu_map_valid_size = MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -44,7 +44,7 @@ // If we have a last_Java_frame, then we should use it even if // isInJava == true. It should be more reliable than ucontext info. - if (jt->has_last_Java_frame()) { + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { *fr_addr = jt->pd_last_frame(); return true; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -32,12 +32,8 @@ frame pd_last_frame() { assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); - if (_anchor.last_Java_pc() != NULL) { - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); - } else { - // This will pick up pc from sp - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); - } + assert(_anchor.last_Java_pc() != NULL, "not walkable"); + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); } public: diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/linux_x86/vm/thread_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -45,7 +45,7 @@ // If we have a last_Java_frame, then we should use it even if // isInJava == true. It should be more reliable than ucontext info. 
- if (jt->has_last_Java_frame()) { + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { *fr_addr = jt->pd_last_frame(); return true; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/linux_x86/vm/thread_linux_x86.hpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -32,12 +32,8 @@ frame pd_last_frame() { assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); - if (_anchor.last_Java_pc() != NULL) { - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); - } else { - // This will pick up pc from sp - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); - } + assert(_anchor.last_Java_pc() != NULL, "not walkable"); + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); } public: diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -262,6 +262,7 @@ // We need to keep these here as long as we have to build on Solaris // versions before 10. + #ifndef SI_ARCHITECTURE_32 #define SI_ARCHITECTURE_32 516 /* basic 32-bit SI_ARCHITECTURE */ #endif @@ -270,231 +271,233 @@ #define SI_ARCHITECTURE_64 517 /* basic 64-bit SI_ARCHITECTURE */ #endif -static void do_sysinfo(int si, const char* string, int* features, int mask) { - char tmp; - size_t bufsize = sysinfo(si, &tmp, 1); +#ifndef SI_CPUBRAND +#define SI_CPUBRAND 523 /* return cpu brand string */ +#endif - // All SI defines used below must be supported. - guarantee(bufsize != -1, "must be supported"); +class Sysinfo { + char* _string; +public: + Sysinfo(int si) : _string(NULL) { + char tmp; + size_t bufsize = sysinfo(si, &tmp, 1); - char* buf = (char*) malloc(bufsize); + if (bufsize != -1) { + char* buf = (char*) os::malloc(bufsize, mtInternal); + if (buf != NULL) { + if (sysinfo(si, buf, bufsize) == bufsize) { + _string = buf; + } else { + os::free(buf); + } + } + } + } - if (buf == NULL) - return; - - if (sysinfo(si, buf, bufsize) == bufsize) { - // Compare the string. - if (strcmp(buf, string) == 0) { - *features |= mask; + ~Sysinfo() { + if (_string != NULL) { + os::free(_string); } } - free(buf); -} + const char* value() const { + return _string; + } + + bool valid() const { + return _string != NULL; + } + + bool match(const char* s) const { + return valid() ? strcmp(_string, s) == 0 : false; + } + + bool match_substring(const char* s) const { + return valid() ? strstr(_string, s) != NULL : false; + } +}; + +class Sysconf { + int _value; +public: + Sysconf(int sc) : _value(-1) { + _value = sysconf(sc); + } + bool valid() const { + return _value != -1; + } + int value() const { + return _value; + } +}; + + +#ifndef _SC_DCACHE_LINESZ +#define _SC_DCACHE_LINESZ 508 /* Data cache line size */ +#endif + +#ifndef _SC_L2CACHE_LINESZ +#define _SC_L2CACHE_LINESZ 527 /* Size of L2 cache line */ +#endif + int VM_Version::platform_features(int features) { - // getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are - // supported on Solaris 10 and later. - if (os::Solaris::supports_getisax()) { + assert(os::Solaris::supports_getisax(), "getisax() must be available"); - // Check 32-bit architecture. - do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m); + // Check 32-bit architecture. 
+ if (Sysinfo(SI_ARCHITECTURE_32).match("sparc")) { + features |= v8_instructions_m; + } - // Check 64-bit architecture. - do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m); + // Check 64-bit architecture. + if (Sysinfo(SI_ARCHITECTURE_64).match("sparcv9")) { + features |= generic_v9_m; + } - // Extract valid instruction set extensions. - uint_t avs[2]; - uint_t avn = os::Solaris::getisax(avs, 2); - assert(avn <= 2, "should return two or less av's"); - uint_t av = avs[0]; + // Extract valid instruction set extensions. + uint_t avs[2]; + uint_t avn = os::Solaris::getisax(avs, 2); + assert(avn <= 2, "should return two or less av's"); + uint_t av = avs[0]; #ifndef PRODUCT - if (PrintMiscellaneous && Verbose) { - tty->print("getisax(2) returned: " PTR32_FORMAT, av); - if (avn > 1) { - tty->print(", " PTR32_FORMAT, avs[1]); - } - tty->cr(); + if (PrintMiscellaneous && Verbose) { + tty->print("getisax(2) returned: " PTR32_FORMAT, av); + if (avn > 1) { + tty->print(", " PTR32_FORMAT, avs[1]); } + tty->cr(); + } #endif - if (av & AV_SPARC_MUL32) features |= hardware_mul32_m; - if (av & AV_SPARC_DIV32) features |= hardware_div32_m; - if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m; - if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m; - if (av & AV_SPARC_POPC) features |= hardware_popc_m; - if (av & AV_SPARC_VIS) features |= vis1_instructions_m; - if (av & AV_SPARC_VIS2) features |= vis2_instructions_m; - if (avn > 1) { - uint_t av2 = avs[1]; + if (av & AV_SPARC_MUL32) features |= hardware_mul32_m; + if (av & AV_SPARC_DIV32) features |= hardware_div32_m; + if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m; + if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m; + if (av & AV_SPARC_POPC) features |= hardware_popc_m; + if (av & AV_SPARC_VIS) features |= vis1_instructions_m; + if (av & AV_SPARC_VIS2) features |= vis2_instructions_m; + if (avn > 1) { + uint_t av2 = avs[1]; #ifndef AV2_SPARC_SPARC5 #define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */ #endif - if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m; - } + if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m; + } - // Next values are not defined before Solaris 10 - // but Solaris 8 is used for jdk6 update builds. + // We only build on Solaris 10 and up, but some of the values below + // are not defined on all versions of Solaris 10, so we define them, + // if necessary. 
#ifndef AV_SPARC_ASI_BLK_INIT #define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */ #endif - if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m; + if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m; #ifndef AV_SPARC_FMAF #define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */ #endif - if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m; + if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m; #ifndef AV_SPARC_FMAU -#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */ +#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */ #endif - if (av & AV_SPARC_FMAU) features |= fmau_instructions_m; + if (av & AV_SPARC_FMAU) features |= fmau_instructions_m; #ifndef AV_SPARC_VIS3 -#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */ +#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */ #endif - if (av & AV_SPARC_VIS3) features |= vis3_instructions_m; + if (av & AV_SPARC_VIS3) features |= vis3_instructions_m; #ifndef AV_SPARC_CBCOND #define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */ #endif - if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m; + if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m; #ifndef AV_SPARC_AES #define AV_SPARC_AES 0x00020000 /* aes instrs supported */ #endif - if (av & AV_SPARC_AES) features |= aes_instructions_m; + if (av & AV_SPARC_AES) features |= aes_instructions_m; #ifndef AV_SPARC_SHA1 #define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */ #endif - if (av & AV_SPARC_SHA1) features |= sha1_instruction_m; + if (av & AV_SPARC_SHA1) features |= sha1_instruction_m; #ifndef AV_SPARC_SHA256 #define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */ #endif - if (av & AV_SPARC_SHA256) features |= sha256_instruction_m; + if (av & AV_SPARC_SHA256) features |= sha256_instruction_m; #ifndef AV_SPARC_SHA512 #define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */ #endif - if (av & AV_SPARC_SHA512) features |= sha512_instruction_m; + if (av & AV_SPARC_SHA512) features |= sha512_instruction_m; + // Determine the machine type. + if (Sysinfo(SI_MACHINE).match("sun4v")) { + features |= sun4v_m; + } + + // If SI_CPUBRAND works, that means Solaris 12 API to get the cache line sizes + // is available to us as well + Sysinfo cpu_info(SI_CPUBRAND); + bool use_solaris_12_api = cpu_info.valid(); + const char* impl; + int impl_m = 0; + if (use_solaris_12_api) { + impl = cpu_info.value(); +#ifndef PRODUCT + if (PrintMiscellaneous && Verbose) { + tty->print_cr("Parsing CPU implementation from %s", impl); + } +#endif + impl_m = parse_features(impl); } else { - // getisax(2) failed, use the old legacy code. + // Otherwise use kstat to determine the machine type. 
+ kstat_ctl_t* kc = kstat_open(); + if (kc != NULL) { + kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL); + if (ksp != NULL) { + if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) { + kstat_named_t* knm = (kstat_named_t *)ksp->ks_data; + for (int i = 0; i < ksp->ks_ndata; i++) { + if (strcmp((const char*)&(knm[i].name), "implementation") == 0) { + impl = KSTAT_NAMED_STR_PTR(&knm[i]); #ifndef PRODUCT - if (PrintMiscellaneous && Verbose) - tty->print_cr("getisax(2) is not supported."); + if (PrintMiscellaneous && Verbose) { + tty->print_cr("Parsing CPU implementation from %s", impl); + } #endif - - char tmp; - size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1); - char* buf = (char*) malloc(bufsize); - - if (buf != NULL) { - if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) { - // Figure out what kind of sparc we have - char *sparc_string = strstr(buf, "sparc"); - if (sparc_string != NULL) { features |= v8_instructions_m; - if (sparc_string[5] == 'v') { - if (sparc_string[6] == '8') { - if (sparc_string[7] == '-') { features |= hardware_mul32_m; - features |= hardware_div32_m; - } else if (sparc_string[7] == 'p') features |= generic_v9_m; - else features |= generic_v8_m; - } else if (sparc_string[6] == '9') features |= generic_v9_m; + impl_m = parse_features(impl); + break; + } } } - - // Check for visualization instructions - char *vis = strstr(buf, "vis"); - if (vis != NULL) { features |= vis1_instructions_m; - if (vis[3] == '2') features |= vis2_instructions_m; - } } - free(buf); + kstat_close(kc); } } + assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl)); + features |= impl_m; - // Determine the machine type. - do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m); + bool is_sun4v = (features & sun4v_m) != 0; + if (use_solaris_12_api && is_sun4v) { + // If Solaris 12 API is supported and it's sun4v use sysconf() to get the cache line sizes + Sysconf l1_dcache_line_size(_SC_DCACHE_LINESZ); + if (l1_dcache_line_size.valid()) { + _L1_data_cache_line_size = l1_dcache_line_size.value(); + } - { - // Using kstat to determine the machine type. - kstat_ctl_t* kc = kstat_open(); - kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL); - const char* implementation = "UNKNOWN"; - if (ksp != NULL) { - if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) { - kstat_named_t* knm = (kstat_named_t *)ksp->ks_data; - for (int i = 0; i < ksp->ks_ndata; i++) { - if (strcmp((const char*)&(knm[i].name),"implementation") == 0) { -#ifndef KSTAT_DATA_STRING -#define KSTAT_DATA_STRING 9 -#endif - if (knm[i].data_type == KSTAT_DATA_CHAR) { - // VM is running on Solaris 8 which does not have value.str. - implementation = &(knm[i].value.c[0]); - } else if (knm[i].data_type == KSTAT_DATA_STRING) { - // VM is running on Solaris 10. -#ifndef KSTAT_NAMED_STR_PTR - // Solaris 8 was used to build VM, define the structure it misses. - struct str_t { - union { - char *ptr; /* NULL-term string */ - char __pad[8]; /* 64-bit padding */ - } addr; - uint32_t len; /* # bytes for strlen + '\0' */ - }; -#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr) -#endif - implementation = KSTAT_NAMED_STR_PTR(&knm[i]); - } -#ifndef PRODUCT - if (PrintMiscellaneous && Verbose) { - tty->print_cr("cpu_info.implementation: %s", implementation); - } -#endif - // Convert to UPPER case before compare. 
- char* impl = strdup(implementation); - - for (int i = 0; impl[i] != 0; i++) - impl[i] = (char)toupper((uint)impl[i]); - if (strstr(impl, "SPARC64") != NULL) { - features |= sparc64_family_m; - } else if (strstr(impl, "SPARC-M") != NULL) { - // M-series SPARC is based on T-series. - features |= (M_family_m | T_family_m); - } else if (strstr(impl, "SPARC-T") != NULL) { - features |= T_family_m; - if (strstr(impl, "SPARC-T1") != NULL) { - features |= T1_model_m; - } - } else { - if (strstr(impl, "SPARC") == NULL) { -#ifndef PRODUCT - // kstat on Solaris 8 virtual machines (branded zones) - // returns "(unsupported)" implementation. - warning("kstat cpu_info implementation = '%s', should contain SPARC", impl); -#endif - implementation = "SPARC"; - } - } - free((void*)impl); - break; - } - } // for( - } + Sysconf l2_dcache_line_size(_SC_L2CACHE_LINESZ); + if (l2_dcache_line_size.valid()) { + _L2_data_cache_line_size = l2_dcache_line_size.value(); } - assert(strcmp(implementation, "UNKNOWN") != 0, - "unknown cpu info (changed kstat interface?)"); - kstat_close(kc); + } else { + // Otherwise figure out the cache line sizes using PICL + bool is_fujitsu = (features & sparc64_family_m) != 0; + PICL picl(is_fujitsu, is_sun4v); + _L1_data_cache_line_size = picl.L1_data_cache_line_size(); + _L2_data_cache_line_size = picl.L2_data_cache_line_size(); } - - // Figure out cache line sizes using PICL - PICL picl((features & sparc64_family_m) != 0, (features & sun4v_m) != 0); - _L2_data_cache_line_size = picl.L2_data_cache_line_size(); - return features; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -44,9 +44,8 @@ assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; - // last_Java_frame is always walkable and safe use it if we have it - - if (jt->has_last_Java_frame()) { + // There is small window where last_Java_frame is not walkable or safe + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { *fr_addr = jt->pd_last_frame(); return true; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -30,12 +30,8 @@ frame pd_last_frame() { assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); - if (_anchor.last_Java_pc() != NULL) { - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); - } else { - // This will pick up pc from sp - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); - } + assert(_anchor.last_Java_pc() != NULL, "not walkable"); + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); } public: diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/windows_x86/vm/thread_windows_x86.cpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -47,7 +47,7 @@ // If we have a last_Java_frame, then we should use it even if // isInJava == true. It should be more reliable than CONTEXT info. 
- if (jt->has_last_Java_frame()) { + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { *fr_addr = jt->pd_last_frame(); return true; } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/os_cpu/windows_x86/vm/thread_windows_x86.hpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -32,12 +32,8 @@ frame pd_last_frame() { assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); - if (_anchor.last_Java_pc() != NULL) { - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); - } else { - // This will pick up pc from sp - return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp()); - } + assert(_anchor.last_Java_pc() != NULL, "not walkable"); + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); } public: diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/classfile/classLoaderData.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,7 +78,7 @@ // The null-class-loader should always be kept alive. _keep_alive(is_anonymous || h_class_loader.is_null()), _metaspace(NULL), _unloading(false), _klasses(NULL), - _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL), + _claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL), _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { // empty @@ -96,6 +96,45 @@ _list_head = oopFactory::new_objectArray(2, CHECK); } +ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() { + Chunk* c = _head; + while (c != NULL) { + Chunk* next = c->_next; + delete c; + c = next; + } +} + +oop* ClassLoaderData::ChunkedHandleList::add(oop o) { + if (_head == NULL || _head->_size == Chunk::CAPACITY) { + Chunk* next = new Chunk(_head); + OrderAccess::release_store_ptr(&_head, next); + } + oop* handle = &_head->_data[_head->_size]; + *handle = o; + OrderAccess::release_store(&_head->_size, _head->_size + 1); + return handle; +} + +inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) { + for (juint i = 0; i < size; i++) { + if (c->_data[i] != NULL) { + f->do_oop(&c->_data[i]); + } + } +} + +void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) { + Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head); + if (head != NULL) { + // Must be careful when reading size of head + oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size)); + for (Chunk* c = head->_next; c != NULL; c = c->_next) { + oops_do_chunk(f, c, c->_size); + } + } +} + bool ClassLoaderData::claim() { if (_claimed == 1) { return false; @@ -111,7 +150,7 @@ f->do_oop(&_class_loader); _dependencies.oops_do(f); - _handles->oops_do(f); + _handles.oops_do(f); if (klass_closure != NULL) { classes_do(klass_closure); } @@ -342,11 +381,6 @@ _metaspace = NULL; // release the metaspace delete m; - // release the handles - if (_handles != NULL) { - JNIHandleBlock::release_block(_handles); - _handles = NULL; - } } // Clear all 
the JNI handles for methods @@ -406,15 +440,9 @@ return _metaspace; } -JNIHandleBlock* ClassLoaderData::handles() const { return _handles; } -void ClassLoaderData::set_handles(JNIHandleBlock* handles) { _handles = handles; } - jobject ClassLoaderData::add_handle(Handle h) { MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag); - if (handles() == NULL) { - set_handles(JNIHandleBlock::allocate_block()); - } - return handles()->allocate_handle(h()); + return (jobject) _handles.add(h()); } // Add this metadata pointer to be freed when it's safe. This is only during @@ -479,7 +507,6 @@ p2i(class_loader() != NULL ? class_loader()->klass() : NULL), loader_name()); if (claimed()) out->print(" claimed "); if (is_unloading()) out->print(" unloading "); - out->print(" handles " INTPTR_FORMAT, p2i(handles())); out->cr(); if (metaspace_or_null() != NULL) { out->print_cr("metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null())); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/classfile/classLoaderData.hpp --- a/src/share/vm/classfile/classLoaderData.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/classfile/classLoaderData.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,6 @@ class ClassLoaderData; class JNIMethodBlock; -class JNIHandleBlock; class Metadebug; // GC root for walking class loader data created @@ -145,6 +144,31 @@ void oops_do(OopClosure* f); }; + class ChunkedHandleList VALUE_OBJ_CLASS_SPEC { + struct Chunk : public CHeapObj { + static const size_t CAPACITY = 32; + + oop _data[CAPACITY]; + volatile juint _size; + Chunk* _next; + + Chunk(Chunk* c) : _next(c), _size(0) { } + }; + + Chunk* _head; + + void oops_do_chunk(OopClosure* f, Chunk* c, const juint size); + + public: + ChunkedHandleList() : _head(NULL) {} + ~ChunkedHandleList(); + + // Only one thread at a time can add, guarded by ClassLoaderData::metaspace_lock(). + // However, multiple threads can execute oops_do concurrently with add. + oop* add(oop o); + void oops_do(OopClosure* f); + }; + friend class ClassLoaderDataGraph; friend class ClassLoaderDataGraphKlassIteratorAtomic; friend class ClassLoaderDataGraphMetaspaceIterator; @@ -169,7 +193,8 @@ // Has to be an int because we cas it. Klass* _klasses; // The classes defined by the class loader. - JNIHandleBlock* _handles; // Handles to constant pool arrays + ChunkedHandleList _handles; // Handles to constant pool arrays, etc, which + // have the same life cycle of the corresponding ClassLoader. // These method IDs are created for the class loader and set to NULL when the // class loader is unloaded. They are rarely freed, only for redefine classes @@ -196,9 +221,6 @@ void set_metaspace(Metaspace* m) { _metaspace = m; } - JNIHandleBlock* handles() const; - void set_handles(JNIHandleBlock* handles); - Mutex* metaspace_lock() const { return _metaspace_lock; } // GC interface. diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/classfile/javaClasses.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2852,6 +2852,15 @@ mname->address_field_put(_vmindex_offset, (address) index); } +bool java_lang_invoke_MemberName::equals(oop mn1, oop mn2) { + if (mn1 == mn2) { + return true; + } + return (vmtarget(mn1) == vmtarget(mn2) && flags(mn1) == flags(mn2) && + vmindex(mn1) == vmindex(mn2) && + clazz(mn1) == clazz(mn2)); +} + oop java_lang_invoke_LambdaForm::vmentry(oop lform) { assert(is_instance(lform), "wrong type"); return lform->obj_field(_vmentry_offset); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/classfile/javaClasses.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1132,6 +1132,8 @@ static int flags_offset_in_bytes() { return _flags_offset; } static int vmtarget_offset_in_bytes() { return _vmtarget_offset; } static int vmindex_offset_in_bytes() { return _vmindex_offset; } + + static bool equals(oop mt1, oop mt2); }; diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/classfile/systemDictionary.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1084,15 +1084,18 @@ THREAD); const char* pkg = "java/"; + size_t pkglen = strlen(pkg); if (!HAS_PENDING_EXCEPTION && !class_loader.is_null() && parsed_name != NULL && - !strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) { + parsed_name->utf8_length() >= (int)pkglen && + !strncmp((const char*)parsed_name->bytes(), pkg, pkglen)) { // It is illegal to define classes in the "java." package from // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader ResourceMark rm(THREAD); char* name = parsed_name->as_C_string(); char* index = strrchr(name, '/'); + assert(index != NULL, "must be"); *index = '\0'; // chop to just the package name while ((index = strchr(name, '/')) != NULL) { *index = '.'; // replace '/' with '.' in package name diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/code/nmethod.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1151,6 +1151,7 @@ // Clear ICStubs of all compiled ICs void nmethod::clear_ic_stubs() { assert_locked_or_safepoint(CompiledIC_lock); + ResourceMark rm; RelocIterator iter(this); while(iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/oops/instanceKlass.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -3018,7 +3018,7 @@ return NULL; } -bool InstanceKlass::add_member_name(Handle mem_name) { +oop InstanceKlass::add_member_name(Handle mem_name, bool intern) { jweak mem_name_wref = JNIHandles::make_weak_global(mem_name); MutexLocker ml(MemberNameTable_lock); DEBUG_ONLY(No_Safepoint_Verifier nsv); @@ -3028,7 +3028,7 @@ // is called! 
Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name()); if (method->is_obsolete()) { - return false; + return NULL; } else if (method->is_old()) { // Replace method with redefined version java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum())); @@ -3037,8 +3037,11 @@ if (_member_names == NULL) { _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count()); } - _member_names->add_member_name(mem_name_wref); - return true; + if (intern) { + return _member_names->find_or_add_member_name(mem_name_wref); + } else { + return _member_names->add_member_name(mem_name_wref); + } } // ----------------------------------------------------------------------------------------------------- diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/oops/instanceKlass.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1105,7 +1105,7 @@ // JSR-292 support MemberNameTable* member_names() { return _member_names; } void set_member_names(MemberNameTable* member_names) { _member_names = member_names; } - bool add_member_name(Handle member_name); + oop add_member_name(Handle member_name, bool intern); public: // JVMTI support diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/opto/cfgnode.hpp --- a/src/share/vm/opto/cfgnode.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/opto/cfgnode.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -119,6 +119,9 @@ // input in slot 0. class PhiNode : public TypeNode { const TypePtr* const _adr_type; // non-null only for Type::MEMORY nodes. + // The following fields are only used for data PhiNodes to indicate + // that the PhiNode represents the value of a known instance field. + int _inst_mem_id; // Instance memory id (node index of the memory Phi) const int _inst_id; // Instance id of the memory slice. const int _inst_index; // Alias index of the instance memory slice. // Array elements references have the same alias_idx but different offset. 
@@ -138,11 +141,13 @@ }; PhiNode( Node *r, const Type *t, const TypePtr* at = NULL, + const int imid = -1, const int iid = TypeOopPtr::InstanceTop, const int iidx = Compile::AliasIdxTop, const int ioffs = Type::OffsetTop ) : TypeNode(t,r->req()), _adr_type(at), + _inst_mem_id(imid), _inst_id(iid), _inst_index(iidx), _inst_offset(ioffs) @@ -187,11 +192,14 @@ virtual bool pinned() const { return in(0) != 0; } virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; } + void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; } + const int inst_mem_id() const { return _inst_mem_id; } const int inst_id() const { return _inst_id; } const int inst_index() const { return _inst_index; } const int inst_offset() const { return _inst_offset; } - bool is_same_inst_field(const Type* tp, int id, int index, int offset) { + bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) { return type()->basic_type() == tp->basic_type() && + inst_mem_id() == mem_id && inst_id() == id && inst_index() == index && inst_offset() == offset && diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/opto/macro.cpp --- a/src/share/vm/opto/macro.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/opto/macro.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -401,7 +401,7 @@ for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { Node* phi = region->fast_out(k); if (phi->is_Phi() && phi != mem && - phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) { + phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) { return phi; } } @@ -420,7 +420,7 @@ GrowableArray values(length, length, NULL, false); // create a new Phi for the value - PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset); + PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset); transform_later(phi); value_phis->push(phi, mem->_idx); diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/opto/memnode.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1155,7 +1155,7 @@ for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { Node* phi = region->fast_out(i); if (phi->is_Phi() && phi != mem && - phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) { + phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) { return phi; } } @@ -1400,7 +1400,7 @@ this_iid = base->_idx; } PhaseIterGVN* igvn = phase->is_IterGVN(); - Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); + Node* phi = new (C) PhiNode(region, this_type, NULL, mem->_idx, this_iid, this_index, this_offset); for (uint i = 1; i < region->req(); i++) { Node* x; Node* the_clone = NULL; diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/opto/phaseX.cpp --- a/src/share/vm/opto/phaseX.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/opto/phaseX.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -481,6 +481,8 @@ uint current_idx = 0; // The current new node ID. Incremented after every assignment. 
for (uint i = 0; i < _useful.size(); i++) { Node* n = _useful.at(i); + // Sanity check that fails if we ever decide to execute this phase after EA + assert(!n->is_Phi() || n->as_Phi()->inst_mem_id() == -1, "should not be linked to data Phi"); const Type* type = gvn->type_or_null(n); new_type_array.map(current_idx, type); @@ -1378,6 +1380,18 @@ i -= num_edges; // we deleted 1 or more copies of this edge } + // Search for instance field data PhiNodes in the same region pointing to the old + // memory PhiNode and update their instance memory ids to point to the new node. + if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != NULL) { + Node* region = old->in(0); + for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { + PhiNode* phi = region->fast_out(i)->isa_Phi(); + if (phi != NULL && phi->inst_mem_id() == (int)old->_idx) { + phi->set_inst_mem_id((int)nn->_idx); + } + } + } + // Smash all inputs to 'old', isolating him completely Node *temp = new (C) Node(1); temp->init_req(0,nn); // Add a use to nn to prevent him from dying diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/opto/type.hpp --- a/src/share/vm/opto/type.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/opto/type.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -882,7 +882,7 @@ // If not InstanceTop or InstanceBot, indicates that this is // a particular instance of this type which is distinct. - // This is the the node index of the allocation node creating this instance. + // This is the node index of the allocation node creating this instance. int _instance_id; // Extra type information profiling gave us. We propagate it the diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/prims/jni.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -5129,6 +5129,7 @@ run_unit_test(TestKlass_test()); run_unit_test(Test_linked_list()); run_unit_test(TestChunkedList_test()); + run_unit_test(ObjectMonitor::sanity_checks()); #if INCLUDE_VM_STRUCTS run_unit_test(VMStructs::test()); #endif diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/prims/jvm.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -643,7 +643,7 @@ // This can safepoint and redefine method, so need both new_obj and method // in a handle, for two different reasons. new_obj can move, method can be // deleted if nothing is using it on the stack. - m->method_holder()->add_member_name(new_obj()); + m->method_holder()->add_member_name(new_obj(), false); } } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/prims/methodHandles.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -173,7 +173,7 @@ return NULL; } -oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) { +oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, bool intern) { assert(info.resolved_appendix().is_null(), "only normal methods here"); methodHandle m = info.resolved_method(); KlassHandle m_klass = m->method_holder(); @@ -270,13 +270,7 @@ // If relevant, the vtable or itable value is stored as vmindex. // This is done eagerly, since it is readily available without // constructing any new objects. - // TO DO: maybe intern mname_oop - if (m->method_holder()->add_member_name(mname)) { - return mname(); - } else { - // Redefinition caused this to fail. Return NULL (and an exception?) 
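The phaseX.cpp hunk above keeps the new back-reference consistent: when GVN replaces a memory Phi, any data Phi in the same region whose inst_mem_id still names the old node's index is re-pointed at the replacement, and the added assert guards against such links surviving into phases that run before escape analysis data exists. A minimal standalone analogue of that fix-up loop, with made-up types:

    // Standalone sketch (not HotSpot code): when a node is replaced, dependents
    // that recorded the old node's index must be re-pointed at the new index,
    // otherwise the back-reference silently goes stale.
    #include <vector>

    struct FakePhi {
        int idx;          // this node's index
        int inst_mem_id;  // index of the memory Phi it depends on, or -1
    };

    // Mirror of the fix-up added above: scan the replaced node's region for
    // value Phis that still reference the old index.
    void repoint_dependents(std::vector<FakePhi>& region_phis, int old_idx, int new_idx) {
        for (FakePhi& phi : region_phis) {
            if (phi.inst_mem_id == old_idx) {
                phi.inst_mem_id = new_idx;
            }
        }
    }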
- return NULL; - } + return m->method_holder()->add_member_name(mname, intern); } oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) { @@ -917,7 +911,9 @@ if (!java_lang_invoke_MemberName::is_instance(result())) return -99; // caller bug! CallInfo info(m); - oop saved = MethodHandles::init_method_MemberName(result, info); + // Since this is going through the methods to create MemberNames, don't search + // for matching methods already in the table + oop saved = MethodHandles::init_method_MemberName(result, info, /*intern*/false); if (saved != result()) results->obj_at_put(rfill-1, saved); // show saved instance to user } else if (++overflow >= overflow_limit) { @@ -949,9 +945,34 @@ } } -void MemberNameTable::add_member_name(jweak mem_name_wref) { +oop MemberNameTable::add_member_name(jweak mem_name_wref) { assert_locked_or_safepoint(MemberNameTable_lock); this->push(mem_name_wref); + return JNIHandles::resolve(mem_name_wref); +} + +oop MemberNameTable::find_or_add_member_name(jweak mem_name_wref) { + assert_locked_or_safepoint(MemberNameTable_lock); + oop new_mem_name = JNIHandles::resolve(mem_name_wref); + + // Find matching member name in the list. + // This is linear because these are short lists. + int len = this->length(); + int new_index = len; + for (int idx = 0; idx < len; idx++) { + oop mname = JNIHandles::resolve(this->at(idx)); + if (mname == NULL) { + new_index = idx; + continue; + } + if (java_lang_invoke_MemberName::equals(new_mem_name, mname)) { + JNIHandles::destroy_weak_global(mem_name_wref); + return mname; + } + } + // Not found, push the new one, or reuse empty slot + this->at_put_grow(new_index, mem_name_wref); + return new_mem_name; } #if INCLUDE_JVMTI diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/prims/methodHandles.hpp --- a/src/share/vm/prims/methodHandles.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/prims/methodHandles.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
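find_or_add_member_name above interns MemberNames by value: it scans the short list of weak references, returns an existing equal entry (destroying the now-redundant weak handle), and otherwise stores the new one, reusing the last cleared slot it saw. The same pattern in a self-contained sketch using std::weak_ptr in place of jweak handles (names are illustrative, not HotSpot code):

    #include <memory>
    #include <string>
    #include <vector>

    struct Name { std::string key; };

    std::shared_ptr<Name> find_or_add(std::vector<std::weak_ptr<Name>>& table,
                                      std::shared_ptr<Name> candidate) {
        std::size_t insert_at = table.size();
        for (std::size_t i = 0; i < table.size(); i++) {
            std::shared_ptr<Name> existing = table[i].lock();
            if (!existing) {              // weak reference was cleared
                insert_at = i;            // remember the empty slot for reuse
                continue;
            }
            if (existing->key == candidate->key) {
                return existing;          // interned: hand back the canonical entry
            }
        }
        if (insert_at == table.size()) {
            table.push_back(candidate);   // no hole found, grow the table
        } else {
            table[insert_at] = candidate; // fill the hole
        }
        return candidate;
    }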
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ static Handle new_MemberName(TRAPS); // must be followed by init_MemberName static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false); - static oop init_method_MemberName(Handle mname_h, CallInfo& info); + static oop init_method_MemberName(Handle mname_h, CallInfo& info, bool intern = true); static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true); static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig, int mflags, KlassHandle caller, @@ -236,7 +236,8 @@ public: MemberNameTable(int methods_cnt); ~MemberNameTable(); - void add_member_name(jweak mem_name_ref); + oop add_member_name(jweak mem_name_ref); + oop find_or_add_member_name(jweak mem_name_ref); #if INCLUDE_JVMTI // RedefineClasses() API support: diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/objectMonitor.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -2529,6 +2529,10 @@ SETKNOB(FastHSSEC) ; #undef SETKNOB + if (Knob_Verbose) { + sanity_checks(); + } + if (os::is_MP()) { BackOffMask = (1 << Knob_SpinBackOff) - 1 ; if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; @@ -2549,6 +2553,66 @@ InitDone = 1 ; } +void ObjectMonitor::sanity_checks() { + int error_cnt = 0; + int warning_cnt = 0; + bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests); + + if (verbose) { + tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT, + sizeof(ObjectMonitor)); + } + + uint cache_line_size = VM_Version::L1_data_cache_line_size(); + if (verbose) { + tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size); + } + + ObjectMonitor dummy; + u_char *addr_begin = (u_char*)&dummy; + u_char *addr_header = (u_char*)&dummy._header; + u_char *addr_owner = (u_char*)&dummy._owner; + + uint offset_header = (uint)(addr_header - addr_begin); + if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header); + + uint offset_owner = (uint)(addr_owner - addr_begin); + if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner); + + if ((uint)(addr_header - addr_begin) != 0) { + tty->print_cr("ERROR: offset(_header) must be zero (0)."); + error_cnt++; + } + + if (cache_line_size != 0) { + // We were able to determine the L1 data cache line size so + // do some cache line specific sanity checks + + if ((offset_owner - offset_header) < cache_line_size) { + tty->print_cr("WARNING: the _header and _owner fields are closer " + "than a cache line which permits false sharing."); + warning_cnt++; + } + + if ((sizeof(ObjectMonitor) % cache_line_size) != 0) { + tty->print_cr("WARNING: ObjectMonitor size is not a multiple of " + "a cache line which permits false sharing."); + warning_cnt++; + } + } + + ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt, + &warning_cnt); + + if (verbose || error_cnt != 0 || warning_cnt != 0) { + tty->print_cr("INFO: error_cnt=%d", error_cnt); + tty->print_cr("INFO: warning_cnt=%d", warning_cnt); + } + + guarantee(error_cnt == 0, + "Fatal error(s) found in ObjectMonitor::sanity_checks()"); +} + #ifndef PRODUCT void ObjectMonitor::verify() { } diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/objectMonitor.hpp --- a/src/share/vm/runtime/objectMonitor.hpp Fri Jul 21 20:31:41 2017 -0700 +++ 
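ObjectMonitor::sanity_checks above adds runtime layout checks in place of the old TODO-FIXME in objectMonitor.hpp (removed below): it verifies _header sits at offset zero and, when the L1 line size is known, warns if _header and _owner share a cache line or if the monitor size is not a multiple of a line. A compilable sketch of the same offset arithmetic against a made-up struct (not HotSpot code; 64-byte padding is only illustrative):

    #include <cstddef>
    #include <cstdio>

    struct Monitor {
        volatile long  header;    // hot, written by the owner; must stay at offset 0
        char           pad[64];   // keep the next hot field on its own cache line
        void* volatile owner;     // hot, written under contention
        char           pad2[48];  // round the size up to a line multiple
    };

    int check_layout(unsigned cache_line_size) {
        int warnings = 0;
        std::size_t off_header = offsetof(Monitor, header);
        std::size_t off_owner  = offsetof(Monitor, owner);

        if (off_header != 0) {
            std::printf("ERROR: header must be the first word\n");
            return -1;
        }
        if (cache_line_size != 0) {
            if (off_owner - off_header < cache_line_size) {
                std::printf("WARNING: header and owner share a cache line\n");
                warnings++;
            }
            if (sizeof(Monitor) % cache_line_size != 0) {
                std::printf("WARNING: size is not a multiple of a cache line\n");
                warnings++;
            }
        }
        return warnings;
    }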
b/src/share/vm/runtime/objectMonitor.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -189,6 +189,8 @@ bool check(TRAPS); // true if the thread owns the monitor. void check_slow(TRAPS); void clear(); + static void sanity_checks(); // public for -XX:+ExecuteInternalVMTests + // in PRODUCT for -XX:SyncKnobs=Verbose=1 #ifndef PRODUCT void verify(); void print(); @@ -234,8 +236,6 @@ // WARNING: this must be the very first word of ObjectMonitor // This means this class can't use any virtual member functions. - // TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the - // implicit 0 offset in emitted code. volatile markOop _header; // displaced object header word - mark void* volatile _object; // backward object pointer - strong root diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/sweeper.cpp --- a/src/share/vm/runtime/sweeper.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/sweeper.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -319,6 +319,7 @@ } void NMethodSweeper::sweep_code_cache() { + ResourceMark rm; Ticks sweep_start_counter = Ticks::now(); _flushed_count = 0; @@ -626,6 +627,7 @@ // state of the code cache if it's requested. void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { if (PrintMethodFlushing) { + ResourceMark rm; stringStream s; // Dump code cache state into a buffer before locking the tty, // because log_state() will use locks causing lock conflicts. @@ -643,6 +645,7 @@ } if (LogCompilation && (xtty != NULL)) { + ResourceMark rm; stringStream s; // Dump code cache state into a buffer before locking the tty, // because log_state() will use locks causing lock conflicts. diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/synchronizer.cpp --- a/src/share/vm/runtime/synchronizer.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/synchronizer.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -437,19 +437,22 @@ // Hash Code handling // // Performance concern: -// OrderAccess::storestore() calls release() which STs 0 into the global volatile -// OrderAccess::Dummy variable. This store is unnecessary for correctness. -// Many threads STing into a common location causes considerable cache migration -// or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore() -// until it's repaired. In some cases OrderAccess::fence() -- which incurs local -// latency on the executing processor -- is a better choice as it scales on SMP -// systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a -// discussion of coherency costs. Note that all our current reference platforms -// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC. +// OrderAccess::storestore() calls release() which at one time stored 0 +// into the global volatile OrderAccess::dummy variable. This store was +// unnecessary for correctness. Many threads storing into a common location +// causes considerable cache migration or "sloshing" on large SMP systems. +// As such, I avoided using OrderAccess::storestore(). In some cases +// OrderAccess::fence() -- which incurs local latency on the executing +// processor -- is a better choice as it scales on SMP systems. +// +// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for +// a discussion of coherency costs. Note that all our current reference +// platforms provide strong ST-ST order, so the issue is moot on IA32, +// x64, and SPARC. 
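The reworded synchronizer.cpp comment above is about ordering without shared writes: the historical storestore() stored into one global dummy variable, which every thread then bounced between caches, whereas a fence only costs local latency on the executing processor. For reference, both orderings can be expressed in portable C++ without touching shared memory; this is an illustrative analogue, not HotSpot's OrderAccess:

    #include <atomic>

    std::atomic<int> ready{0};
    int payload = 0;

    void publisher() {
        payload = 42;
        // store-store ordering: the payload store becomes visible before the flag store
        std::atomic_thread_fence(std::memory_order_release);
        ready.store(1, std::memory_order_relaxed);
    }

    void full_fence_publisher() {
        payload = 43;
        // full fence: orders all prior loads/stores before all later ones,
        // at the cost of local latency on the executing CPU only
        std::atomic_thread_fence(std::memory_order_seq_cst);
        ready.store(2, std::memory_order_relaxed);
    }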
// // As a general policy we use "volatile" to control compiler-based reordering -// and explicit fences (barriers) to control for architectural reordering performed -// by the CPU(s) or platform. +// and explicit fences (barriers) to control for architectural reordering +// performed by the CPU(s) or platform. struct SharedGlobals { // These are highly shared mostly-read variables. @@ -1636,7 +1639,55 @@ } //------------------------------------------------------------------------------ -// Non-product code +// Debugging code + +void ObjectSynchronizer::sanity_checks(const bool verbose, + const uint cache_line_size, + int *error_cnt_ptr, + int *warning_cnt_ptr) { + u_char *addr_begin = (u_char*)&GVars; + u_char *addr_stwRandom = (u_char*)&GVars.stwRandom; + u_char *addr_hcSequence = (u_char*)&GVars.hcSequence; + + if (verbose) { + tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT, + sizeof(SharedGlobals)); + } + + uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin); + if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom); + + uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin); + if (verbose) { + tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence); + } + + if (cache_line_size != 0) { + // We were able to determine the L1 data cache line size so + // do some cache line specific sanity checks + + if (offset_stwRandom < cache_line_size) { + tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer " + "to the struct beginning than a cache line which permits " + "false sharing."); + (*warning_cnt_ptr)++; + } + + if ((offset_hcSequence - offset_stwRandom) < cache_line_size) { + tty->print_cr("WARNING: the SharedGlobals.stwRandom and " + "SharedGlobals.hcSequence fields are closer than a cache " + "line which permits false sharing."); + (*warning_cnt_ptr)++; + } + + if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) { + tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer " + "to the struct end than a cache line which permits false " + "sharing."); + (*warning_cnt_ptr)++; + } + } +} #ifndef PRODUCT diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/synchronizer.hpp --- a/src/share/vm/runtime/synchronizer.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/synchronizer.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -121,6 +121,9 @@ static void oops_do(OopClosure* f); // debugging + static void sanity_checks(const bool verbose, + const unsigned int cache_line_size, + int *error_cnt_ptr, int *warning_cnt_ptr); static void verify() PRODUCT_RETURN; static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0; diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/vm_version.cpp --- a/src/share/vm/runtime/vm_version.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/vm_version.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -50,6 +50,7 @@ bool Abstract_VM_Version::_supports_atomic_getadd4 = false; bool Abstract_VM_Version::_supports_atomic_getadd8 = false; unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U; +unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0; int Abstract_VM_Version::_reserve_for_allocation_prefetch = 0; #ifndef HOTSPOT_RELEASE_VERSION diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/runtime/vm_version.hpp --- a/src/share/vm/runtime/vm_version.hpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/runtime/vm_version.hpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. 
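ObjectSynchronizer::sanity_checks above only warns about the SharedGlobals layout; it does not change it. If the layout were fixed, one way to satisfy all three checks is explicit padding around the hot fields, as in this standalone sketch (64 is an assumed line size and the field names are illustrative, not the real SharedGlobals):

    #include <cstddef>

    const unsigned kLine = 64;  // assumed L1 data cache line size for illustration

    struct PaddedGlobals {
        char         pad_a[kLine];                 // a full line before stw_random
        volatile int stw_random;
        char         pad_b[kLine - sizeof(int)];   // a full line between the counters
        volatile int hc_sequence;
        char         pad_c[kLine - sizeof(int)];   // a full line after hc_sequence
    };

    static_assert(offsetof(PaddedGlobals, stw_random) >= kLine,
                  "stw_random is at least a line from the struct start");
    static_assert(offsetof(PaddedGlobals, hc_sequence) - offsetof(PaddedGlobals, stw_random) >= kLine,
                  "the two counters do not share a cache line");
    static_assert(sizeof(PaddedGlobals) - offsetof(PaddedGlobals, hc_sequence) >= kLine,
                  "hc_sequence is at least a line from the struct end");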
All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,6 +42,7 @@ static bool _supports_atomic_getadd4; static bool _supports_atomic_getadd8; static unsigned int _logical_processors_per_package; + static unsigned int _L1_data_cache_line_size; static int _vm_major_version; static int _vm_minor_version; static int _vm_build_number; @@ -114,6 +115,10 @@ return _logical_processors_per_package; } + static unsigned int L1_data_cache_line_size() { + return _L1_data_cache_line_size; + } + // Need a space at the end of TLAB for prefetch instructions // which may fault when accessing memory outside of heap. static int reserve_for_allocation_prefetch() { diff -r bfa3989eb8d3 -r 1e225dabccc5 src/share/vm/services/lowMemoryDetector.cpp --- a/src/share/vm/services/lowMemoryDetector.cpp Fri Jul 21 20:31:41 2017 -0700 +++ b/src/share/vm/services/lowMemoryDetector.cpp Mon Jul 24 09:32:40 2017 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -298,19 +298,41 @@ Klass* k = Management::sun_management_Sensor_klass(CHECK); instanceKlassHandle sensorKlass (THREAD, k); Handle sensor_h(THREAD, _sensor_obj); - Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, CHECK); + + Symbol* trigger_method_signature; JavaValue result(T_VOID); JavaCallArguments args(sensor_h); args.push_int((int) count); - args.push_oop(usage_h); + + Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, THREAD); + // Call Sensor::trigger(int, MemoryUsage) to send notification to listeners. + // When OOME occurs and fails to allocate MemoryUsage object, call + // Sensor::trigger(int) instead. The pending request will be processed + // but no notification will be sent. + if (HAS_PENDING_EXCEPTION) { + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here"); + CLEAR_PENDING_EXCEPTION; + trigger_method_signature = vmSymbols::int_void_signature(); + } else { + trigger_method_signature = vmSymbols::trigger_method_signature(); + args.push_oop(usage_h); + } JavaCalls::call_virtual(&result, sensorKlass, vmSymbols::trigger_name(), - vmSymbols::trigger_method_signature(), + trigger_method_signature, &args, - CHECK); + THREAD); + + if (HAS_PENDING_EXCEPTION) { + // We just clear the OOM pending exception that we might have encountered + // in Java's triggerAction(), and continue with updating the counters since + // the Java counters have been updated too. + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here"); + CLEAR_PENDING_EXCEPTION; + } } {
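The lowMemoryDetector.cpp change above makes the sensor trigger robust against OOME: if allocating the MemoryUsage object fails, the pending exception is cleared and the int-only trigger overload is called, so the request is still processed without a notification payload; an OOME thrown from the Java side is likewise cleared after the call. The same fall-back shape in a self-contained sketch (plain C++ exceptions stand in for the VM's pending-exception machinery; names are made up):

    #include <cstdio>
    #include <new>

    struct Usage { long used; long committed; };

    // Fallback-aware notification: usage may be null when allocation failed.
    static void trigger(int count, const Usage* usage) {
        if (usage != nullptr) {
            std::printf("trigger(%d) used=%ld\n", count, usage->used);
        } else {
            std::printf("trigger(%d) without usage payload\n", count);
        }
    }

    void low_memory_trigger(int count) {
        Usage* usage = nullptr;
        try {
            usage = new Usage{0, 0};   // may throw std::bad_alloc under memory pressure
        } catch (const std::bad_alloc&) {
            usage = nullptr;           // swallow the OOM, like CLEAR_PENDING_EXCEPTION
        }
        trigger(count, usage);         // the request is processed either way
        delete usage;
    }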