/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
twisti@1020: * twisti@1020: */ twisti@1020: stefank@2314: #include "precompiled.hpp" twisti@4318: #include "asm/macroAssembler.hpp" twisti@4318: #include "asm/macroAssembler.inline.hpp" stefank@2314: #include "memory/resourceArea.hpp" stefank@2314: #include "runtime/java.hpp" stefank@2314: #include "runtime/stubCodeGenerator.hpp" stefank@2314: #include "vm_version_x86.hpp" stefank@2314: #ifdef TARGET_OS_FAMILY_linux stefank@2314: # include "os_linux.inline.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_solaris stefank@2314: # include "os_solaris.inline.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_windows stefank@2314: # include "os_windows.inline.hpp" stefank@2314: #endif never@3156: #ifdef TARGET_OS_FAMILY_bsd never@3156: # include "os_bsd.inline.hpp" never@3156: #endif twisti@1020: twisti@1020: twisti@1020: int VM_Version::_cpu; twisti@1020: int VM_Version::_model; twisti@1020: int VM_Version::_stepping; twisti@1020: int VM_Version::_cpuFeatures; twisti@1020: const char* VM_Version::_features_str = ""; twisti@1020: VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, }; twisti@1020: kvn@6388: // Address of instruction which causes SEGV kvn@6388: address VM_Version::_cpuinfo_segv_addr = 0; kvn@6388: // Address of instruction after the one which causes SEGV kvn@6388: address VM_Version::_cpuinfo_cont_addr = 0; kvn@6388: twisti@1020: static BufferBlob* stub_blob; kvn@6388: static const int stub_size = 600; twisti@1020: twisti@1020: extern "C" { kvn@6537: typedef void (*get_cpu_info_stub_t)(void*); twisti@1020: } kvn@6537: static get_cpu_info_stub_t get_cpu_info_stub = NULL; twisti@1020: twisti@1020: twisti@1020: class VM_Version_StubGenerator: public StubCodeGenerator { twisti@1020: public: twisti@1020: twisti@1020: VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {} twisti@1020: kvn@6537: address generate_get_cpu_info() { twisti@1020: // Flags to test CPU type. 
sla@3587: const uint32_t HS_EFL_AC = 0x40000; sla@3587: const uint32_t HS_EFL_ID = 0x200000; twisti@1020: // Values for when we don't have a CPUID instruction. twisti@1020: const int CPU_FAMILY_SHIFT = 8; twisti@1020: const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); twisti@1020: const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); twisti@1020: kvn@1977: Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4; kvn@3400: Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done; twisti@1020: kvn@6537: StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub"); twisti@1020: # define __ _masm-> twisti@1020: twisti@1020: address start = __ pc(); twisti@1020: twisti@1020: // kvn@6537: // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info); twisti@1020: // twisti@1020: // LP64: rcx and rdx are first and second argument registers on windows twisti@1020: twisti@1020: __ push(rbp); twisti@1020: #ifdef _LP64 twisti@1020: __ mov(rbp, c_rarg0); // cpuid_info address twisti@1020: #else twisti@1020: __ movptr(rbp, Address(rsp, 8)); // cpuid_info address twisti@1020: #endif twisti@1020: __ push(rbx); twisti@1020: __ push(rsi); twisti@1020: __ pushf(); // preserve rbx, and flags twisti@1020: __ pop(rax); twisti@1020: __ push(rax); twisti@1020: __ mov(rcx, rax); twisti@1020: // twisti@1020: // if we are unable to change the AC flag, we have a 386 twisti@1020: // sla@3587: __ xorl(rax, HS_EFL_AC); twisti@1020: __ push(rax); twisti@1020: __ popf(); twisti@1020: __ pushf(); twisti@1020: __ pop(rax); twisti@1020: __ cmpptr(rax, rcx); twisti@1020: __ jccb(Assembler::notEqual, detect_486); twisti@1020: twisti@1020: __ movl(rax, CPU_FAMILY_386); twisti@1020: __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax); twisti@1020: __ jmp(done); twisti@1020: twisti@1020: // twisti@1020: // If we are unable to change the ID flag, we have a 486 which does twisti@1020: // not support the "cpuid" instruction. 
twisti@1020: // twisti@1020: __ bind(detect_486); twisti@1020: __ mov(rax, rcx); sla@3587: __ xorl(rax, HS_EFL_ID); twisti@1020: __ push(rax); twisti@1020: __ popf(); twisti@1020: __ pushf(); twisti@1020: __ pop(rax); twisti@1020: __ cmpptr(rcx, rax); twisti@1020: __ jccb(Assembler::notEqual, detect_586); twisti@1020: twisti@1020: __ bind(cpu486); twisti@1020: __ movl(rax, CPU_FAMILY_486); twisti@1020: __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax); twisti@1020: __ jmp(done); twisti@1020: twisti@1020: // twisti@1020: // At this point, we have a chip which supports the "cpuid" instruction twisti@1020: // twisti@1020: __ bind(detect_586); twisti@1020: __ xorl(rax, rax); twisti@1020: __ cpuid(); twisti@1020: __ orl(rax, rax); twisti@1020: __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input twisti@1020: // value of at least 1, we give up and twisti@1020: // assume a 486 twisti@1020: __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: kvn@1977: __ cmpl(rax, 0xa); // Is cpuid(0xB) supported? 
kvn@1977: __ jccb(Assembler::belowEqual, std_cpuid4); kvn@1977: kvn@1977: // kvn@1977: // cpuid(0xB) Processor Topology kvn@1977: // kvn@1977: __ movl(rax, 0xb); kvn@1977: __ xorl(rcx, rcx); // Threads level kvn@1977: __ cpuid(); kvn@1977: kvn@1977: __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset()))); kvn@1977: __ movl(Address(rsi, 0), rax); kvn@1977: __ movl(Address(rsi, 4), rbx); kvn@1977: __ movl(Address(rsi, 8), rcx); kvn@1977: __ movl(Address(rsi,12), rdx); kvn@1977: kvn@1977: __ movl(rax, 0xb); kvn@1977: __ movl(rcx, 1); // Cores level kvn@1977: __ cpuid(); kvn@1977: __ push(rax); kvn@1977: __ andl(rax, 0x1f); // Determine if valid topology level kvn@1977: __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level kvn@1977: __ andl(rax, 0xffff); kvn@1977: __ pop(rax); kvn@1977: __ jccb(Assembler::equal, std_cpuid4); kvn@1977: kvn@1977: __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset()))); kvn@1977: __ movl(Address(rsi, 0), rax); kvn@1977: __ movl(Address(rsi, 4), rbx); kvn@1977: __ movl(Address(rsi, 8), rcx); kvn@1977: __ movl(Address(rsi,12), rdx); kvn@1977: kvn@1977: __ movl(rax, 0xb); kvn@1977: __ movl(rcx, 2); // Packages level kvn@1977: __ cpuid(); kvn@1977: __ push(rax); kvn@1977: __ andl(rax, 0x1f); // Determine if valid topology level kvn@1977: __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level kvn@1977: __ andl(rax, 0xffff); kvn@1977: __ pop(rax); kvn@1977: __ jccb(Assembler::equal, std_cpuid4); kvn@1977: kvn@1977: __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset()))); kvn@1977: __ movl(Address(rsi, 0), rax); kvn@1977: __ movl(Address(rsi, 4), rbx); kvn@1977: __ movl(Address(rsi, 8), rcx); kvn@1977: __ movl(Address(rsi,12), rdx); twisti@1020: twisti@1020: // twisti@1020: // cpuid(0x4) Deterministic cache params twisti@1020: // kvn@1977: __ bind(std_cpuid4); twisti@1020: __ movl(rax, 4); kvn@1977: __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // 
Is cpuid(0x4) supported? kvn@1977: __ jccb(Assembler::greater, std_cpuid1); kvn@1977: twisti@1020: __ xorl(rcx, rcx); // L1 cache twisti@1020: __ cpuid(); twisti@1020: __ push(rax); twisti@1020: __ andl(rax, 0x1f); // Determine if valid cache parameters used twisti@1020: __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache twisti@1020: __ pop(rax); twisti@1020: __ jccb(Assembler::equal, std_cpuid1); twisti@1020: twisti@1020: __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: twisti@1020: // twisti@1020: // Standard cpuid(0x1) twisti@1020: // twisti@1020: __ bind(std_cpuid1); twisti@1020: __ movl(rax, 1); twisti@1020: __ cpuid(); twisti@1020: __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: kvn@3388: // kvn@3388: // Check if OS has enabled XGETBV instruction to access XCR0 kvn@3388: // (OSXSAVE feature flag) and CPU supports AVX kvn@3388: // kvn@6388: __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx kvn@3388: __ cmpl(rcx, 0x18000000); kvn@6388: __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported kvn@3388: kvn@3388: // kvn@3388: // XCR0, XFEATURE_ENABLED_MASK register kvn@3388: // kvn@3388: __ xorl(rcx, rcx); // zero for XCR0 register kvn@3388: __ xgetbv(); kvn@3388: __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); kvn@3388: __ movl(Address(rsi, 0), rax); kvn@3388: __ movl(Address(rsi, 4), rdx); kvn@3388: kvn@6388: __ andl(rax, 0x6); // xcr0 bits sse | ymm kvn@6388: __ cmpl(rax, 0x6); kvn@6388: __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported kvn@6388: kvn@6388: // kvn@6388: // Some 
OSs have a bug when upper 128bits of YMM kvn@6388: // registers are not restored after a signal processing. kvn@6388: // Generate SEGV here (reference through NULL) kvn@6388: // and check upper YMM bits after it. kvn@6388: // kvn@6388: VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts kvn@6656: intx saved_useavx = UseAVX; kvn@6656: intx saved_usesse = UseSSE; kvn@6656: UseAVX = 1; kvn@6656: UseSSE = 2; kvn@6388: kvn@6388: // load value into all 32 bytes of ymm7 register kvn@6388: __ movl(rcx, VM_Version::ymm_test_value()); kvn@6388: kvn@6388: __ movdl(xmm0, rcx); kvn@6388: __ pshufd(xmm0, xmm0, 0x00); kvn@6388: __ vinsertf128h(xmm0, xmm0, xmm0); kvn@6388: __ vmovdqu(xmm7, xmm0); kvn@6388: #ifdef _LP64 kvn@6388: __ vmovdqu(xmm8, xmm0); kvn@6388: __ vmovdqu(xmm15, xmm0); kvn@6388: #endif kvn@6388: kvn@6388: __ xorl(rsi, rsi); kvn@6388: VM_Version::set_cpuinfo_segv_addr( __ pc() ); kvn@6388: // Generate SEGV kvn@6388: __ movl(rax, Address(rsi, 0)); kvn@6388: kvn@6388: VM_Version::set_cpuinfo_cont_addr( __ pc() ); kvn@6388: // Returns here after signal. Save xmm0 to check it later. kvn@6388: __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset()))); kvn@6388: __ vmovdqu(Address(rsi, 0), xmm0); kvn@6388: __ vmovdqu(Address(rsi, 32), xmm7); kvn@6388: #ifdef _LP64 kvn@6388: __ vmovdqu(Address(rsi, 64), xmm8); kvn@6388: __ vmovdqu(Address(rsi, 96), xmm15); kvn@6388: #endif kvn@6388: kvn@6388: VM_Version::clean_cpuFeatures(); kvn@6656: UseAVX = saved_useavx; kvn@6656: UseSSE = saved_usesse; kvn@6388: kvn@3388: // kvn@3388: // cpuid(0x7) Structured Extended Features kvn@3388: // kvn@3388: __ bind(sef_cpuid); kvn@3388: __ movl(rax, 7); kvn@3388: __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported? 
kvn@3388: __ jccb(Assembler::greater, ext_cpuid); kvn@3388: kvn@3388: __ xorl(rcx, rcx); kvn@3388: __ cpuid(); kvn@3388: __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset()))); kvn@3388: __ movl(Address(rsi, 0), rax); kvn@3388: __ movl(Address(rsi, 4), rbx); kvn@3388: kvn@3388: // kvn@3388: // Extended cpuid(0x80000000) kvn@3388: // kvn@3388: __ bind(ext_cpuid); twisti@1020: __ movl(rax, 0x80000000); twisti@1020: __ cpuid(); twisti@1020: __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported? twisti@1020: __ jcc(Assembler::belowEqual, done); twisti@1020: __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported? twisti@1020: __ jccb(Assembler::belowEqual, ext_cpuid1); phh@3378: __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported? phh@3378: __ jccb(Assembler::belowEqual, ext_cpuid5); twisti@1020: __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported? phh@3378: __ jccb(Assembler::belowEqual, ext_cpuid7); twisti@1020: // twisti@1020: // Extended cpuid(0x80000008) twisti@1020: // twisti@1020: __ movl(rax, 0x80000008); twisti@1020: __ cpuid(); twisti@1020: __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: twisti@1020: // phh@3378: // Extended cpuid(0x80000007) phh@3378: // phh@3378: __ bind(ext_cpuid7); phh@3378: __ movl(rax, 0x80000007); phh@3378: __ cpuid(); phh@3378: __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset()))); phh@3378: __ movl(Address(rsi, 0), rax); phh@3378: __ movl(Address(rsi, 4), rbx); phh@3378: __ movl(Address(rsi, 8), rcx); phh@3378: __ movl(Address(rsi,12), rdx); phh@3378: phh@3378: // twisti@1020: // Extended cpuid(0x80000005) twisti@1020: // twisti@1020: __ bind(ext_cpuid5); twisti@1020: __ movl(rax, 0x80000005); twisti@1020: __ cpuid(); twisti@1020: __ lea(rsi, Address(rbp, 
in_bytes(VM_Version::ext_cpuid5_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: twisti@1020: // twisti@1020: // Extended cpuid(0x80000001) twisti@1020: // twisti@1020: __ bind(ext_cpuid1); twisti@1020: __ movl(rax, 0x80000001); twisti@1020: __ cpuid(); twisti@1020: __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); twisti@1020: __ movl(Address(rsi, 0), rax); twisti@1020: __ movl(Address(rsi, 4), rbx); twisti@1020: __ movl(Address(rsi, 8), rcx); twisti@1020: __ movl(Address(rsi,12), rdx); twisti@1020: twisti@1020: // twisti@1020: // return twisti@1020: // twisti@1020: __ bind(done); twisti@1020: __ popf(); twisti@1020: __ pop(rsi); twisti@1020: __ pop(rbx); twisti@1020: __ pop(rbp); twisti@1020: __ ret(0); twisti@1020: twisti@1020: # undef __ twisti@1020: twisti@1020: return start; twisti@1020: }; twisti@1020: }; twisti@1020: twisti@1020: kvn@6537: void VM_Version::get_cpu_info_wrapper() { kvn@6537: get_cpu_info_stub(&_cpuid_info); kvn@6537: } kvn@6537: kvn@6537: #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED kvn@6537: #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f() kvn@6537: #endif kvn@6537: twisti@1020: void VM_Version::get_processor_features() { twisti@1020: twisti@1020: _cpu = 4; // 486 by default twisti@1020: _model = 0; twisti@1020: _stepping = 0; twisti@1020: _cpuFeatures = 0; twisti@1020: _logical_processors_per_package = 1; kevinw@8966: // i486 internal cache is both I&D and has a 16-byte line size kevinw@8966: _L1_data_cache_line_size = 16; twisti@1020: twisti@1020: if (!Use486InstrsOnly) { twisti@1020: // Get raw processor info kvn@6537: kvn@6537: // Some platforms (like Win*) need a wrapper around here kvn@6537: // in order to properly handle SEGV for YMM registers test. 
kvn@6537: CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper); kvn@6537: twisti@1020: assert_is_initialized(); twisti@1020: _cpu = extended_cpu_family(); twisti@1020: _model = extended_cpu_model(); twisti@1020: _stepping = cpu_stepping(); twisti@1020: twisti@1020: if (cpu_family() > 4) { // it supports CPUID twisti@1020: _cpuFeatures = feature_flags(); twisti@1020: // Logical processors are only available on P4s and above, twisti@1020: // and only if hyperthreading is available. twisti@1020: _logical_processors_per_package = logical_processor_count(); kevinw@8966: _L1_data_cache_line_size = L1_line_size(); twisti@1020: } twisti@1020: } twisti@1020: twisti@1020: _supports_cx8 = supports_cmpxchg8(); roland@4106: // xchg and xadd instructions roland@4106: _supports_atomic_getset4 = true; roland@4106: _supports_atomic_getadd4 = true; roland@4106: LP64_ONLY(_supports_atomic_getset8 = true); roland@4106: LP64_ONLY(_supports_atomic_getadd8 = true); twisti@1020: twisti@1020: #ifdef _LP64 twisti@1020: // OS should support SSE for x64 and hardware should support at least SSE2. twisti@1020: if (!VM_Version::supports_sse2()) { twisti@1020: vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported"); twisti@1020: } roland@1495: // in 64 bit the use of SSE2 is the minimum roland@1495: if (UseSSE < 2) UseSSE = 2; twisti@1020: #endif twisti@1020: kvn@2984: #ifdef AMD64 kvn@2984: // flush_icache_stub have to be generated first. kvn@2984: // That is why Icache line size is hard coded in ICache class, kvn@2984: // see icache_x86.hpp. It is also the reason why we can't use kvn@2984: // clflush instruction in 32-bit VM since it could be running kvn@2984: // on CPU which does not support it. kvn@2984: // kvn@2984: // The only thing we can do is to verify that flushed kvn@2984: // ICache::line_size has correct value. 
kvn@2984: guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported"); kvn@2984: // clflush_size is size in quadwords (8 bytes). kvn@2984: guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported"); kvn@2984: #endif kvn@2984: twisti@1020: // If the OS doesn't support SSE, we can't use this feature even if the HW does twisti@1020: if (!os::supports_sse()) twisti@1020: _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2); twisti@1020: twisti@1020: if (UseSSE < 4) { twisti@1020: _cpuFeatures &= ~CPU_SSE4_1; twisti@1020: _cpuFeatures &= ~CPU_SSE4_2; twisti@1020: } twisti@1020: twisti@1020: if (UseSSE < 3) { twisti@1020: _cpuFeatures &= ~CPU_SSE3; twisti@1020: _cpuFeatures &= ~CPU_SSSE3; twisti@1020: _cpuFeatures &= ~CPU_SSE4A; twisti@1020: } twisti@1020: twisti@1020: if (UseSSE < 2) twisti@1020: _cpuFeatures &= ~CPU_SSE2; twisti@1020: twisti@1020: if (UseSSE < 1) twisti@1020: _cpuFeatures &= ~CPU_SSE; twisti@1020: kvn@3388: if (UseAVX < 2) kvn@3388: _cpuFeatures &= ~CPU_AVX2; kvn@3388: kvn@3388: if (UseAVX < 1) kvn@3388: _cpuFeatures &= ~CPU_AVX; kvn@3388: kvn@4205: if (!UseAES && !FLAG_IS_DEFAULT(UseAES)) kvn@4205: _cpuFeatures &= ~CPU_AES; kvn@4205: twisti@1020: if (logical_processors_per_package() == 1) { twisti@1020: // HT processor could be installed on a system which doesn't support HT. twisti@1020: _cpuFeatures &= ~CPU_HT; twisti@1020: } twisti@1020: twisti@1020: char buf[256]; kvn@7152: jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", twisti@1020: cores_per_cpu(), threads_per_core(), twisti@1020: cpu_family(), _model, _stepping, twisti@1020: (supports_cmov() ? ", cmov" : ""), twisti@1020: (supports_cmpxchg8() ? ", cx8" : ""), twisti@1020: (supports_fxsr() ? ", fxsr" : ""), twisti@1020: (supports_mmx() ? ", mmx" : ""), twisti@1020: (supports_sse() ? 
", sse" : ""), twisti@1020: (supports_sse2() ? ", sse2" : ""), twisti@1020: (supports_sse3() ? ", sse3" : ""), twisti@1020: (supports_ssse3()? ", ssse3": ""), twisti@1020: (supports_sse4_1() ? ", sse4.1" : ""), twisti@1020: (supports_sse4_2() ? ", sse4.2" : ""), twisti@1078: (supports_popcnt() ? ", popcnt" : ""), kvn@3388: (supports_avx() ? ", avx" : ""), kvn@3388: (supports_avx2() ? ", avx2" : ""), kvn@4205: (supports_aes() ? ", aes" : ""), kvn@6429: (supports_clmul() ? ", clmul" : ""), kvn@4410: (supports_erms() ? ", erms" : ""), kvn@6429: (supports_rtm() ? ", rtm" : ""), twisti@1020: (supports_mmx_ext() ? ", mmxext" : ""), kvn@2761: (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), twisti@1210: (supports_lzcnt() ? ", lzcnt": ""), twisti@1020: (supports_sse4a() ? ", sse4a": ""), phh@3378: (supports_ht() ? ", ht": ""), phh@3378: (supports_tsc() ? ", tsc": ""), phh@3378: (supports_tscinv_bit() ? ", tscinvbit": ""), iveresov@6378: (supports_tscinv() ? ", tscinv": ""), iveresov@6378: (supports_bmi1() ? ", bmi1" : ""), kvn@7152: (supports_bmi2() ? ", bmi2" : ""), kvn@7152: (supports_adx() ? ", adx" : "")); twisti@1020: _features_str = strdup(buf); twisti@1020: twisti@1020: // UseSSE is set to the smaller of what hardware supports and what twisti@1020: // the command line requires. I.e., you cannot set UseSSE to 2 on twisti@1020: // older Pentiums which do not support it. 
kvn@3388: if (UseSSE > 4) UseSSE=4; kvn@3388: if (UseSSE < 0) UseSSE=0; kvn@3388: if (!supports_sse4_1()) // Drop to 3 if no SSE4 support twisti@1020: UseSSE = MIN2((intx)3,UseSSE); kvn@3388: if (!supports_sse3()) // Drop to 2 if no SSE3 support twisti@1020: UseSSE = MIN2((intx)2,UseSSE); kvn@3388: if (!supports_sse2()) // Drop to 1 if no SSE2 support twisti@1020: UseSSE = MIN2((intx)1,UseSSE); kvn@3388: if (!supports_sse ()) // Drop to 0 if no SSE support twisti@1020: UseSSE = 0; twisti@1020: kvn@3388: if (UseAVX > 2) UseAVX=2; kvn@3388: if (UseAVX < 0) UseAVX=0; kvn@3388: if (!supports_avx2()) // Drop to 1 if no AVX2 support kvn@3388: UseAVX = MIN2((intx)1,UseAVX); kvn@3388: if (!supports_avx ()) // Drop to 0 if no AVX support kvn@3388: UseAVX = 0; kvn@3388: kvn@4205: // Use AES instructions if available. kvn@4205: if (supports_aes()) { kvn@4205: if (FLAG_IS_DEFAULT(UseAES)) { kvn@4205: UseAES = true; kvn@4205: } kvn@4205: } else if (UseAES) { kvn@4205: if (!FLAG_IS_DEFAULT(UseAES)) kvn@6429: warning("AES instructions are not available on this CPU"); kvn@4205: FLAG_SET_DEFAULT(UseAES, false); kvn@4205: } kvn@4205: drchase@5353: // Use CLMUL instructions if available. 
drchase@5353: if (supports_clmul()) { drchase@5353: if (FLAG_IS_DEFAULT(UseCLMUL)) { drchase@5353: UseCLMUL = true; drchase@5353: } drchase@5353: } else if (UseCLMUL) { drchase@5353: if (!FLAG_IS_DEFAULT(UseCLMUL)) drchase@5353: warning("CLMUL instructions not available on this CPU (AVX may also be required)"); drchase@5353: FLAG_SET_DEFAULT(UseCLMUL, false); drchase@5353: } drchase@5353: kvn@7025: if (UseCLMUL && (UseSSE > 2)) { drchase@5353: if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { drchase@5353: UseCRC32Intrinsics = true; drchase@5353: } drchase@5353: } else if (UseCRC32Intrinsics) { drchase@5353: if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) kvn@7152: warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)"); drchase@5353: FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); drchase@5353: } drchase@5353: kvn@4205: // The AES intrinsic stubs require AES instruction support (of course) kvn@4363: // but also require sse3 mode for instructions it use. kvn@4363: if (UseAES && (UseSSE > 2)) { kvn@4205: if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { kvn@4205: UseAESIntrinsics = true; kvn@4205: } kvn@4205: } else if (UseAESIntrinsics) { kvn@4205: if (!FLAG_IS_DEFAULT(UseAESIntrinsics)) kvn@6429: warning("AES intrinsics are not available on this CPU"); kvn@4205: FLAG_SET_DEFAULT(UseAESIntrinsics, false); kvn@4205: } kvn@4205: kvn@7027: if (UseSHA) { kvn@7027: warning("SHA instructions are not available on this CPU"); kvn@7027: FLAG_SET_DEFAULT(UseSHA, false); kvn@7027: } kvn@7027: if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) { kvn@7027: warning("SHA intrinsics are not available on this CPU"); kvn@7027: FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); kvn@7027: FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); kvn@7027: FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); kvn@7027: } kvn@7027: kvn@6429: // Adjust RTM (Restricted Transactional Memory) flags kvn@6429: if (!supports_rtm() && UseRTMLocking) { kvn@6429: // Can't continue because UseRTMLocking 
affects UseBiasedLocking flag kvn@6429: // setting during arguments processing. See use_biased_locking(). kvn@6429: // VM_Version_init() is executed after UseBiasedLocking is used kvn@6429: // in Thread::allocate(). kvn@6429: vm_exit_during_initialization("RTM instructions are not available on this CPU"); kvn@6429: } kvn@6429: kvn@6429: #if INCLUDE_RTM_OPT kvn@6429: if (UseRTMLocking) { kvn@7088: if (is_intel_family_core()) { kvn@7088: if ((_model == CPU_MODEL_HASWELL_E3) || kvn@7088: (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) || kvn@7088: (_model == CPU_MODEL_BROADWELL && _stepping < 4)) { kvn@7088: if (!UnlockExperimentalVMOptions) { kvn@7088: vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag."); kvn@7088: } else { kvn@7088: warning("UseRTMLocking is only available as experimental option on this platform."); kvn@7088: } kvn@7088: } kvn@7088: } kvn@6429: if (!FLAG_IS_CMDLINE(UseRTMLocking)) { kvn@6429: // RTM locking should be used only for applications with kvn@6429: // high lock contention. For now we do not use it by default. 
kvn@6429: vm_exit_during_initialization("UseRTMLocking flag should be only set on command line"); kvn@6429: } kvn@6429: if (!is_power_of_2(RTMTotalCountIncrRate)) { kvn@6429: warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64"); kvn@6429: FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64); kvn@6429: } kvn@6429: if (RTMAbortRatio < 0 || RTMAbortRatio > 100) { kvn@6429: warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50"); kvn@6429: FLAG_SET_DEFAULT(RTMAbortRatio, 50); kvn@6429: } kvn@6429: } else { // !UseRTMLocking kvn@6429: if (UseRTMForStackLocks) { kvn@6429: if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) { kvn@6429: warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off"); kvn@6429: } kvn@6429: FLAG_SET_DEFAULT(UseRTMForStackLocks, false); kvn@6429: } kvn@6429: if (UseRTMDeopt) { kvn@6429: FLAG_SET_DEFAULT(UseRTMDeopt, false); kvn@6429: } kvn@6429: if (PrintPreciseRTMLockingStatistics) { kvn@6429: FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false); kvn@6429: } kvn@6429: } kvn@6429: #else kvn@6429: if (UseRTMLocking) { kvn@6429: // Only C2 does RTM locking optimization. kvn@6429: // Can't continue because UseRTMLocking affects UseBiasedLocking flag kvn@6429: // setting during arguments processing. See use_biased_locking(). 
kvn@6429: vm_exit_during_initialization("RTM locking optimization is not supported in this VM"); kvn@6429: } kvn@6429: #endif kvn@6429: kvn@3882: #ifdef COMPILER2 kvn@3882: if (UseFPUForSpilling) { kvn@3882: if (UseSSE < 2) { kvn@3882: // Only supported with SSE2+ kvn@3882: FLAG_SET_DEFAULT(UseFPUForSpilling, false); kvn@3882: } kvn@3882: } kvn@3882: if (MaxVectorSize > 0) { kvn@3882: if (!is_power_of_2(MaxVectorSize)) { kvn@3882: warning("MaxVectorSize must be a power of 2"); kvn@3882: FLAG_SET_DEFAULT(MaxVectorSize, 32); kvn@3882: } kvn@3882: if (MaxVectorSize > 32) { kvn@3882: FLAG_SET_DEFAULT(MaxVectorSize, 32); kvn@3882: } kvn@6388: if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) { kvn@6388: // 32 bytes vectors (in YMM) are only supported with AVX+ kvn@3882: FLAG_SET_DEFAULT(MaxVectorSize, 16); kvn@3882: } kvn@3882: if (UseSSE < 2) { kvn@6388: // Vectors (in XMM) are only supported with SSE2+ kvn@3882: FLAG_SET_DEFAULT(MaxVectorSize, 0); kvn@3882: } kvn@6388: #ifdef ASSERT kvn@6388: if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) { kvn@6388: tty->print_cr("State of YMM registers after signal handle:"); kvn@6388: int nreg = 2 LP64_ONLY(+2); kvn@6388: const char* ymm_name[4] = {"0", "7", "8", "15"}; kvn@6388: for (int i = 0; i < nreg; i++) { kvn@6388: tty->print("YMM%s:", ymm_name[i]); kvn@6388: for (int j = 7; j >=0; j--) { kvn@6388: tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]); kvn@6388: } kvn@6388: tty->cr(); kvn@6388: } kvn@6388: } kvn@6388: #endif kvn@3882: } kvn@7152: kvn@7152: #ifdef _LP64 kvn@7152: if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { kvn@7152: UseMultiplyToLenIntrinsic = true; kvn@7152: } igerasim@8307: if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) { vkempik@8637: UseSquareToLenIntrinsic = true; igerasim@8307: } igerasim@8307: if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) { vkempik@8637: UseMulAddIntrinsic = true; igerasim@8307: } vkempik@8318: if 
(FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { vkempik@8637: UseMontgomeryMultiplyIntrinsic = true; vkempik@8318: } vkempik@8318: if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { vkempik@8637: UseMontgomerySquareIntrinsic = true; vkempik@8318: } kvn@7152: #else kvn@7152: if (UseMultiplyToLenIntrinsic) { kvn@7152: if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { kvn@7152: warning("multiplyToLen intrinsic is not available in 32-bit VM"); kvn@7152: } kvn@7152: FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false); kvn@7152: } igerasim@8307: if (UseSquareToLenIntrinsic) { igerasim@8307: if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) { igerasim@8307: warning("squareToLen intrinsic is not available in 32-bit VM"); igerasim@8307: } igerasim@8307: FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false); igerasim@8307: } igerasim@8307: if (UseMulAddIntrinsic) { igerasim@8307: if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) { igerasim@8307: warning("mulAdd intrinsic is not available in 32-bit VM"); igerasim@8307: } igerasim@8307: FLAG_SET_DEFAULT(UseMulAddIntrinsic, false); igerasim@8307: } vkempik@8318: if (UseMontgomeryMultiplyIntrinsic) { vkempik@8318: if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { vkempik@8318: warning("montgomeryMultiply intrinsic is not available in 32-bit VM"); vkempik@8318: } vkempik@8318: FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false); vkempik@8318: } vkempik@8318: if (UseMontgomerySquareIntrinsic) { vkempik@8318: if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { vkempik@8318: warning("montgomerySquare intrinsic is not available in 32-bit VM"); vkempik@8318: } vkempik@8318: FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false); vkempik@8318: } kvn@3882: #endif kvn@7152: #endif // COMPILER2 kvn@3882: twisti@1020: // On new cpus instructions which update whole XMM register should be used twisti@1020: // to prevent partial register stall due to dependencies on high half. 
twisti@1020: // twisti@1020: // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem) twisti@1020: // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem) twisti@1020: // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm). twisti@1020: // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm). twisti@1020: twisti@1020: if( is_amd() ) { // AMD cpus specific settings twisti@1020: if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) { twisti@1020: // Use it on new AMD cpus starting from Opteron. twisti@1020: UseAddressNop = true; twisti@1020: } twisti@1020: if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) { twisti@1020: // Use it on new AMD cpus starting from Opteron. twisti@1020: UseNewLongLShift = true; twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) { twisti@1020: if( supports_sse4a() ) { twisti@1020: UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron twisti@1020: } else { twisti@1020: UseXmmLoadAndClearUpper = false; twisti@1020: } twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) { twisti@1020: if( supports_sse4a() ) { twisti@1020: UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h' twisti@1020: } else { twisti@1020: UseXmmRegToRegMoveAll = false; twisti@1020: } twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmI2F) ) { twisti@1020: if( supports_sse4a() ) { twisti@1020: UseXmmI2F = true; twisti@1020: } else { twisti@1020: UseXmmI2F = false; twisti@1020: } twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmI2D) ) { twisti@1020: if( supports_sse4a() ) { twisti@1020: UseXmmI2D = true; twisti@1020: } else { twisti@1020: UseXmmI2D = false; twisti@1020: } twisti@1020: } kvn@2688: if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) { kvn@2688: if( supports_sse4_2() && UseSSE >= 4 ) { kvn@2688: UseSSE42Intrinsics = true; kvn@2688: } kvn@2688: } twisti@1210: kvn@2808: // some defaults for AMD family 15h kvn@2808: if ( cpu_family() == 0x15 ) { kvn@2808: 
// On family 15h processors default is no sw prefetch kvn@2640: if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { kvn@2640: AllocatePrefetchStyle = 0; kvn@2640: } kvn@2808: // Also, if some other prefetch style is specified, default instruction type is PREFETCHW kvn@2808: if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { kvn@2808: AllocatePrefetchInstr = 3; kvn@2808: } kvn@2808: // On family 15h processors use XMM and UnalignedLoadStores for Array Copy kvn@4105: if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { kvn@2808: UseXMMForArrayCopy = true; kvn@2808: } kvn@4105: if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { kvn@2808: UseUnalignedLoadStores = true; kvn@2808: } kvn@2640: } kvn@2808: kvn@3882: #ifdef COMPILER2 kvn@3882: if (MaxVectorSize > 16) { kvn@3882: // Limit vectors size to 16 bytes on current AMD cpus. kvn@3882: FLAG_SET_DEFAULT(MaxVectorSize, 16); kvn@3882: } kvn@3882: #endif // COMPILER2 twisti@1020: } twisti@1020: twisti@1020: if( is_intel() ) { // Intel cpus specific settings twisti@1020: if( FLAG_IS_DEFAULT(UseStoreImmI16) ) { twisti@1020: UseStoreImmI16 = false; // don't use it on Intel cpus twisti@1020: } twisti@1020: if( cpu_family() == 6 || cpu_family() == 15 ) { twisti@1020: if( FLAG_IS_DEFAULT(UseAddressNop) ) { twisti@1020: // Use it on all Intel cpus starting from PentiumPro twisti@1020: UseAddressNop = true; twisti@1020: } twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) { twisti@1020: UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus twisti@1020: } twisti@1020: if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) { twisti@1020: if( supports_sse3() ) { twisti@1020: UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus twisti@1020: } else { twisti@1020: UseXmmRegToRegMoveAll = false; twisti@1020: } twisti@1020: } twisti@1020: if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus twisti@1020: #ifdef COMPILER2 twisti@1020: if( FLAG_IS_DEFAULT(MaxLoopPad) ) { 
twisti@1020: // For new Intel cpus do the next optimization: twisti@1020: // don't align the beginning of a loop if there are enough instructions twisti@1020: // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp) twisti@1020: // in current fetch line (OptoLoopAlignment) or the padding twisti@1020: // is big (> MaxLoopPad). twisti@1020: // Set MaxLoopPad to 11 for new Intel cpus to reduce number of twisti@1020: // generated NOP instructions. 11 is the largest size of one twisti@1020: // address NOP instruction '0F 1F' (see Assembler::nop(i)). twisti@1020: MaxLoopPad = 11; twisti@1020: } twisti@1020: #endif // COMPILER2 kvn@4105: if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { twisti@1020: UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus twisti@1020: } kvn@4105: if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus kvn@4105: if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { twisti@1020: UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus twisti@1020: } twisti@1020: } kvn@4105: if (supports_sse4_2() && UseSSE >= 4) { kvn@4105: if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) { cfang@1116: UseSSE42Intrinsics = true; cfang@1116: } cfang@1116: } twisti@1020: } kvn@7025: if ((cpu_family() == 0x06) && kvn@7025: ((extended_cpu_model() == 0x36) || // Centerton kvn@7025: (extended_cpu_model() == 0x37) || // Silvermont kvn@7025: (extended_cpu_model() == 0x4D))) { kvn@7025: #ifdef COMPILER2 kvn@7025: if (FLAG_IS_DEFAULT(OptoScheduling)) { kvn@7025: OptoScheduling = true; kvn@7025: } kvn@7025: #endif kvn@7025: if (supports_sse4_2()) { // Silvermont kvn@7025: if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { kvn@7025: UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus kvn@7025: } kvn@7025: } kvn@7025: } kvn@7152: if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) { kvn@7152: AllocatePrefetchInstr = 3; kvn@7152: } twisti@1020: } twisti@1020: iveresov@6378: // Use count leading zeros count instruction if available. 
iveresov@6378: if (supports_lzcnt()) { iveresov@6378: if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) { iveresov@6378: UseCountLeadingZerosInstruction = true; iveresov@6378: } iveresov@6378: } else if (UseCountLeadingZerosInstruction) { iveresov@6378: warning("lzcnt instruction is not available on this CPU"); iveresov@6378: FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false); iveresov@6378: } iveresov@6378: kvn@7152: // Use count trailing zeros instruction if available iveresov@6378: if (supports_bmi1()) { kvn@7152: // tzcnt does not require VEX prefix kvn@7152: if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { kvn@7269: if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) { kvn@7269: // Don't use tzcnt if BMI1 is switched off on command line. kvn@7269: UseCountTrailingZerosInstruction = false; kvn@7269: } else { kvn@7269: UseCountTrailingZerosInstruction = true; kvn@7269: } kvn@7152: } kvn@7152: } else if (UseCountTrailingZerosInstruction) { kvn@7152: warning("tzcnt instruction is not available on this CPU"); kvn@7152: FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); kvn@7152: } kvn@7152: kvn@7269: // BMI instructions (except tzcnt) use an encoding with VEX prefix. kvn@7152: // VEX prefix is generated only when AVX > 0. 
kvn@7152: if (supports_bmi1() && supports_avx()) { iveresov@6378: if (FLAG_IS_DEFAULT(UseBMI1Instructions)) { iveresov@6378: UseBMI1Instructions = true; iveresov@6378: } iveresov@6378: } else if (UseBMI1Instructions) { kvn@7152: warning("BMI1 instructions are not available on this CPU (AVX is also required)"); iveresov@6378: FLAG_SET_DEFAULT(UseBMI1Instructions, false); iveresov@6378: } iveresov@6378: kvn@7152: if (supports_bmi2() && supports_avx()) { kvn@7152: if (FLAG_IS_DEFAULT(UseBMI2Instructions)) { kvn@7152: UseBMI2Instructions = true; iveresov@6378: } kvn@7152: } else if (UseBMI2Instructions) { kvn@7152: warning("BMI2 instructions are not available on this CPU (AVX is also required)"); kvn@7152: FLAG_SET_DEFAULT(UseBMI2Instructions, false); iveresov@6378: } iveresov@6378: twisti@1078: // Use population count instruction if available. twisti@1078: if (supports_popcnt()) { twisti@1078: if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { twisti@1078: UsePopCountInstruction = true; twisti@1078: } kvn@3388: } else if (UsePopCountInstruction) { kvn@3388: warning("POPCNT instruction is not available on this CPU"); kvn@3388: FLAG_SET_DEFAULT(UsePopCountInstruction, false); twisti@1078: } twisti@1078: kvn@4410: // Use fast-string operations if available. kvn@4410: if (supports_erms()) { kvn@4410: if (FLAG_IS_DEFAULT(UseFastStosb)) { kvn@4410: UseFastStosb = true; kvn@4410: } kvn@4410: } else if (UseFastStosb) { kvn@4410: warning("fast-string operations are not available on this CPU"); kvn@4410: FLAG_SET_DEFAULT(UseFastStosb, false); kvn@4410: } kvn@4410: kvn@4105: #ifdef COMPILER2 kvn@4105: if (FLAG_IS_DEFAULT(AlignVector)) { kvn@4105: // Modern processors allow misaligned memory operations for vectors. 
kvn@4105: AlignVector = !UseUnalignedLoadStores; kvn@4105: } kvn@4105: #endif // COMPILER2 kvn@4105: twisti@1020: assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value"); twisti@1020: assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value"); twisti@1020: twisti@1020: // set valid Prefetch instruction twisti@1020: if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0; twisti@1020: if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3; kvn@2761: if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0; kvn@2761: if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3; twisti@1020: twisti@1020: if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0; twisti@1020: if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3; kvn@2761: if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0; kvn@2761: if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3; twisti@1020: twisti@1020: // Allocation prefetch settings kvn@3052: intx cache_line_size = prefetch_data_size(); twisti@1020: if( cache_line_size > AllocatePrefetchStepSize ) twisti@1020: AllocatePrefetchStepSize = cache_line_size; kvn@3052: twisti@1020: assert(AllocatePrefetchLines > 0, "invalid value"); kvn@3052: if( AllocatePrefetchLines < 1 ) // set valid value in product VM kvn@3052: AllocatePrefetchLines = 3; kvn@3052: assert(AllocateInstancePrefetchLines > 0, "invalid value"); kvn@3052: if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM kvn@3052: AllocateInstancePrefetchLines = 1; twisti@1020: twisti@1020: AllocatePrefetchDistance = allocate_prefetch_distance(); twisti@1020: AllocatePrefetchStyle = allocate_prefetch_style(); twisti@1020: kvn@7025: if (is_intel() && cpu_family() == 6 && supports_sse3()) { kvn@7025: if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core twisti@1020: #ifdef _LP64 kvn@1977: AllocatePrefetchDistance = 384; twisti@1020: 
#else kvn@1977: AllocatePrefetchDistance = 320; twisti@1020: #endif kvn@1977: } kvn@7025: if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus kvn@1977: AllocatePrefetchDistance = 192; kvn@1977: AllocatePrefetchLines = 4; kvn@7025: } never@2085: #ifdef COMPILER2 kvn@7025: if (supports_sse4_2()) { kvn@7025: if (FLAG_IS_DEFAULT(UseFPUForSpilling)) { never@2085: FLAG_SET_DEFAULT(UseFPUForSpilling, true); never@2085: } kvn@7025: } never@2085: #endif twisti@1020: } twisti@1020: assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value"); twisti@1020: twisti@1020: #ifdef _LP64 twisti@1020: // Prefetch settings twisti@1020: PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes(); twisti@1020: PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes(); twisti@1020: PrefetchFieldsAhead = prefetch_fields_ahead(); twisti@1020: #endif twisti@1020: jwilhelm@4430: if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && jwilhelm@4430: (cache_line_size > ContendedPaddingWidth)) jwilhelm@4430: ContendedPaddingWidth = cache_line_size; jwilhelm@4430: twisti@1020: #ifndef PRODUCT twisti@1020: if (PrintMiscellaneous && Verbose) { twisti@1020: tty->print_cr("Logical CPUs per core: %u", twisti@1020: logical_processors_per_package()); kevinw@8966: tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size()); drchase@6680: tty->print("UseSSE=%d", (int) UseSSE); kvn@3388: if (UseAVX > 0) { drchase@6680: tty->print(" UseAVX=%d", (int) UseAVX); kvn@3388: } kvn@4205: if (UseAES) { kvn@4205: tty->print(" UseAES=1"); kvn@4205: } kvn@6388: #ifdef COMPILER2 kvn@6388: if (MaxVectorSize > 0) { drchase@6680: tty->print(" MaxVectorSize=%d", (int) MaxVectorSize); kvn@6388: } kvn@6388: #endif kvn@3388: tty->cr(); kvn@3052: tty->print("Allocation"); kvn@2761: if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) { kvn@3052: tty->print_cr(": no prefetching"); twisti@1020: } else { kvn@3052: tty->print(" prefetching: "); kvn@2761: if 
(UseSSE == 0 && supports_3dnow_prefetch()) {
        // No SSE, but 3DNow!-style prefetch is available: PREFETCHW is used.
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        // With SSE available the instruction is selected by AllocatePrefetchInstr
        // (validated to 0..3 earlier in this function).
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      // Report the distance / line count / step size of the allocation
      // prefetch policy chosen above.
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    // Print the remaining prefetch/padding intervals only when they are in use.
    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

// Decide whether biased locking should be used on this platform.
// Returns the (possibly adjusted) value of the UseBiasedLocking flag;
// when RTM locking is enabled the flag is forced off, since the RTM
// locking code requires biased locking to be disabled.
bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      // Flag was left at its default: silently turn it off.
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      // Flag was set explicitly on the command line: warn before overriding.
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// Entry point for x86 CPU feature detection: generates the CPUID probe
// stub, then uses it to populate the feature flags via
// get_processor_features().
void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  // Generate the stub into the blob and keep a callable pointer to it;
  // get_processor_features() invokes it to execute CPUID.
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
}