Thu, 03 Apr 2014 12:37:53 +0200
8039146: Fix 64-bit store to int JNIHandleBlock::_top
Reviewed-by: coleenp, kvn
twisti@1020 | 1 | /* |
drchase@5353 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
twisti@1020 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
twisti@1020 | 4 | * |
twisti@1020 | 5 | * This code is free software; you can redistribute it and/or modify it |
twisti@1020 | 6 | * under the terms of the GNU General Public License version 2 only, as |
twisti@1020 | 7 | * published by the Free Software Foundation. |
twisti@1020 | 8 | * |
twisti@1020 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
twisti@1020 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
twisti@1020 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
twisti@1020 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
twisti@1020 | 13 | * accompanied this code). |
twisti@1020 | 14 | * |
twisti@1020 | 15 | * You should have received a copy of the GNU General Public License version |
twisti@1020 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
twisti@1020 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
twisti@1020 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
twisti@1020 | 22 | * |
twisti@1020 | 23 | */ |
twisti@1020 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
twisti@4318 | 26 | #include "asm/macroAssembler.hpp" |
twisti@4318 | 27 | #include "asm/macroAssembler.inline.hpp" |
stefank@2314 | 28 | #include "memory/resourceArea.hpp" |
stefank@2314 | 29 | #include "runtime/java.hpp" |
stefank@2314 | 30 | #include "runtime/stubCodeGenerator.hpp" |
stefank@2314 | 31 | #include "vm_version_x86.hpp" |
stefank@2314 | 32 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 33 | # include "os_linux.inline.hpp" |
stefank@2314 | 34 | #endif |
stefank@2314 | 35 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 36 | # include "os_solaris.inline.hpp" |
stefank@2314 | 37 | #endif |
stefank@2314 | 38 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 39 | # include "os_windows.inline.hpp" |
stefank@2314 | 40 | #endif |
never@3156 | 41 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 42 | # include "os_bsd.inline.hpp" |
never@3156 | 43 | #endif |
twisti@1020 | 44 | |
twisti@1020 | 45 | |
twisti@1020 | 46 | int VM_Version::_cpu; // CPU family id (set from extended_cpu_family()) |
twisti@1020 | 47 | int VM_Version::_model; // CPU model id (set from extended_cpu_model()) |
twisti@1020 | 48 | int VM_Version::_stepping; // CPU stepping (set from cpu_stepping()) |
twisti@1020 | 49 | int VM_Version::_cpuFeatures; // bit set of CPU_* feature flags (see feature_flags()) |
twisti@1020 | 50 | const char* VM_Version::_features_str = ""; // human-readable feature summary (strdup'ed in get_processor_features) |
twisti@1020 | 51 | VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, }; // raw cpuid output, filled in by the generated stub |
twisti@1020 | 52 | |
kvn@6388 | 53 | // Address of instruction which causes SEGV |
kvn@6388 | 54 | address VM_Version::_cpuinfo_segv_addr = 0; |
kvn@6388 | 55 | // Address of instruction after the one which causes SEGV |
kvn@6388 | 56 | address VM_Version::_cpuinfo_cont_addr = 0; |
kvn@6388 | 57 | |
twisti@1020 | 58 | static BufferBlob* stub_blob; // code blob holding the generated cpuid stub |
kvn@6388 | 59 | static const int stub_size = 600; // capacity of the stub's code buffer, in bytes |
twisti@1020 | 60 | |
twisti@1020 | 61 | extern "C" { |
kvn@6537 | 62 | typedef void (*get_cpu_info_stub_t)(void*); // stub signature; the argument is a VM_Version::CpuidInfo* |
twisti@1020 | 63 | } |
kvn@6537 | 64 | static get_cpu_info_stub_t get_cpu_info_stub = NULL; // entry of the generated stub; NULL until initialized (elsewhere — not in view) |
twisti@1020 | 65 | |
twisti@1020 | 66 | |
twisti@1020 | 67 | class VM_Version_StubGenerator: public StubCodeGenerator { // Emits the one-off stub that fills VM_Version::_cpuid_info. |
twisti@1020 | 68 | public: |
twisti@1020 | 69 | |
twisti@1020 | 70 | VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {} |
twisti@1020 | 71 | |
kvn@6537 | 72 | address generate_get_cpu_info() { // Returns the stub entry point: void (*)(VM_Version::CpuidInfo*). |
twisti@1020 | 73 | // Flags to test CPU type. |
sla@3587 | 74 | const uint32_t HS_EFL_AC = 0x40000; // EFLAGS.AC (bit 18) |
sla@3587 | 75 | const uint32_t HS_EFL_ID = 0x200000; // EFLAGS.ID (bit 21) |
twisti@1020 | 76 | // Values for when we don't have a CPUID instruction. |
twisti@1020 | 77 | const int CPU_FAMILY_SHIFT = 8; |
twisti@1020 | 78 | const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); |
twisti@1020 | 79 | const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); |
twisti@1020 | 80 | |
kvn@1977 | 81 | Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4; |
kvn@3400 | 82 | Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done; |
twisti@1020 | 83 | |
kvn@6537 | 84 | StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub"); |
twisti@1020 | 85 | # define __ _masm-> |
twisti@1020 | 86 | |
twisti@1020 | 87 | address start = __ pc(); |
twisti@1020 | 88 | |
twisti@1020 | 89 | // |
kvn@6537 | 90 | // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info); |
twisti@1020 | 91 | // |
twisti@1020 | 92 | // LP64: rcx and rdx are first and second argument registers on windows |
twisti@1020 | 93 | |
twisti@1020 | 94 | __ push(rbp); |
twisti@1020 | 95 | #ifdef _LP64 |
twisti@1020 | 96 | __ mov(rbp, c_rarg0); // cpuid_info address |
twisti@1020 | 97 | #else |
twisti@1020 | 98 | __ movptr(rbp, Address(rsp, 8)); // cpuid_info address |
twisti@1020 | 99 | #endif |
twisti@1020 | 100 | __ push(rbx); |
twisti@1020 | 101 | __ push(rsi); |
twisti@1020 | 102 | __ pushf(); // preserve rbx, and flags |
twisti@1020 | 103 | __ pop(rax); |
twisti@1020 | 104 | __ push(rax); |
twisti@1020 | 105 | __ mov(rcx, rax); |
twisti@1020 | 106 | // |
twisti@1020 | 107 | // if we are unable to change the AC flag, we have a 386 |
twisti@1020 | 108 | // |
sla@3587 | 109 | __ xorl(rax, HS_EFL_AC); |
twisti@1020 | 110 | __ push(rax); |
twisti@1020 | 111 | __ popf(); |
twisti@1020 | 112 | __ pushf(); |
twisti@1020 | 113 | __ pop(rax); |
twisti@1020 | 114 | __ cmpptr(rax, rcx); |
twisti@1020 | 115 | __ jccb(Assembler::notEqual, detect_486); |
twisti@1020 | 116 | |
twisti@1020 | 117 | __ movl(rax, CPU_FAMILY_386); |
twisti@1020 | 118 | __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax); |
twisti@1020 | 119 | __ jmp(done); |
twisti@1020 | 120 | |
twisti@1020 | 121 | // |
twisti@1020 | 122 | // If we are unable to change the ID flag, we have a 486 which does |
twisti@1020 | 123 | // not support the "cpuid" instruction. |
twisti@1020 | 124 | // |
twisti@1020 | 125 | __ bind(detect_486); |
twisti@1020 | 126 | __ mov(rax, rcx); |
sla@3587 | 127 | __ xorl(rax, HS_EFL_ID); |
twisti@1020 | 128 | __ push(rax); |
twisti@1020 | 129 | __ popf(); |
twisti@1020 | 130 | __ pushf(); |
twisti@1020 | 131 | __ pop(rax); |
twisti@1020 | 132 | __ cmpptr(rcx, rax); |
twisti@1020 | 133 | __ jccb(Assembler::notEqual, detect_586); |
twisti@1020 | 134 | |
twisti@1020 | 135 | __ bind(cpu486); |
twisti@1020 | 136 | __ movl(rax, CPU_FAMILY_486); |
twisti@1020 | 137 | __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax); |
twisti@1020 | 138 | __ jmp(done); |
twisti@1020 | 139 | |
twisti@1020 | 140 | // |
twisti@1020 | 141 | // At this point, we have a chip which supports the "cpuid" instruction |
twisti@1020 | 142 | // |
twisti@1020 | 143 | __ bind(detect_586); |
twisti@1020 | 144 | __ xorl(rax, rax); // cpuid leaf 0 |
twisti@1020 | 145 | __ cpuid(); |
twisti@1020 | 146 | __ orl(rax, rax); |
twisti@1020 | 147 | __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input |
twisti@1020 | 148 | // value of at least 1, we give up and |
twisti@1020 | 149 | // assume a 486 |
twisti@1020 | 150 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); |
twisti@1020 | 151 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 152 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 153 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 154 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 155 | |
kvn@1977 | 156 | __ cmpl(rax, 0xa); // Is cpuid(0xB) supported? |
kvn@1977 | 157 | __ jccb(Assembler::belowEqual, std_cpuid4); |
kvn@1977 | 158 | |
kvn@1977 | 159 | // |
kvn@1977 | 160 | // cpuid(0xB) Processor Topology |
kvn@1977 | 161 | // |
kvn@1977 | 162 | __ movl(rax, 0xb); |
kvn@1977 | 163 | __ xorl(rcx, rcx); // Threads level |
kvn@1977 | 164 | __ cpuid(); |
kvn@1977 | 165 | |
kvn@1977 | 166 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset()))); |
kvn@1977 | 167 | __ movl(Address(rsi, 0), rax); |
kvn@1977 | 168 | __ movl(Address(rsi, 4), rbx); |
kvn@1977 | 169 | __ movl(Address(rsi, 8), rcx); |
kvn@1977 | 170 | __ movl(Address(rsi,12), rdx); |
kvn@1977 | 171 | |
kvn@1977 | 172 | __ movl(rax, 0xb); |
kvn@1977 | 173 | __ movl(rcx, 1); // Cores level |
kvn@1977 | 174 | __ cpuid(); |
kvn@1977 | 175 | __ push(rax); |
kvn@1977 | 176 | __ andl(rax, 0x1f); // Determine if valid topology level |
kvn@1977 | 177 | __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level |
kvn@1977 | 178 | __ andl(rax, 0xffff); |
kvn@1977 | 179 | __ pop(rax); |
kvn@1977 | 180 | __ jccb(Assembler::equal, std_cpuid4); |
kvn@1977 | 181 | |
kvn@1977 | 182 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset()))); |
kvn@1977 | 183 | __ movl(Address(rsi, 0), rax); |
kvn@1977 | 184 | __ movl(Address(rsi, 4), rbx); |
kvn@1977 | 185 | __ movl(Address(rsi, 8), rcx); |
kvn@1977 | 186 | __ movl(Address(rsi,12), rdx); |
kvn@1977 | 187 | |
kvn@1977 | 188 | __ movl(rax, 0xb); |
kvn@1977 | 189 | __ movl(rcx, 2); // Packages level |
kvn@1977 | 190 | __ cpuid(); |
kvn@1977 | 191 | __ push(rax); |
kvn@1977 | 192 | __ andl(rax, 0x1f); // Determine if valid topology level |
kvn@1977 | 193 | __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level |
kvn@1977 | 194 | __ andl(rax, 0xffff); |
kvn@1977 | 195 | __ pop(rax); |
kvn@1977 | 196 | __ jccb(Assembler::equal, std_cpuid4); |
kvn@1977 | 197 | |
kvn@1977 | 198 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset()))); |
kvn@1977 | 199 | __ movl(Address(rsi, 0), rax); |
kvn@1977 | 200 | __ movl(Address(rsi, 4), rbx); |
kvn@1977 | 201 | __ movl(Address(rsi, 8), rcx); |
kvn@1977 | 202 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 203 | |
twisti@1020 | 204 | // |
twisti@1020 | 205 | // cpuid(0x4) Deterministic cache params |
twisti@1020 | 206 | // |
kvn@1977 | 207 | __ bind(std_cpuid4); |
twisti@1020 | 208 | __ movl(rax, 4); |
kvn@1977 | 209 | __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported? |
kvn@1977 | 210 | __ jccb(Assembler::greater, std_cpuid1); |
kvn@1977 | 211 | |
twisti@1020 | 212 | __ xorl(rcx, rcx); // L1 cache |
twisti@1020 | 213 | __ cpuid(); |
twisti@1020 | 214 | __ push(rax); |
twisti@1020 | 215 | __ andl(rax, 0x1f); // Determine if valid cache parameters used |
twisti@1020 | 216 | __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache |
twisti@1020 | 217 | __ pop(rax); |
twisti@1020 | 218 | __ jccb(Assembler::equal, std_cpuid1); |
twisti@1020 | 219 | |
twisti@1020 | 220 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); |
twisti@1020 | 221 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 222 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 223 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 224 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 225 | |
twisti@1020 | 226 | // |
twisti@1020 | 227 | // Standard cpuid(0x1) |
twisti@1020 | 228 | // |
twisti@1020 | 229 | __ bind(std_cpuid1); |
twisti@1020 | 230 | __ movl(rax, 1); |
twisti@1020 | 231 | __ cpuid(); |
twisti@1020 | 232 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); |
twisti@1020 | 233 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 234 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 235 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 236 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 237 | |
kvn@3388 | 238 | // |
kvn@3388 | 239 | // Check if OS has enabled XGETBV instruction to access XCR0 |
kvn@3388 | 240 | // (OSXSAVE feature flag) and CPU supports AVX |
kvn@3388 | 241 | // |
kvn@6388 | 242 | __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx |
kvn@3388 | 243 | __ cmpl(rcx, 0x18000000); |
kvn@6388 | 244 | __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported |
kvn@3388 | 245 | |
kvn@3388 | 246 | // |
kvn@3388 | 247 | // XCR0, XFEATURE_ENABLED_MASK register |
kvn@3388 | 248 | // |
kvn@3388 | 249 | __ xorl(rcx, rcx); // zero for XCR0 register |
kvn@3388 | 250 | __ xgetbv(); |
kvn@3388 | 251 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); |
kvn@3388 | 252 | __ movl(Address(rsi, 0), rax); |
kvn@3388 | 253 | __ movl(Address(rsi, 4), rdx); |
kvn@3388 | 254 | |
kvn@6388 | 255 | __ andl(rax, 0x6); // xcr0 bits sse | ymm |
kvn@6388 | 256 | __ cmpl(rax, 0x6); |
kvn@6388 | 257 | __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported |
kvn@6388 | 258 | |
kvn@6388 | 259 | // |
kvn@6388 | 260 | // Some OSs have a bug when upper 128bits of YMM |
kvn@6388 | 261 | // registers are not restored after a signal processing. |
kvn@6388 | 262 | // Generate SEGV here (reference through NULL) |
kvn@6388 | 263 | // and check upper YMM bits after it. |
kvn@6388 | 264 | // |
kvn@6388 | 265 | VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts |
kvn@6388 | 266 | |
kvn@6388 | 267 | // load value into all 32 bytes of ymm7 register |
kvn@6388 | 268 | __ movl(rcx, VM_Version::ymm_test_value()); |
kvn@6388 | 269 | |
kvn@6388 | 270 | __ movdl(xmm0, rcx); |
kvn@6388 | 271 | __ pshufd(xmm0, xmm0, 0x00); |
kvn@6388 | 272 | __ vinsertf128h(xmm0, xmm0, xmm0); // replicate low 128 bits into the high half |
kvn@6388 | 273 | __ vmovdqu(xmm7, xmm0); |
kvn@6388 | 274 | #ifdef _LP64 |
kvn@6388 | 275 | __ vmovdqu(xmm8, xmm0); |
kvn@6388 | 276 | __ vmovdqu(xmm15, xmm0); |
kvn@6388 | 277 | #endif |
kvn@6388 | 278 | |
kvn@6388 | 279 | __ xorl(rsi, rsi); // NULL pointer for the intentional fault below |
kvn@6388 | 280 | VM_Version::set_cpuinfo_segv_addr( __ pc() ); |
kvn@6388 | 281 | // Generate SEGV |
kvn@6388 | 282 | __ movl(rax, Address(rsi, 0)); |
kvn@6388 | 283 | |
kvn@6388 | 284 | VM_Version::set_cpuinfo_cont_addr( __ pc() ); |
kvn@6388 | 285 | // Returns here after signal. Save xmm0 to check it later. |
kvn@6388 | 286 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset()))); |
kvn@6388 | 287 | __ vmovdqu(Address(rsi, 0), xmm0); |
kvn@6388 | 288 | __ vmovdqu(Address(rsi, 32), xmm7); |
kvn@6388 | 289 | #ifdef _LP64 |
kvn@6388 | 290 | __ vmovdqu(Address(rsi, 64), xmm8); |
kvn@6388 | 291 | __ vmovdqu(Address(rsi, 96), xmm15); |
kvn@6388 | 292 | #endif |
kvn@6388 | 293 | |
kvn@6388 | 294 | VM_Version::clean_cpuFeatures(); |
kvn@6388 | 295 | |
kvn@3388 | 296 | // |
kvn@3388 | 297 | // cpuid(0x7) Structured Extended Features |
kvn@3388 | 298 | // |
kvn@3388 | 299 | __ bind(sef_cpuid); |
kvn@3388 | 300 | __ movl(rax, 7); |
kvn@3388 | 301 | __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported? |
kvn@3388 | 302 | __ jccb(Assembler::greater, ext_cpuid); |
kvn@3388 | 303 | |
kvn@3388 | 304 | __ xorl(rcx, rcx); |
kvn@3388 | 305 | __ cpuid(); |
kvn@3388 | 306 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset()))); |
kvn@3388 | 307 | __ movl(Address(rsi, 0), rax); |
kvn@3388 | 308 | __ movl(Address(rsi, 4), rbx); |
kvn@3388 | 309 | |
kvn@3388 | 310 | // |
kvn@3388 | 311 | // Extended cpuid(0x80000000) |
kvn@3388 | 312 | // |
kvn@3388 | 313 | __ bind(ext_cpuid); |
twisti@1020 | 314 | __ movl(rax, 0x80000000); |
twisti@1020 | 315 | __ cpuid(); |
twisti@1020 | 316 | __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported? |
twisti@1020 | 317 | __ jcc(Assembler::belowEqual, done); |
twisti@1020 | 318 | __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported? |
twisti@1020 | 319 | __ jccb(Assembler::belowEqual, ext_cpuid1); |
phh@3378 | 320 | __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported? |
phh@3378 | 321 | __ jccb(Assembler::belowEqual, ext_cpuid5); |
twisti@1020 | 322 | __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported? |
phh@3378 | 323 | __ jccb(Assembler::belowEqual, ext_cpuid7); |
twisti@1020 | 324 | // |
twisti@1020 | 325 | // Extended cpuid(0x80000008) |
twisti@1020 | 326 | // |
twisti@1020 | 327 | __ movl(rax, 0x80000008); |
twisti@1020 | 328 | __ cpuid(); |
twisti@1020 | 329 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); |
twisti@1020 | 330 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 331 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 332 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 333 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 334 | |
twisti@1020 | 335 | // |
phh@3378 | 336 | // Extended cpuid(0x80000007) |
phh@3378 | 337 | // |
phh@3378 | 338 | __ bind(ext_cpuid7); |
phh@3378 | 339 | __ movl(rax, 0x80000007); |
phh@3378 | 340 | __ cpuid(); |
phh@3378 | 341 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset()))); |
phh@3378 | 342 | __ movl(Address(rsi, 0), rax); |
phh@3378 | 343 | __ movl(Address(rsi, 4), rbx); |
phh@3378 | 344 | __ movl(Address(rsi, 8), rcx); |
phh@3378 | 345 | __ movl(Address(rsi,12), rdx); |
phh@3378 | 346 | |
phh@3378 | 347 | // |
twisti@1020 | 348 | // Extended cpuid(0x80000005) |
twisti@1020 | 349 | // |
twisti@1020 | 350 | __ bind(ext_cpuid5); |
twisti@1020 | 351 | __ movl(rax, 0x80000005); |
twisti@1020 | 352 | __ cpuid(); |
twisti@1020 | 353 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); |
twisti@1020 | 354 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 355 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 356 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 357 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 358 | |
twisti@1020 | 359 | // |
twisti@1020 | 360 | // Extended cpuid(0x80000001) |
twisti@1020 | 361 | // |
twisti@1020 | 362 | __ bind(ext_cpuid1); |
twisti@1020 | 363 | __ movl(rax, 0x80000001); |
twisti@1020 | 364 | __ cpuid(); |
twisti@1020 | 365 | __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); |
twisti@1020 | 366 | __ movl(Address(rsi, 0), rax); |
twisti@1020 | 367 | __ movl(Address(rsi, 4), rbx); |
twisti@1020 | 368 | __ movl(Address(rsi, 8), rcx); |
twisti@1020 | 369 | __ movl(Address(rsi,12), rdx); |
twisti@1020 | 370 | |
twisti@1020 | 371 | // |
twisti@1020 | 372 | // return |
twisti@1020 | 373 | // |
twisti@1020 | 374 | __ bind(done); |
twisti@1020 | 375 | __ popf(); |
twisti@1020 | 376 | __ pop(rsi); |
twisti@1020 | 377 | __ pop(rbx); |
twisti@1020 | 378 | __ pop(rbp); |
twisti@1020 | 379 | __ ret(0); |
twisti@1020 | 380 | |
twisti@1020 | 381 | # undef __ |
twisti@1020 | 382 | |
twisti@1020 | 383 | return start; |
twisti@1020 | 384 | }; |
twisti@1020 | 385 | }; |
twisti@1020 | 386 | |
twisti@1020 | 387 | |
kvn@6537 | 388 | void VM_Version::get_cpu_info_wrapper() { // Runs the generated cpuid stub, filling _cpuid_info. |
kvn@6537 | 389 | get_cpu_info_stub(&_cpuid_info); |
kvn@6537 | 390 | } |
kvn@6537 | 391 | |
kvn@6537 | 392 | #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED |
kvn@6537 | 393 | #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f() // default: direct call; some OSes (e.g. Win*) supply a SEGV-safe wrapper instead — see its use below |
kvn@6537 | 394 | #endif |
kvn@6537 | 395 | |
twisti@1020 | 396 | void VM_Version::get_processor_features() { |
twisti@1020 | 397 | |
twisti@1020 | 398 | _cpu = 4; // 486 by default |
twisti@1020 | 399 | _model = 0; |
twisti@1020 | 400 | _stepping = 0; |
twisti@1020 | 401 | _cpuFeatures = 0; |
twisti@1020 | 402 | _logical_processors_per_package = 1; |
twisti@1020 | 403 | |
twisti@1020 | 404 | if (!Use486InstrsOnly) { |
twisti@1020 | 405 | // Get raw processor info |
kvn@6537 | 406 | |
kvn@6537 | 407 | // Some platforms (like Win*) need a wrapper around here |
kvn@6537 | 408 | // in order to properly handle SEGV for YMM registers test. |
kvn@6537 | 409 | CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper); |
kvn@6537 | 410 | |
twisti@1020 | 411 | assert_is_initialized(); |
twisti@1020 | 412 | _cpu = extended_cpu_family(); |
twisti@1020 | 413 | _model = extended_cpu_model(); |
twisti@1020 | 414 | _stepping = cpu_stepping(); |
twisti@1020 | 415 | |
twisti@1020 | 416 | if (cpu_family() > 4) { // it supports CPUID |
twisti@1020 | 417 | _cpuFeatures = feature_flags(); |
twisti@1020 | 418 | // Logical processors are only available on P4s and above, |
twisti@1020 | 419 | // and only if hyperthreading is available. |
twisti@1020 | 420 | _logical_processors_per_package = logical_processor_count(); |
twisti@1020 | 421 | } |
twisti@1020 | 422 | } |
twisti@1020 | 423 | |
twisti@1020 | 424 | _supports_cx8 = supports_cmpxchg8(); |
roland@4106 | 425 | // xchg and xadd instructions |
roland@4106 | 426 | _supports_atomic_getset4 = true; |
roland@4106 | 427 | _supports_atomic_getadd4 = true; |
roland@4106 | 428 | LP64_ONLY(_supports_atomic_getset8 = true); |
roland@4106 | 429 | LP64_ONLY(_supports_atomic_getadd8 = true); |
twisti@1020 | 430 | |
twisti@1020 | 431 | #ifdef _LP64 |
twisti@1020 | 432 | // OS should support SSE for x64 and hardware should support at least SSE2. |
twisti@1020 | 433 | if (!VM_Version::supports_sse2()) { |
twisti@1020 | 434 | vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported"); |
twisti@1020 | 435 | } |
roland@1495 | 436 | // in 64 bit the use of SSE2 is the minimum |
roland@1495 | 437 | if (UseSSE < 2) UseSSE = 2; |
twisti@1020 | 438 | #endif |
twisti@1020 | 439 | |
kvn@2984 | 440 | #ifdef AMD64 |
kvn@2984 | 441 | // flush_icache_stub have to be generated first. |
kvn@2984 | 442 | // That is why Icache line size is hard coded in ICache class, |
kvn@2984 | 443 | // see icache_x86.hpp. It is also the reason why we can't use |
kvn@2984 | 444 | // clflush instruction in 32-bit VM since it could be running |
kvn@2984 | 445 | // on CPU which does not support it. |
kvn@2984 | 446 | // |
kvn@2984 | 447 | // The only thing we can do is to verify that flushed |
kvn@2984 | 448 | // ICache::line_size has correct value. |
kvn@2984 | 449 | guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported"); |
kvn@2984 | 450 | // clflush_size is size in quadwords (8 bytes). |
kvn@2984 | 451 | guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported"); |
kvn@2984 | 452 | #endif |
kvn@2984 | 453 | |
twisti@1020 | 454 | // If the OS doesn't support SSE, we can't use this feature even if the HW does |
twisti@1020 | 455 | if (!os::supports_sse()) |
twisti@1020 | 456 | _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2); |
twisti@1020 | 457 | |
twisti@1020 | 458 | if (UseSSE < 4) { |
twisti@1020 | 459 | _cpuFeatures &= ~CPU_SSE4_1; |
twisti@1020 | 460 | _cpuFeatures &= ~CPU_SSE4_2; |
twisti@1020 | 461 | } |
twisti@1020 | 462 | |
twisti@1020 | 463 | if (UseSSE < 3) { |
twisti@1020 | 464 | _cpuFeatures &= ~CPU_SSE3; |
twisti@1020 | 465 | _cpuFeatures &= ~CPU_SSSE3; |
twisti@1020 | 466 | _cpuFeatures &= ~CPU_SSE4A; |
twisti@1020 | 467 | } |
twisti@1020 | 468 | |
twisti@1020 | 469 | if (UseSSE < 2) |
twisti@1020 | 470 | _cpuFeatures &= ~CPU_SSE2; |
twisti@1020 | 471 | |
twisti@1020 | 472 | if (UseSSE < 1) |
twisti@1020 | 473 | _cpuFeatures &= ~CPU_SSE; |
twisti@1020 | 474 | |
kvn@3388 | 475 | if (UseAVX < 2) |
kvn@3388 | 476 | _cpuFeatures &= ~CPU_AVX2; |
kvn@3388 | 477 | |
kvn@3388 | 478 | if (UseAVX < 1) |
kvn@3388 | 479 | _cpuFeatures &= ~CPU_AVX; |
kvn@3388 | 480 | |
kvn@4205 | 481 | if (!UseAES && !FLAG_IS_DEFAULT(UseAES)) |
kvn@4205 | 482 | _cpuFeatures &= ~CPU_AES; |
kvn@4205 | 483 | |
twisti@1020 | 484 | if (logical_processors_per_package() == 1) { |
twisti@1020 | 485 | // HT processor could be installed on a system which doesn't support HT. |
twisti@1020 | 486 | _cpuFeatures &= ~CPU_HT; |
twisti@1020 | 487 | } |
twisti@1020 | 488 | |
twisti@1020 | 489 | char buf[256]; |
kvn@6429 | 490 | jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", |
twisti@1020 | 491 | cores_per_cpu(), threads_per_core(), |
twisti@1020 | 492 | cpu_family(), _model, _stepping, |
twisti@1020 | 493 | (supports_cmov() ? ", cmov" : ""), |
twisti@1020 | 494 | (supports_cmpxchg8() ? ", cx8" : ""), |
twisti@1020 | 495 | (supports_fxsr() ? ", fxsr" : ""), |
twisti@1020 | 496 | (supports_mmx() ? ", mmx" : ""), |
twisti@1020 | 497 | (supports_sse() ? ", sse" : ""), |
twisti@1020 | 498 | (supports_sse2() ? ", sse2" : ""), |
twisti@1020 | 499 | (supports_sse3() ? ", sse3" : ""), |
twisti@1020 | 500 | (supports_ssse3()? ", ssse3": ""), |
twisti@1020 | 501 | (supports_sse4_1() ? ", sse4.1" : ""), |
twisti@1020 | 502 | (supports_sse4_2() ? ", sse4.2" : ""), |
twisti@1078 | 503 | (supports_popcnt() ? ", popcnt" : ""), |
kvn@3388 | 504 | (supports_avx() ? ", avx" : ""), |
kvn@3388 | 505 | (supports_avx2() ? ", avx2" : ""), |
kvn@4205 | 506 | (supports_aes() ? ", aes" : ""), |
kvn@6429 | 507 | (supports_clmul() ? ", clmul" : ""), |
kvn@4410 | 508 | (supports_erms() ? ", erms" : ""), |
kvn@6429 | 509 | (supports_rtm() ? ", rtm" : ""), |
twisti@1020 | 510 | (supports_mmx_ext() ? ", mmxext" : ""), |
kvn@2761 | 511 | (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), |
twisti@1210 | 512 | (supports_lzcnt() ? ", lzcnt": ""), |
twisti@1020 | 513 | (supports_sse4a() ? ", sse4a": ""), |
phh@3378 | 514 | (supports_ht() ? ", ht": ""), |
phh@3378 | 515 | (supports_tsc() ? ", tsc": ""), |
phh@3378 | 516 | (supports_tscinv_bit() ? ", tscinvbit": ""), |
iveresov@6378 | 517 | (supports_tscinv() ? ", tscinv": ""), |
iveresov@6378 | 518 | (supports_bmi1() ? ", bmi1" : ""), |
iveresov@6378 | 519 | (supports_bmi2() ? ", bmi2" : "")); |
twisti@1020 | 520 | _features_str = strdup(buf); |
twisti@1020 | 521 | |
twisti@1020 | 522 | // UseSSE is set to the smaller of what hardware supports and what |
twisti@1020 | 523 | // the command line requires. I.e., you cannot set UseSSE to 2 on |
twisti@1020 | 524 | // older Pentiums which do not support it. |
kvn@3388 | 525 | if (UseSSE > 4) UseSSE=4; |
kvn@3388 | 526 | if (UseSSE < 0) UseSSE=0; |
kvn@3388 | 527 | if (!supports_sse4_1()) // Drop to 3 if no SSE4 support |
twisti@1020 | 528 | UseSSE = MIN2((intx)3,UseSSE); |
kvn@3388 | 529 | if (!supports_sse3()) // Drop to 2 if no SSE3 support |
twisti@1020 | 530 | UseSSE = MIN2((intx)2,UseSSE); |
kvn@3388 | 531 | if (!supports_sse2()) // Drop to 1 if no SSE2 support |
twisti@1020 | 532 | UseSSE = MIN2((intx)1,UseSSE); |
kvn@3388 | 533 | if (!supports_sse ()) // Drop to 0 if no SSE support |
twisti@1020 | 534 | UseSSE = 0; |
twisti@1020 | 535 | |
kvn@3388 | 536 | if (UseAVX > 2) UseAVX=2; |
kvn@3388 | 537 | if (UseAVX < 0) UseAVX=0; |
kvn@3388 | 538 | if (!supports_avx2()) // Drop to 1 if no AVX2 support |
kvn@3388 | 539 | UseAVX = MIN2((intx)1,UseAVX); |
kvn@3388 | 540 | if (!supports_avx ()) // Drop to 0 if no AVX support |
kvn@3388 | 541 | UseAVX = 0; |
kvn@3388 | 542 | |
kvn@4205 | 543 | // Use AES instructions if available. |
kvn@4205 | 544 | if (supports_aes()) { |
kvn@4205 | 545 | if (FLAG_IS_DEFAULT(UseAES)) { |
kvn@4205 | 546 | UseAES = true; |
kvn@4205 | 547 | } |
kvn@4205 | 548 | } else if (UseAES) { |
kvn@4205 | 549 | if (!FLAG_IS_DEFAULT(UseAES)) |
kvn@6429 | 550 | warning("AES instructions are not available on this CPU"); |
kvn@4205 | 551 | FLAG_SET_DEFAULT(UseAES, false); |
kvn@4205 | 552 | } |
kvn@4205 | 553 | |
drchase@5353 | 554 | // Use CLMUL instructions if available. |
drchase@5353 | 555 | if (supports_clmul()) { |
drchase@5353 | 556 | if (FLAG_IS_DEFAULT(UseCLMUL)) { |
drchase@5353 | 557 | UseCLMUL = true; |
drchase@5353 | 558 | } |
drchase@5353 | 559 | } else if (UseCLMUL) { |
drchase@5353 | 560 | if (!FLAG_IS_DEFAULT(UseCLMUL)) |
drchase@5353 | 561 | warning("CLMUL instructions not available on this CPU (AVX may also be required)"); |
drchase@5353 | 562 | FLAG_SET_DEFAULT(UseCLMUL, false); |
drchase@5353 | 563 | } |
drchase@5353 | 564 | |
drchase@5353 | 565 | if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) { |
drchase@5353 | 566 | if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { |
drchase@5353 | 567 | UseCRC32Intrinsics = true; |
drchase@5353 | 568 | } |
drchase@5353 | 569 | } else if (UseCRC32Intrinsics) { |
drchase@5353 | 570 | if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) |
drchase@5353 | 571 | warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)"); |
drchase@5353 | 572 | FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); |
drchase@5353 | 573 | } |
drchase@5353 | 574 | |
kvn@4205 | 575 | // The AES intrinsic stubs require AES instruction support (of course) |
kvn@4363 | 576 | // but also require sse3 mode for instructions it use. |
kvn@4363 | 577 | if (UseAES && (UseSSE > 2)) { |
kvn@4205 | 578 | if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { |
kvn@4205 | 579 | UseAESIntrinsics = true; |
kvn@4205 | 580 | } |
kvn@4205 | 581 | } else if (UseAESIntrinsics) { |
kvn@4205 | 582 | if (!FLAG_IS_DEFAULT(UseAESIntrinsics)) |
kvn@6429 | 583 | warning("AES intrinsics are not available on this CPU"); |
kvn@4205 | 584 | FLAG_SET_DEFAULT(UseAESIntrinsics, false); |
kvn@4205 | 585 | } |
kvn@4205 | 586 | |
kvn@6429 | 587 | // Adjust RTM (Restricted Transactional Memory) flags |
kvn@6429 | 588 | if (!supports_rtm() && UseRTMLocking) { |
kvn@6429 | 589 | // Can't continue because UseRTMLocking affects UseBiasedLocking flag |
kvn@6429 | 590 | // setting during arguments processing. See use_biased_locking(). |
kvn@6429 | 591 | // VM_Version_init() is executed after UseBiasedLocking is used |
kvn@6429 | 592 | // in Thread::allocate(). |
kvn@6429 | 593 | vm_exit_during_initialization("RTM instructions are not available on this CPU"); |
kvn@6429 | 594 | } |
kvn@6429 | 595 | |
kvn@6429 | 596 | #if INCLUDE_RTM_OPT |
kvn@6429 | 597 | if (UseRTMLocking) { |
kvn@6429 | 598 | if (!FLAG_IS_CMDLINE(UseRTMLocking)) { |
kvn@6429 | 599 | // RTM locking should be used only for applications with |
kvn@6429 | 600 | // high lock contention. For now we do not use it by default. |
kvn@6429 | 601 | vm_exit_during_initialization("UseRTMLocking flag should be only set on command line"); |
kvn@6429 | 602 | } |
kvn@6429 | 603 | if (!is_power_of_2(RTMTotalCountIncrRate)) { |
kvn@6429 | 604 | warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64"); |
kvn@6429 | 605 | FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64); |
kvn@6429 | 606 | } |
kvn@6429 | 607 | if (RTMAbortRatio < 0 || RTMAbortRatio > 100) { |
kvn@6429 | 608 | warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50"); |
kvn@6429 | 609 | FLAG_SET_DEFAULT(RTMAbortRatio, 50); |
kvn@6429 | 610 | } |
kvn@6429 | 611 | } else { // !UseRTMLocking |
kvn@6429 | 612 | if (UseRTMForStackLocks) { |
kvn@6429 | 613 | if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) { |
kvn@6429 | 614 | warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off"); |
kvn@6429 | 615 | } |
kvn@6429 | 616 | FLAG_SET_DEFAULT(UseRTMForStackLocks, false); |
kvn@6429 | 617 | } |
kvn@6429 | 618 | if (UseRTMDeopt) { |
kvn@6429 | 619 | FLAG_SET_DEFAULT(UseRTMDeopt, false); |
kvn@6429 | 620 | } |
kvn@6429 | 621 | if (PrintPreciseRTMLockingStatistics) { |
kvn@6429 | 622 | FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false); |
kvn@6429 | 623 | } |
kvn@6429 | 624 | } |
kvn@6429 | 625 | #else |
kvn@6429 | 626 | if (UseRTMLocking) { |
kvn@6429 | 627 | // Only C2 does RTM locking optimization. |
kvn@6429 | 628 | // Can't continue because UseRTMLocking affects UseBiasedLocking flag |
kvn@6429 | 629 | // setting during arguments processing. See use_biased_locking(). |
kvn@6429 | 630 | vm_exit_during_initialization("RTM locking optimization is not supported in this VM"); |
kvn@6429 | 631 | } |
kvn@6429 | 632 | #endif |
kvn@6429 | 633 | |
kvn@3882 | 634 | #ifdef COMPILER2 |
kvn@3882 | 635 | if (UseFPUForSpilling) { |
kvn@3882 | 636 | if (UseSSE < 2) { |
kvn@3882 | 637 | // Only supported with SSE2+ |
kvn@3882 | 638 | FLAG_SET_DEFAULT(UseFPUForSpilling, false); |
kvn@3882 | 639 | } |
kvn@3882 | 640 | } |
kvn@3882 | 641 | if (MaxVectorSize > 0) { |
kvn@3882 | 642 | if (!is_power_of_2(MaxVectorSize)) { |
kvn@3882 | 643 | warning("MaxVectorSize must be a power of 2"); |
kvn@3882 | 644 | FLAG_SET_DEFAULT(MaxVectorSize, 32); |
kvn@3882 | 645 | } |
kvn@3882 | 646 | if (MaxVectorSize > 32) { |
kvn@3882 | 647 | FLAG_SET_DEFAULT(MaxVectorSize, 32); |
kvn@3882 | 648 | } |
kvn@6388 | 649 | if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) { |
kvn@6388 | 650 | // 32 bytes vectors (in YMM) are only supported with AVX+ |
kvn@3882 | 651 | FLAG_SET_DEFAULT(MaxVectorSize, 16); |
kvn@3882 | 652 | } |
kvn@3882 | 653 | if (UseSSE < 2) { |
kvn@6388 | 654 | // Vectors (in XMM) are only supported with SSE2+ |
kvn@3882 | 655 | FLAG_SET_DEFAULT(MaxVectorSize, 0); |
kvn@3882 | 656 | } |
kvn@6388 | 657 | #ifdef ASSERT |
kvn@6388 | 658 | if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) { |
kvn@6388 | 659 | tty->print_cr("State of YMM registers after signal handle:"); |
kvn@6388 | 660 | int nreg = 2 LP64_ONLY(+2); |
kvn@6388 | 661 | const char* ymm_name[4] = {"0", "7", "8", "15"}; |
kvn@6388 | 662 | for (int i = 0; i < nreg; i++) { |
kvn@6388 | 663 | tty->print("YMM%s:", ymm_name[i]); |
kvn@6388 | 664 | for (int j = 7; j >=0; j--) { |
kvn@6388 | 665 | tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]); |
kvn@6388 | 666 | } |
kvn@6388 | 667 | tty->cr(); |
kvn@6388 | 668 | } |
kvn@6388 | 669 | } |
kvn@6388 | 670 | #endif |
kvn@3882 | 671 | } |
kvn@3882 | 672 | #endif |
kvn@3882 | 673 | |
twisti@1020 | 674 | // On new cpus instructions which update whole XMM register should be used |
twisti@1020 | 675 | // to prevent partial register stall due to dependencies on high half. |
twisti@1020 | 676 | // |
twisti@1020 | 677 | // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem) |
twisti@1020 | 678 | // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem) |
twisti@1020 | 679 | // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm). |
twisti@1020 | 680 | // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm). |
twisti@1020 | 681 | |
twisti@1020 | 682 | if( is_amd() ) { // AMD cpus specific settings |
twisti@1020 | 683 | if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) { |
twisti@1020 | 684 | // Use it on new AMD cpus starting from Opteron. |
twisti@1020 | 685 | UseAddressNop = true; |
twisti@1020 | 686 | } |
twisti@1020 | 687 | if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) { |
twisti@1020 | 688 | // Use it on new AMD cpus starting from Opteron. |
twisti@1020 | 689 | UseNewLongLShift = true; |
twisti@1020 | 690 | } |
twisti@1020 | 691 | if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) { |
twisti@1020 | 692 | if( supports_sse4a() ) { |
twisti@1020 | 693 | UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron |
twisti@1020 | 694 | } else { |
twisti@1020 | 695 | UseXmmLoadAndClearUpper = false; |
twisti@1020 | 696 | } |
twisti@1020 | 697 | } |
twisti@1020 | 698 | if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) { |
twisti@1020 | 699 | if( supports_sse4a() ) { |
twisti@1020 | 700 | UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h' |
twisti@1020 | 701 | } else { |
twisti@1020 | 702 | UseXmmRegToRegMoveAll = false; |
twisti@1020 | 703 | } |
twisti@1020 | 704 | } |
twisti@1020 | 705 | if( FLAG_IS_DEFAULT(UseXmmI2F) ) { |
twisti@1020 | 706 | if( supports_sse4a() ) { |
twisti@1020 | 707 | UseXmmI2F = true; |
twisti@1020 | 708 | } else { |
twisti@1020 | 709 | UseXmmI2F = false; |
twisti@1020 | 710 | } |
twisti@1020 | 711 | } |
twisti@1020 | 712 | if( FLAG_IS_DEFAULT(UseXmmI2D) ) { |
twisti@1020 | 713 | if( supports_sse4a() ) { |
twisti@1020 | 714 | UseXmmI2D = true; |
twisti@1020 | 715 | } else { |
twisti@1020 | 716 | UseXmmI2D = false; |
twisti@1020 | 717 | } |
twisti@1020 | 718 | } |
kvn@2688 | 719 | if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) { |
kvn@2688 | 720 | if( supports_sse4_2() && UseSSE >= 4 ) { |
kvn@2688 | 721 | UseSSE42Intrinsics = true; |
kvn@2688 | 722 | } |
kvn@2688 | 723 | } |
twisti@1210 | 724 | |
kvn@2808 | 725 | // some defaults for AMD family 15h |
kvn@2808 | 726 | if ( cpu_family() == 0x15 ) { |
kvn@2808 | 727 | // On family 15h processors default is no sw prefetch |
kvn@2640 | 728 | if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { |
kvn@2640 | 729 | AllocatePrefetchStyle = 0; |
kvn@2640 | 730 | } |
kvn@2808 | 731 | // Also, if some other prefetch style is specified, default instruction type is PREFETCHW |
kvn@2808 | 732 | if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { |
kvn@2808 | 733 | AllocatePrefetchInstr = 3; |
kvn@2808 | 734 | } |
kvn@2808 | 735 | // On family 15h processors use XMM and UnalignedLoadStores for Array Copy |
kvn@4105 | 736 | if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { |
kvn@2808 | 737 | UseXMMForArrayCopy = true; |
kvn@2808 | 738 | } |
kvn@4105 | 739 | if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { |
kvn@2808 | 740 | UseUnalignedLoadStores = true; |
kvn@2808 | 741 | } |
kvn@2640 | 742 | } |
kvn@2808 | 743 | |
kvn@3882 | 744 | #ifdef COMPILER2 |
kvn@3882 | 745 | if (MaxVectorSize > 16) { |
kvn@3882 | 746 | // Limit vectors size to 16 bytes on current AMD cpus. |
kvn@3882 | 747 | FLAG_SET_DEFAULT(MaxVectorSize, 16); |
kvn@3882 | 748 | } |
kvn@3882 | 749 | #endif // COMPILER2 |
twisti@1020 | 750 | } |
twisti@1020 | 751 | |
twisti@1020 | 752 | if( is_intel() ) { // Intel cpus specific settings |
twisti@1020 | 753 | if( FLAG_IS_DEFAULT(UseStoreImmI16) ) { |
twisti@1020 | 754 | UseStoreImmI16 = false; // don't use it on Intel cpus |
twisti@1020 | 755 | } |
twisti@1020 | 756 | if( cpu_family() == 6 || cpu_family() == 15 ) { |
twisti@1020 | 757 | if( FLAG_IS_DEFAULT(UseAddressNop) ) { |
twisti@1020 | 758 | // Use it on all Intel cpus starting from PentiumPro |
twisti@1020 | 759 | UseAddressNop = true; |
twisti@1020 | 760 | } |
twisti@1020 | 761 | } |
twisti@1020 | 762 | if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) { |
twisti@1020 | 763 | UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus |
twisti@1020 | 764 | } |
twisti@1020 | 765 | if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) { |
twisti@1020 | 766 | if( supports_sse3() ) { |
twisti@1020 | 767 | UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus |
twisti@1020 | 768 | } else { |
twisti@1020 | 769 | UseXmmRegToRegMoveAll = false; |
twisti@1020 | 770 | } |
twisti@1020 | 771 | } |
twisti@1020 | 772 | if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus |
twisti@1020 | 773 | #ifdef COMPILER2 |
twisti@1020 | 774 | if( FLAG_IS_DEFAULT(MaxLoopPad) ) { |
twisti@1020 | 775 | // For new Intel cpus do the next optimization: |
twisti@1020 | 776 | // don't align the beginning of a loop if there are enough instructions |
twisti@1020 | 777 | // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp) |
twisti@1020 | 778 | // in current fetch line (OptoLoopAlignment) or the padding |
twisti@1020 | 779 | // is big (> MaxLoopPad). |
twisti@1020 | 780 | // Set MaxLoopPad to 11 for new Intel cpus to reduce number of |
twisti@1020 | 781 | // generated NOP instructions. 11 is the largest size of one |
twisti@1020 | 782 | // address NOP instruction '0F 1F' (see Assembler::nop(i)). |
twisti@1020 | 783 | MaxLoopPad = 11; |
twisti@1020 | 784 | } |
twisti@1020 | 785 | #endif // COMPILER2 |
kvn@4105 | 786 | if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) { |
twisti@1020 | 787 | UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus |
twisti@1020 | 788 | } |
kvn@4105 | 789 | if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus |
kvn@4105 | 790 | if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { |
twisti@1020 | 791 | UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus |
twisti@1020 | 792 | } |
twisti@1020 | 793 | } |
kvn@4105 | 794 | if (supports_sse4_2() && UseSSE >= 4) { |
kvn@4105 | 795 | if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) { |
cfang@1116 | 796 | UseSSE42Intrinsics = true; |
cfang@1116 | 797 | } |
cfang@1116 | 798 | } |
twisti@1020 | 799 | } |
twisti@1020 | 800 | } |
twisti@1020 | 801 | |
iveresov@6378 | 802 |   // Use the count-leading-zeros (lzcnt) instruction if available.
iveresov@6378 | 803 | if (supports_lzcnt()) { |
iveresov@6378 | 804 | if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) { |
iveresov@6378 | 805 | UseCountLeadingZerosInstruction = true; |
iveresov@6378 | 806 | } |
iveresov@6378 | 807 | } else if (UseCountLeadingZerosInstruction) { |
iveresov@6378 | 808 | warning("lzcnt instruction is not available on this CPU"); |
iveresov@6378 | 809 | FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false); |
iveresov@6378 | 810 | } |
iveresov@6378 | 811 | |
iveresov@6378 | 812 | if (supports_bmi1()) { |
iveresov@6378 | 813 | if (FLAG_IS_DEFAULT(UseBMI1Instructions)) { |
iveresov@6378 | 814 | UseBMI1Instructions = true; |
iveresov@6378 | 815 | } |
iveresov@6378 | 816 | } else if (UseBMI1Instructions) { |
iveresov@6378 | 817 | warning("BMI1 instructions are not available on this CPU"); |
iveresov@6378 | 818 | FLAG_SET_DEFAULT(UseBMI1Instructions, false); |
iveresov@6378 | 819 | } |
iveresov@6378 | 820 | |
iveresov@6378 | 821 | // Use count trailing zeros instruction if available |
iveresov@6378 | 822 | if (supports_bmi1()) { |
iveresov@6378 | 823 | if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { |
iveresov@6378 | 824 | UseCountTrailingZerosInstruction = UseBMI1Instructions; |
iveresov@6378 | 825 | } |
iveresov@6378 | 826 | } else if (UseCountTrailingZerosInstruction) { |
iveresov@6378 | 827 | warning("tzcnt instruction is not available on this CPU"); |
iveresov@6378 | 828 | FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); |
iveresov@6378 | 829 | } |
iveresov@6378 | 830 | |
twisti@1078 | 831 | // Use population count instruction if available. |
twisti@1078 | 832 | if (supports_popcnt()) { |
twisti@1078 | 833 | if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { |
twisti@1078 | 834 | UsePopCountInstruction = true; |
twisti@1078 | 835 | } |
kvn@3388 | 836 | } else if (UsePopCountInstruction) { |
kvn@3388 | 837 | warning("POPCNT instruction is not available on this CPU"); |
kvn@3388 | 838 | FLAG_SET_DEFAULT(UsePopCountInstruction, false); |
twisti@1078 | 839 | } |
twisti@1078 | 840 | |
kvn@4410 | 841 | // Use fast-string operations if available. |
kvn@4410 | 842 | if (supports_erms()) { |
kvn@4410 | 843 | if (FLAG_IS_DEFAULT(UseFastStosb)) { |
kvn@4410 | 844 | UseFastStosb = true; |
kvn@4410 | 845 | } |
kvn@4410 | 846 | } else if (UseFastStosb) { |
kvn@4410 | 847 | warning("fast-string operations are not available on this CPU"); |
kvn@4410 | 848 | FLAG_SET_DEFAULT(UseFastStosb, false); |
kvn@4410 | 849 | } |
kvn@4410 | 850 | |
kvn@4105 | 851 | #ifdef COMPILER2 |
kvn@4105 | 852 | if (FLAG_IS_DEFAULT(AlignVector)) { |
kvn@4105 | 853 | // Modern processors allow misaligned memory operations for vectors. |
kvn@4105 | 854 | AlignVector = !UseUnalignedLoadStores; |
kvn@4105 | 855 | } |
kvn@4105 | 856 | #endif // COMPILER2 |
kvn@4105 | 857 | |
twisti@1020 | 858 | assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value"); |
twisti@1020 | 859 | assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value"); |
twisti@1020 | 860 | |
twisti@1020 | 861 | // set valid Prefetch instruction |
twisti@1020 | 862 | if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0; |
twisti@1020 | 863 | if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3; |
kvn@2761 | 864 | if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0; |
kvn@2761 | 865 | if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3; |
twisti@1020 | 866 | |
twisti@1020 | 867 | if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0; |
twisti@1020 | 868 | if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3; |
kvn@2761 | 869 | if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0; |
kvn@2761 | 870 | if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3; |
twisti@1020 | 871 | |
twisti@1020 | 872 | // Allocation prefetch settings |
kvn@3052 | 873 | intx cache_line_size = prefetch_data_size(); |
twisti@1020 | 874 | if( cache_line_size > AllocatePrefetchStepSize ) |
twisti@1020 | 875 | AllocatePrefetchStepSize = cache_line_size; |
kvn@3052 | 876 | |
twisti@1020 | 877 | assert(AllocatePrefetchLines > 0, "invalid value"); |
kvn@3052 | 878 | if( AllocatePrefetchLines < 1 ) // set valid value in product VM |
kvn@3052 | 879 | AllocatePrefetchLines = 3; |
kvn@3052 | 880 | assert(AllocateInstancePrefetchLines > 0, "invalid value"); |
kvn@3052 | 881 | if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM |
kvn@3052 | 882 | AllocateInstancePrefetchLines = 1; |
twisti@1020 | 883 | |
twisti@1020 | 884 | AllocatePrefetchDistance = allocate_prefetch_distance(); |
twisti@1020 | 885 | AllocatePrefetchStyle = allocate_prefetch_style(); |
twisti@1020 | 886 | |
kvn@1977 | 887 | if( is_intel() && cpu_family() == 6 && supports_sse3() ) { |
kvn@1977 | 888 | if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core |
twisti@1020 | 889 | #ifdef _LP64 |
kvn@1977 | 890 | AllocatePrefetchDistance = 384; |
twisti@1020 | 891 | #else |
kvn@1977 | 892 | AllocatePrefetchDistance = 320; |
twisti@1020 | 893 | #endif |
kvn@1977 | 894 | } |
kvn@1977 | 895 | if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus |
kvn@1977 | 896 | AllocatePrefetchDistance = 192; |
kvn@1977 | 897 | AllocatePrefetchLines = 4; |
never@2085 | 898 | #ifdef COMPILER2 |
never@2085 | 899 | if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) { |
never@2085 | 900 | FLAG_SET_DEFAULT(UseFPUForSpilling, true); |
never@2085 | 901 | } |
never@2085 | 902 | #endif |
kvn@1977 | 903 | } |
twisti@1020 | 904 | } |
twisti@1020 | 905 | assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value"); |
twisti@1020 | 906 | |
twisti@1020 | 907 | #ifdef _LP64 |
twisti@1020 | 908 | // Prefetch settings |
twisti@1020 | 909 | PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes(); |
twisti@1020 | 910 | PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes(); |
twisti@1020 | 911 | PrefetchFieldsAhead = prefetch_fields_ahead(); |
twisti@1020 | 912 | #endif |
twisti@1020 | 913 | |
jwilhelm@4430 | 914 | if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && |
jwilhelm@4430 | 915 | (cache_line_size > ContendedPaddingWidth)) |
jwilhelm@4430 | 916 | ContendedPaddingWidth = cache_line_size; |
jwilhelm@4430 | 917 | |
twisti@1020 | 918 | #ifndef PRODUCT |
twisti@1020 | 919 | if (PrintMiscellaneous && Verbose) { |
twisti@1020 | 920 | tty->print_cr("Logical CPUs per core: %u", |
twisti@1020 | 921 | logical_processors_per_package()); |
kvn@3388 | 922 | tty->print("UseSSE=%d",UseSSE); |
kvn@3388 | 923 | if (UseAVX > 0) { |
kvn@3388 | 924 | tty->print(" UseAVX=%d",UseAVX); |
kvn@3388 | 925 | } |
kvn@4205 | 926 | if (UseAES) { |
kvn@4205 | 927 | tty->print(" UseAES=1"); |
kvn@4205 | 928 | } |
kvn@6388 | 929 | #ifdef COMPILER2 |
kvn@6388 | 930 | if (MaxVectorSize > 0) { |
kvn@6388 | 931 | tty->print(" MaxVectorSize=%d", MaxVectorSize); |
kvn@6388 | 932 | } |
kvn@6388 | 933 | #endif |
kvn@3388 | 934 | tty->cr(); |
kvn@3052 | 935 | tty->print("Allocation"); |
kvn@2761 | 936 | if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) { |
kvn@3052 | 937 | tty->print_cr(": no prefetching"); |
twisti@1020 | 938 | } else { |
kvn@3052 | 939 | tty->print(" prefetching: "); |
kvn@2761 | 940 | if (UseSSE == 0 && supports_3dnow_prefetch()) { |
twisti@1020 | 941 | tty->print("PREFETCHW"); |
twisti@1020 | 942 | } else if (UseSSE >= 1) { |
twisti@1020 | 943 | if (AllocatePrefetchInstr == 0) { |
twisti@1020 | 944 | tty->print("PREFETCHNTA"); |
twisti@1020 | 945 | } else if (AllocatePrefetchInstr == 1) { |
twisti@1020 | 946 | tty->print("PREFETCHT0"); |
twisti@1020 | 947 | } else if (AllocatePrefetchInstr == 2) { |
twisti@1020 | 948 | tty->print("PREFETCHT2"); |
twisti@1020 | 949 | } else if (AllocatePrefetchInstr == 3) { |
twisti@1020 | 950 | tty->print("PREFETCHW"); |
twisti@1020 | 951 | } |
twisti@1020 | 952 | } |
twisti@1020 | 953 | if (AllocatePrefetchLines > 1) { |
kvn@3052 | 954 | tty->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize); |
twisti@1020 | 955 | } else { |
kvn@3052 | 956 | tty->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize); |
twisti@1020 | 957 | } |
twisti@1020 | 958 | } |
twisti@1020 | 959 | |
twisti@1020 | 960 | if (PrefetchCopyIntervalInBytes > 0) { |
twisti@1020 | 961 | tty->print_cr("PrefetchCopyIntervalInBytes %d", PrefetchCopyIntervalInBytes); |
twisti@1020 | 962 | } |
twisti@1020 | 963 | if (PrefetchScanIntervalInBytes > 0) { |
twisti@1020 | 964 | tty->print_cr("PrefetchScanIntervalInBytes %d", PrefetchScanIntervalInBytes); |
twisti@1020 | 965 | } |
twisti@1020 | 966 | if (PrefetchFieldsAhead > 0) { |
twisti@1020 | 967 | tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead); |
twisti@1020 | 968 | } |
jwilhelm@4430 | 969 | if (ContendedPaddingWidth > 0) { |
jwilhelm@4430 | 970 | tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth); |
jwilhelm@4430 | 971 | } |
twisti@1020 | 972 | } |
twisti@1020 | 973 | #endif // !PRODUCT |
twisti@1020 | 974 | } |
twisti@1020 | 975 | |
kvn@6429 | 976 | bool VM_Version::use_biased_locking() { |
kvn@6429 | 977 | #if INCLUDE_RTM_OPT |
kvn@6429 | 978 | // RTM locking is most useful when there is high lock contention and |
kvn@6429 | 979 | // low data contention. With high lock contention the lock is usually |
kvn@6429 | 980 | // inflated and biased locking is not suitable for that case. |
kvn@6429 | 981 | // RTM locking code requires that biased locking is off. |
kvn@6429 | 982 | // Note: we can't switch off UseBiasedLocking in get_processor_features() |
kvn@6429 | 983 | // because it is used by Thread::allocate() which is called before |
kvn@6429 | 984 | // VM_Version::initialize(). |
kvn@6429 | 985 | if (UseRTMLocking && UseBiasedLocking) { |
kvn@6429 | 986 | if (FLAG_IS_DEFAULT(UseBiasedLocking)) { |
kvn@6429 | 987 | FLAG_SET_DEFAULT(UseBiasedLocking, false); |
kvn@6429 | 988 | } else { |
kvn@6429 | 989 | warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." ); |
kvn@6429 | 990 | UseBiasedLocking = false; |
kvn@6429 | 991 | } |
kvn@6429 | 992 | } |
kvn@6429 | 993 | #endif |
kvn@6429 | 994 | return UseBiasedLocking; |
kvn@6429 | 995 | } |
kvn@6429 | 996 | |
// Entry point for x86 CPU-feature setup: generates the CPUID probe stub and
// then derives all CPU-dependent VM flag settings via get_processor_features().
void VM_Version::initialize() {
  ResourceMark rm;  // scope resource-area allocations made while generating the stub
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    // Without the probe stub we cannot identify the CPU; abort VM startup.
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  // Generate the stub into the blob and keep a typed function pointer to it.
  // NOTE(review): presumably the stub is executed from get_processor_features()
  // to fill in _cpuid_info — the call site is outside this chunk; confirm.
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  // Query CPU features and adjust dependent flags (SSE/AVX/RTM/prefetch, etc.).
  get_processor_features();
}