src/cpu/x86/vm/vm_version_x86_32.cpp

changeset 1022:e57b6f22d1f3 (parent 840:2649e5276dd7)
/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_vm_version_x86_32.cpp.incl"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
int VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

static BufferBlob* stub_blob;
static const int stub_size = 300;

extern "C" {
  typedef void (*getPsrInfo_stub_t)(void*);
}
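// Pointer to the generated stub; it is installed by VM_Version::initialize() below.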
static getPsrInfo_stub_t getPsrInfo_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_getPsrInfo() {
    // Flags to test CPU type.
    const uint32_t EFL_AC = 0x40000;
    const uint32_t EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1;
    Label ext_cpuid1, ext_cpuid5, done;

    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
# define __ _masm->

    address start = __ pc();

    //
    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
    //
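    // The single argument is passed on the stack (cdecl): after the push(rbp)
    // below it sits at [rsp + 8] (saved rbp at [rsp], return address at [rsp + 4]).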
    __ push(rbp);
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve rbx and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
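    // (the AC bit, EFLAGS bit 18, was introduced with the 486, so a 386
    //  cannot toggle it)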
    __ xorl(rax, EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
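    // (the ID bit is EFLAGS bit 21; a CPU that cannot toggle it does not
    //  implement CPUID)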
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // at this point, we have a chip which supports the "cpuid" instruction
    //
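    // cpuid(0x0) returns the highest supported standard leaf in rax and the
    // vendor string in rbx:rdx:rcx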
    __ bind(detect_586);
    __ xorptr(rax, rax);
    __ cpuid();
    __ orptr(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 3);     // Is cpuid(0x4) supported?
    __ jccb(Assembler::belowEqual, std_cpuid1);

    //
    // cpuid(0x4) Deterministic cache params
    //
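    // leaf 4 takes the cache index in rcx; rax bits [4:0] report the cache
    // type, where 0 means there is no cache at that index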
    __ movl(rax, 4);
    __ xorl(rcx, rcx);   // cache index 0
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // rax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // return
    //
    __ bind(done);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

# undef __

    return start;
  };
};


void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;
  if (!Use486InstrsOnly) {
    // Get raw processor info
    getPsrInfo_stub(&_cpuid_info);
    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();
    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
    }
  }
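  // cmpxchg8b support determines whether 64-bit (jlong) values can be read,
  // written and compare-exchanged atomically on this 32-bit VM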
  _supports_cx8 = supports_cmpxchg8();
  // if the OS doesn't support SSE, we can't use this feature even if the HW does
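  // (the OS must save and restore the XMM state, e.g. via fxsave/fxrstor,
  //  across context switches for SSE to be usable)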
  if( !os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }
  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }
  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;
  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

  if (logical_processors_per_package() == 1) {
    // An HT-capable processor may be installed in a system that does not support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow() ? ", 3dnow" : ""),
               (supports_3dnow2() ? ", 3dnowext" : ""),
               (supports_sse4a() ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""));
  _features_str = strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires. I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if( UseSSE > 4 ) UseSSE=4;
  if( UseSSE < 0 ) UseSSE=0;
  if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if( !supports_sse3() ) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if( !supports_sse2() ) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if( !supports_sse () ) // Drop to 0 if no SSE support
    UseSSE = 0;

  // On newer cpus, use instructions that update the whole XMM register to
  // avoid partial register stalls caused by dependencies on the upper half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if( supports_sse4a() ) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus apply the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign, defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment), or if the padding
        // would be large (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of a single
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus
        if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
  }

  assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
  if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0;
  if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3;
  if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0;
  if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3;

  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
  if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0;
  if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3;
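  // (instruction 3 is the 3dnow! PREFETCHW; without SSE it is the only
  //  prefetch flavour available, hence the fallbacks above)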

  // Allocation prefetch settings
  intx cache_line_size = L1_data_cache_line_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;
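  // (stepping by at least a full cache line keeps consecutive prefetches on
  //  distinct lines)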
  if( FLAG_IS_DEFAULT(AllocatePrefetchLines) )
    AllocatePrefetchLines = 3; // Optimistic value
  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 ) // set valid value in product VM
    AllocatePrefetchLines = 1; // Conservative value

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle = allocate_prefetch_style();

  if( AllocatePrefetchStyle == 2 && is_intel() &&
      cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core
    AllocatePrefetchDistance = 320;
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("UseSSE=%d",UseSSE);
    tty->print("Allocation: ");
    if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) {
      tty->print_cr("no prefetching");
    } else {
      if (UseSSE == 0 && supports_3dnow()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" %d, %d lines with step %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" %d, one line", AllocatePrefetchDistance);
      }
    }
  }
#endif // !PRODUCT
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Generating this stub must be the FIRST use of the assembler.

  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
  }
  CodeBuffer c(stub_blob->instructions_begin(),
               stub_blob->instructions_size());
  VM_Version_StubGenerator g(&c);
  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
                                   g.generate_getPsrInfo());

  get_processor_features();
}
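
// Illustrative only (not part of this file): elsewhere in the VM the feature
// flags cached above are what code generators and runtime paths query when
// choosing between alternative code sequences, e.g.
//   if (VM_Version::supports_sse2()) { /* take an SSE2-based path */ }
//   else                             { /* fall back to x87/MMX code */ }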
