src/cpu/x86/vm/globals_x86.hpp

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_GLOBALS_X86_HPP
#define CPU_X86_VM_GLOBALS_X86_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
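//
// Illustrative sketch only (an assumption, not taken from this changeset):
// each define_pd_global(type, name, value) below is expected to expand to a
// pd_-prefixed constant that the shared flag machinery in globals.hpp picks
// up as the platform default, roughly along the lines of
//
//   #define define_pd_global(type, name, value)  const type pd_##name = value;
//
//   // so define_pd_global(bool, ConvertSleepToYield, true) would yield
//   //   const bool pd_ConvertSleepToYield = true;
//
// See globals.hpp for the authoritative definition.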
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, CountInterpCalls, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this

define_pd_global(bool, ImplicitNullChecks, true);  // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true);  // Uncommon-trap NULLs passed to check cast

// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
// c1 doesn't have this problem because the fix to 4858033 assures us
// that the vep is aligned at CodeEntryAlignment, whereas c2 only aligns
// the uep; the vep doesn't get real alignment but just slops on by,
// only assured that the entry instruction meets the 5 byte size requirement.
#ifdef COMPILER2
define_pd_global(intx, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);

define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
// Very large C++ stack frames are seen in solaris-amd64 optimized builds,
// due to lack of optimization caused by C++ compiler bugs.
define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
#else
define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
#endif // AMD64
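//
// For illustration (the real definitions live in utilities/macros.hpp): the
// selector macros above keep their argument on matching configurations and
// expand to nothing otherwise, so the shadow-page defaults work out roughly
// as follows:
//
//   // 64-bit Windows, debug build:
//   //   NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2)  ->  6 +2  -> 8 pages
//   // other x64 platforms, product build:
//   //   NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2)  ->  20    -> 20 pages
//   // 32-bit debug build:
//   //   4 DEBUG_ONLY(+5)                            ->  4 +5  -> 9 pages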

define_pd_global(intx, PreInflateSpin, 10);

define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);

#ifdef _ALLBSD_SOURCE
define_pd_global(bool, UseMembar, true);
#else
define_pd_global(bool, UseMembar, false);
#endif

// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
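// (M here is the usual size constant from globalDefinitions.hpp, so 64*M is
// 64 * 1024 * 1024 = 67108864 bytes of young gen per GC worker thread.)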

define_pd_global(uintx, TypeProfileLevel, 111);

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
  \
  develop(bool, IEEEPrecision, true, \
          "Enables IEEE precision (for INTEL only)") \
  \
  product(intx, FenceInstruction, 0, \
          "(Unsafe,Unstable) Experimental") \
  \
  product(intx, ReadPrefetchInstr, 0, \
          "Prefetch instruction to prefetch ahead") \
  \
  product(bool, UseStoreImmI16, true, \
          "Use store immediate 16-bit value instruction on x86") \
  \
  product(intx, UseAVX, 99, \
          "Highest supported AVX instruction set on x86/x64") \
  \
  product(bool, UseCLMUL, false, \
          "Control whether CLMUL instructions can be used on x86/x64") \
  \
  diagnostic(bool, UseIncDec, true, \
          "Use INC, DEC instructions on x86") \
  \
  product(bool, UseNewLongLShift, false, \
          "Use optimized bitwise shift left") \
  \
  product(bool, UseAddressNop, false, \
          "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
  \
  product(bool, UseXmmLoadAndClearUpper, true, \
          "Load low part of XMM register and clear upper part") \
  \
  product(bool, UseXmmRegToRegMoveAll, false, \
          "Copy all XMM register bits when moving value between registers") \
  \
  product(bool, UseXmmI2D, false, \
          "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
  \
  product(bool, UseXmmI2F, false, \
          "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
  \
  product(bool, UseUnalignedLoadStores, false, \
          "Use SSE2 MOVDQU instruction for Arraycopy") \
  \
  product(bool, UseFastStosb, false, \
          "Use fast-string operation for zeroing: rep stosb") \
  \
  /* Use Restricted Transactional Memory for lock eliding */ \
  experimental(bool, UseRTMLocking, false, \
          "Enable RTM lock eliding for inflated locks in compiled code") \
  \
  experimental(bool, UseRTMForStackLocks, false, \
          "Enable RTM lock eliding for stack locks in compiled code") \
  \
  experimental(bool, UseRTMDeopt, false, \
          "Perform deopt and recompilation based on RTM abort ratio") \
  \
  experimental(uintx, RTMRetryCount, 5, \
          "Number of RTM retries on lock abort or busy") \
  \
  experimental(intx, RTMSpinLoopCount, 100, \
          "Spin count for lock to become free before RTM retry") \
  \
  experimental(intx, RTMAbortThreshold, 1000, \
          "Calculate abort ratio after this number of aborts") \
  \
  experimental(intx, RTMLockingThreshold, 10000, \
          "Lock count at which to do RTM lock eliding without " \
          "abort ratio calculation") \
  \
  experimental(intx, RTMAbortRatio, 50, \
          "Lock abort ratio at which to stop using RTM lock eliding") \
  \
  experimental(intx, RTMTotalCountIncrRate, 64, \
          "Increment total RTM attempted lock count once every n times") \
  \
  experimental(intx, RTMLockingCalculationDelay, 0, \
          "Number of milliseconds to wait before starting to calculate aborts " \
          "for RTM locking") \
  \
  experimental(bool, UseRTMXendForLockBusy, true, \
          "Use RTM Xend instead of Xabort when lock busy") \
  \
  /* assembler */ \
  product(bool, Use486InstrsOnly, false, \
          "Use 80486-compliant instruction subset") \
  \
  product(bool, UseCountLeadingZerosInstruction, false, \
          "Use count leading zeros instruction") \
  \
  product(bool, UseCountTrailingZerosInstruction, false, \
          "Use count trailing zeros instruction") \
  \
  product(bool, UseBMI1Instructions, false, \
          "Use BMI instructions")

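// Illustrative sketch only: ARCH_FLAGS is an X-macro list, so code that
// needs one definition (or declaration) per flag passes in a macro for each
// flag kind and lets the list above expand. The MY_*_FLAG names below are
// hypothetical, for illustration; the real consumers live in the shared
// runtime flag code (globals.cpp / globals_extension.hpp).
//
//   #define MY_DEVELOP_FLAG(type, name, value, doc)      type name = value;
//   #define MY_PRODUCT_FLAG(type, name, value, doc)      type name = value;
//   #define MY_DIAGNOSTIC_FLAG(type, name, value, doc)   type name = value;
//   #define MY_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
//   #define MY_NOTPRODUCT_FLAG(type, name, value, doc)   type name = value;
//
//   ARCH_FLAGS(MY_DEVELOP_FLAG, MY_PRODUCT_FLAG, MY_DIAGNOSTIC_FLAG,
//              MY_EXPERIMENTAL_FLAG, MY_NOTPRODUCT_FLAG)
//
// With these hypothetical definitions, an entry such as
//   product(bool, UseStoreImmI16, true, "...")
// expands to
//   bool UseStoreImmI16 = true;
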
#endif // CPU_X86_VM_GLOBALS_X86_HPP
