/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25#ifndef CPU_X86_GLOBALS_X86_HPP
26#define CPU_X86_GLOBALS_X86_HPP
27
28#include "utilities/globalDefinitions.hpp"
29#include "utilities/macros.hpp"
30
31// Sets the default values for platform dependent flags used by the runtime system.
32// (see globals.hpp)
33
34define_pd_global(bool, ShareVtableStubs, true);
35
36define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
37define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
38define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
39
40define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
41// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
42// assign a different value for C2 without touching a number of files. Use
43// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
44// c1 doesn't have this problem because the fix to 4858033 assures us
45// the the vep is aligned at CodeEntryAlignment whereas c2 only aligns
46// the uep and the vep doesn't get real alignment but just slops on by
47// only assured that the entry instruction meets the 5 byte size requirement.
48#if COMPILER2_OR_JVMCI
49define_pd_global(intx, CodeEntryAlignment, 32);
50#else
51define_pd_global(intx, CodeEntryAlignment, 16);
52#endif // COMPILER2_OR_JVMCI
53define_pd_global(intx, OptoLoopAlignment, 16);
54define_pd_global(intx, InlineFrequencyCount, 100);
55define_pd_global(intx, InlineSmallCode, 1000);
56
57#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
58#define DEFAULT_STACK_RED_PAGES (1)
59#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))
60
61#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
62#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
63#define MIN_STACK_RESERVED_PAGES (0)
64
65#ifdef _LP64
66// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
67// stack if compiled for unix and LP64. To pass stack overflow tests we need
68// 20 shadow pages.
69#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(7) DEBUG_ONLY(+2))
70// For those clients that do not use write socket, we allow
71// the min range value to be below that of the default
72#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(7) DEBUG_ONLY(+2))
73#else
74#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
75#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
76#endif // _LP64
77
78define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
79define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
80define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
81define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
82
83define_pd_global(bool, RewriteBytecodes, true);
84define_pd_global(bool, RewriteFrequentPairs, true);
85
86// GC Ergo Flags
87define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
88
89define_pd_global(uintx, TypeProfileLevel, 111);
90
91define_pd_global(bool, CompactStrings, true);
92
93define_pd_global(bool, PreserveFramePointer, false);
94
95define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
96
97#if defined(_LP64) || defined(_WINDOWS)
98define_pd_global(bool, ThreadLocalHandshakes, true);
99#else
100// get_thread() is slow on linux 32 bit, therefore off by default
101define_pd_global(bool, ThreadLocalHandshakes, false);
102#endif
103
104#define ARCH_FLAGS(develop, \
105 product, \
106 diagnostic, \
107 experimental, \
108 notproduct, \
109 range, \
110 constraint, \
111 writeable) \
112 \
113 develop(bool, IEEEPrecision, true, \
114 "Enables IEEE precision (for INTEL only)") \
115 \
116 product(bool, UseStoreImmI16, true, \
117 "Use store immediate 16-bits value instruction on x86") \
118 \
119 product(intx, UseAVX, 3, \
120 "Highest supported AVX instructions set on x86/x64") \
121 range(0, 99) \
122 \
123 product(bool, UseCLMUL, false, \
124 "Control whether CLMUL instructions can be used on x86/x64") \
125 \
126 diagnostic(bool, UseIncDec, true, \
127 "Use INC, DEC instructions on x86") \
128 \
129 product(bool, UseNewLongLShift, false, \
130 "Use optimized bitwise shift left") \
131 \
132 product(bool, UseAddressNop, false, \
133 "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
134 \
135 product(bool, UseXmmLoadAndClearUpper, true, \
136 "Load low part of XMM register and clear upper part") \
137 \
138 product(bool, UseXmmRegToRegMoveAll, false, \
139 "Copy all XMM register bits when moving value between registers") \
140 \
141 product(bool, UseXmmI2D, false, \
142 "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
143 \
144 product(bool, UseXmmI2F, false, \
145 "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
146 \
147 product(bool, UseUnalignedLoadStores, false, \
148 "Use SSE2 MOVDQU instruction for Arraycopy") \
149 \
150 product(bool, UseXMMForObjInit, false, \
151 "Use XMM/YMM MOVDQU instruction for Object Initialization") \
152 \
153 product(bool, UseFastStosb, false, \
154 "Use fast-string operation for zeroing: rep stosb") \
155 \
156 /* Use Restricted Transactional Memory for lock eliding */ \
157 product(bool, UseRTMLocking, false, \
158 "Enable RTM lock eliding for inflated locks in compiled code") \
159 \
160 experimental(bool, UseRTMForStackLocks, false, \
161 "Enable RTM lock eliding for stack locks in compiled code") \
162 \
163 product(bool, UseRTMDeopt, false, \
164 "Perform deopt and recompilation based on RTM abort ratio") \
165 \
166 product(int, RTMRetryCount, 5, \
167 "Number of RTM retries on lock abort or busy") \
168 range(0, max_jint) \
169 \
170 experimental(int, RTMSpinLoopCount, 100, \
171 "Spin count for lock to become free before RTM retry") \
172 range(0, max_jint) \
173 \
174 experimental(int, RTMAbortThreshold, 1000, \
175 "Calculate abort ratio after this number of aborts") \
176 range(0, max_jint) \
177 \
178 experimental(int, RTMLockingThreshold, 10000, \
179 "Lock count at which to do RTM lock eliding without " \
180 "abort ratio calculation") \
181 range(0, max_jint) \
182 \
183 experimental(int, RTMAbortRatio, 50, \
184 "Lock abort ratio at which to stop use RTM lock eliding") \
185 range(0, 100) /* natural range */ \
186 \
187 experimental(int, RTMTotalCountIncrRate, 64, \
188 "Increment total RTM attempted lock count once every n times") \
189 range(1, max_jint) \
190 constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo) \
191 \
192 experimental(intx, RTMLockingCalculationDelay, 0, \
193 "Number of milliseconds to wait before start calculating aborts " \
194 "for RTM locking") \
195 \
196 experimental(bool, UseRTMXendForLockBusy, true, \
197 "Use RTM Xend instead of Xabort when lock busy") \
198 \
199 /* assembler */ \
200 product(bool, UseCountLeadingZerosInstruction, false, \
201 "Use count leading zeros instruction") \
202 \
203 product(bool, UseCountTrailingZerosInstruction, false, \
204 "Use count trailing zeros instruction") \
205 \
206 product(bool, UseSSE42Intrinsics, false, \
207 "SSE4.2 versions of intrinsics") \
208 \
209 product(bool, UseBMI1Instructions, false, \
210 "Use BMI1 instructions") \
211 \
212 product(bool, UseBMI2Instructions, false, \
213 "Use BMI2 instructions") \
214 \
215 diagnostic(bool, UseLibmIntrinsic, true, \
216 "Use Libm Intrinsics")
217#endif // CPU_X86_GLOBALS_X86_HPP
218