/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "gc/shared/gcConfig.hpp"
#include "utilities/defaultStream.hpp"

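// Human-readable names for the CompilerType enum values, indexed by
// CompilerType (none, c1, c2, jvmci).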
const char* compilertype2name_tab[compiler_number_of_types] = {
    "",
    "c1",
    "c2",
    "jvmci"
};

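// Default compilation levels and mode, chosen from the compilers configured
// into this build; they may be adjusted later, e.g. by set_client_compilation_mode().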
#if defined(COMPILER2)
CompLevel CompLevel_highest_tier      = CompLevel_full_optimization;  // pure C2 and tiered or JVMCI and tiered
#elif defined(COMPILER1)
CompLevel CompLevel_highest_tier      = CompLevel_simple;             // pure C1 or JVMCI
#else
CompLevel CompLevel_highest_tier      = CompLevel_none;
#endif

#if defined(TIERED)
CompLevel CompLevel_initial_compile   = CompLevel_full_profile;       // tiered
#elif defined(COMPILER1) || INCLUDE_JVMCI
CompLevel CompLevel_initial_compile   = CompLevel_simple;             // pure C1 or JVMCI
#elif defined(COMPILER2)
CompLevel CompLevel_initial_compile   = CompLevel_full_optimization;  // pure C2
#else
CompLevel CompLevel_initial_compile   = CompLevel_none;
#endif

#if defined(COMPILER2)
CompMode Compilation_mode             = CompMode_server;
#elif defined(COMPILER1)
CompMode Compilation_mode             = CompMode_client;
#else
CompMode Compilation_mode             = CompMode_none;
#endif

// Returns threshold scaled with CompileThresholdScaling
intx CompilerConfig::scaled_compile_threshold(intx threshold) {
  return scaled_compile_threshold(threshold, CompileThresholdScaling);
}

// Returns freq_log scaled with CompileThresholdScaling
intx CompilerConfig::scaled_freq_log(intx freq_log) {
  return scaled_freq_log(freq_log, CompileThresholdScaling);
}

// Returns threshold scaled with the value of scale.
// If scale < 0.0, threshold is returned without scaling.
intx CompilerConfig::scaled_compile_threshold(intx threshold, double scale) {
  if (scale == 1.0 || scale < 0.0) {
    return threshold;
  } else {
    return (intx)(threshold * scale);
  }
}

// Returns freq_log scaled with the value of scale.
// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
// If scale < 0.0, freq_log is returned without scaling.
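// For example, with freq_log == 10 and scale == 0.5, the notification frequency
// of 2^10 == 1024 is scaled down to 512 and log2(512) == 9 is returned.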
intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
  // Check if scaling is necessary or if negative value was specified.
  if (scale == 1.0 || scale < 0.0) {
    return freq_log;
  }
  // Check values to avoid calculating log2 of 0.
  if (scale == 0.0 || freq_log == 0) {
    return 0;
  }
  // Determine the maximum notification frequency value currently supported.
  // The largest mask value that the interpreter/C1 can handle is
  // of length InvocationCounter::number_of_count_bits. Mask values are always
  // one bit shorter than the value of the notification frequency. Set
  // max_freq_bits accordingly.
  intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
  if (scaled_freq == 0) {
    // Return 0 right away to avoid calculating log2 of 0.
    return 0;
  } else if (scaled_freq > nth_bit(max_freq_bits)) {
    return max_freq_bits;
  } else {
    return log2_intptr(scaled_freq);
  }
}

#ifdef TIERED
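// Configure the VM to behave like the client VM: C1 only, smaller code cache
// and metaspace defaults, and lower compile thresholds. Only flags still at
// their defaults are adjusted, so explicit command-line settings win.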
void set_client_compilation_mode() {
  Compilation_mode = CompMode_client;
  CompLevel_highest_tier = CompLevel_simple;
  CompLevel_initial_compile = CompLevel_simple;
  FLAG_SET_ERGO(TieredCompilation, false);
  FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI
  FLAG_SET_ERGO(EnableJVMCI, false);
  FLAG_SET_ERGO(UseJVMCICompiler, false);
#endif
#if INCLUDE_AOT
  FLAG_SET_ERGO(UseAOT, false);
#endif
  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
  }
  if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
    FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
  }
  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
    FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
  }
  if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
  }
  if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
  }
  if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
  }
  if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
    FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
  }
  if (FLAG_IS_DEFAULT(MetaspaceSize)) {
    FLAG_SET_ERGO(MetaspaceSize, MIN2(12*M, MaxMetaspaceSize));
  }
  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
    // heap setting done based on available phys_mem (see Arguments::set_heap_size).
    FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
  }
  if (FLAG_IS_DEFAULT(CompileThreshold)) {
    FLAG_SET_ERGO(CompileThreshold, 1500);
  }
  if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
    FLAG_SET_ERGO(OnStackReplacePercentage, 933);
  }
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_ERGO(CICompilerCount, 1);
  }
}

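// Returns true if the user explicitly selected a compilation mode on the
// command line via tiered, AOT or JVMCI related flags.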
bool compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) ||
         !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(UseAOT)
         JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
                    || !FLAG_IS_DEFAULT(UseJVMCICompiler));
}

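// Pick a compilation mode when the user did not: on 32-bit Windows default
// NeverActAsServerClassMachine to true, and whenever that flag is set fall
// back to client compilation mode.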
void select_compilation_mode_ergonomically() {
#if defined(_WINDOWS) && !defined(_LP64)
  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
  }
#endif
  if (NeverActAsServerClassMachine) {
    set_client_compilation_mode();
  }
}

#endif // TIERED

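// Set up flags for tiered compilation: compilation policy choice, code cache
// sizing, and compile thresholds scaled by CompileThresholdScaling.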
void CompilerConfig::set_tiered_flags() {
  // With tiered, set default policy to SimpleThresholdPolicy, which is 2.
  if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
    FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
  }
  if (CompilationPolicyChoice < 2) {
    vm_exit_during_initialization(
      "Incompatible compilation policy selected", NULL);
  }
  // Increase the code cache size - tiered compiles a lot more.
  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
    FLAG_SET_ERGO(ReservedCodeCacheSize,
                  MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
  }
  // Enable SegmentedCodeCache if TieredCompilation is enabled, ReservedCodeCacheSize >= 240M
  // and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
      8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
    FLAG_SET_ERGO(SegmentedCodeCache, true);
  }
  if (!UseInterpreter) { // -Xcomp
    Tier3InvokeNotifyFreqLog = 0;
    Tier4InvocationThreshold = 0;
  }

  if (CompileThresholdScaling < 0) {
    vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
  }

  // Scale tiered compilation thresholds.
  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
    FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
    FLAG_SET_ERGO(Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
    FLAG_SET_ERGO(Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
    FLAG_SET_ERGO(Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));

    // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
    // once these thresholds become supported.

    FLAG_SET_ERGO(Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));

    FLAG_SET_ERGO(Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
    FLAG_SET_ERGO(Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
    FLAG_SET_ERGO(Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
    FLAG_SET_ERGO(Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
  }
}

#if INCLUDE_JVMCI
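// Adjust flags when the JVMCI compiler is in use: wider type profiles, a tier
// stop level of full optimization, and larger compiler thread stacks or code
// cache / metaspace defaults, depending on whether the compiler runs from a
// native library or on the JVM itself.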
void set_jvmci_specific_flags() {
  if (UseJVMCICompiler) {
    Compilation_mode = CompMode_server;

    if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
      FLAG_SET_DEFAULT(TypeProfileWidth, 8);
    }
    if (TieredStopAtLevel != CompLevel_full_optimization) {
      // Currently JVMCI compiler can only work at the full optimization level
      warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
      FLAG_SET_ERGO(TieredStopAtLevel, CompLevel_full_optimization);
    }
    if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
    }

    if (UseJVMCINativeLibrary) {
      // SVM compiled code requires more stack space
      if (FLAG_IS_DEFAULT(CompilerThreadStackSize)) {
        // Duplicate logic in the implementations of os::create_thread
        // so that we can then double the computed stack size. Once
        // the stack size requirements of SVM are better understood,
        // this logic can be pushed down into os::create_thread.
        int stack_size = CompilerThreadStackSize;
        if (stack_size == 0) {
          stack_size = VMThreadStackSize;
        }
        if (stack_size != 0) {
          FLAG_SET_DEFAULT(CompilerThreadStackSize, stack_size * 2);
        }
      }
    } else {
      // Adjust the on stack replacement percentage to avoid early
      // OSR compilations while JVMCI itself is warming up
      if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
        FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
      }
      // JVMCI needs values not less than defaults
      if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
        FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
        FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(MetaspaceSize)) {
        FLAG_SET_DEFAULT(MetaspaceSize, MIN2(MAX2(12*M, MetaspaceSize), MaxMetaspaceSize));
      }
      if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
        FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
      }
    } // !UseJVMCINativeLibrary
  } // UseJVMCICompiler
}
#endif // INCLUDE_JVMCI

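// Validate compiler-related flag combinations. Recoverable inconsistencies are
// fixed up with a warning; unrecoverable ones (e.g. code cache bounds) clear
// the returned status so VM startup can fail cleanly.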
bool CompilerConfig::check_args_consistency(bool status) {
  // Check lower bounds of the code cache
  // Template Interpreter code is approximately 3X larger in debug builds.
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (ReservedCodeCacheSize < InitialCodeCacheSize) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
                ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
    status = false;
  } else if (ReservedCodeCacheSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
                min_code_cache_size/K);
    status = false;
  } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
    // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
                CODE_CACHE_SIZE_LIMIT/M);
    status = false;
  } else if (NonNMethodCodeHeapSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
                min_code_cache_size/K);
    status = false;
  }

#ifdef _LP64
  if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
    warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
  }
#endif

  if (BackgroundCompilation && ReplayCompiles) {
    if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
      warning("BackgroundCompilation disabled due to ReplayCompiles option.");
    }
    FLAG_SET_CMDLINE(BackgroundCompilation, false);
  }

#ifdef COMPILER2
  if (PostLoopMultiversioning && !RangeCheckElimination) {
    if (!FLAG_IS_DEFAULT(PostLoopMultiversioning)) {
      warning("PostLoopMultiversioning disabled because RangeCheckElimination is disabled.");
    }
    FLAG_SET_CMDLINE(PostLoopMultiversioning, false);
  }
  if (UseCountedLoopSafepoints && LoopStripMiningIter == 0) {
    if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      warning("When counted loop safepoints are enabled, LoopStripMiningIter must be at least 1 (a safepoint every 1 iteration): setting it to 1");
    }
    LoopStripMiningIter = 1;
  } else if (!UseCountedLoopSafepoints && LoopStripMiningIter > 0) {
    if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      warning("Disabling counted safepoints implies no loop strip mining: setting LoopStripMiningIter to 0");
    }
    LoopStripMiningIter = 0;
  }
#endif // COMPILER2

  if (Arguments::is_interpreter_only()) {
    if (UseCompiler) {
      if (!FLAG_IS_DEFAULT(UseCompiler)) {
        warning("UseCompiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(UseCompiler, false);
    }
    if (ProfileInterpreter) {
      if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
        warning("ProfileInterpreter disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(ProfileInterpreter, false);
    }
    if (TieredCompilation) {
      if (!FLAG_IS_DEFAULT(TieredCompilation)) {
        warning("TieredCompilation disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(TieredCompilation, false);
    }
#if INCLUDE_JVMCI
    if (EnableJVMCI) {
      if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
        warning("JVMCI Compiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(EnableJVMCI, false);
      FLAG_SET_CMDLINE(UseJVMCICompiler, false);
    }
#endif
  } else {
#if INCLUDE_JVMCI
    status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
#endif
  }
  return status;
}

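// Ergonomic initialization of compiler flags: select a compilation mode if the
// user did not, apply JVMCI- and tiered-specific settings, and fix up
// dependent C2 flags. Nothing is done when running interpreter-only (-Xint).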
void CompilerConfig::ergo_initialize() {
  if (Arguments::is_interpreter_only()) {
    return; // Nothing to do.
  }

#ifdef TIERED
  if (!compilation_mode_selected()) {
    select_compilation_mode_ergonomically();
  }
#endif

#if INCLUDE_JVMCI
  // Check that JVMCI compiler supports selected GC.
  // Should be done after GCConfig::initialize() was called.
  JVMCIGlobals::check_jvmci_supported_gc();

  // Do JVMCI specific settings
  set_jvmci_specific_flags();
#endif

  if (TieredCompilation) {
    set_tiered_flags();
  } else {
    int max_compilation_policy_choice = 1;
#ifdef COMPILER2
    if (is_server_compilation_mode_vm()) {
      max_compilation_policy_choice = 2;
    }
#endif
    // Check if the policy is valid.
    if (CompilationPolicyChoice >= max_compilation_policy_choice) {
      vm_exit_during_initialization(
        "Incompatible compilation policy selected", NULL);
    }
    // Scale CompileThreshold
    // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
    if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
      FLAG_SET_ERGO(CompileThreshold, scaled_compile_threshold(CompileThreshold));
    }
  }

  if (UseOnStackReplacement && !UseLoopCounter) {
    warning("On-stack-replacement requires loop counters; enabling loop counters");
    FLAG_SET_DEFAULT(UseLoopCounter, true);
  }

#ifdef COMPILER2
  if (!EliminateLocks) {
    EliminateNestedLocks = false;
  }
  if (!Inline) {
    IncrementalInline = false;
  }
#ifndef PRODUCT
  if (!IncrementalInline) {
    AlwaysIncrementalInline = false;
  }
  if (PrintIdealGraphLevel > 0) {
    FLAG_SET_ERGO(PrintIdealGraph, true);
  }
#endif
  if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
    // nothing uses the profiling, so turn it off
    FLAG_SET_DEFAULT(TypeProfileLevel, 0);
  }
  if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
    FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
  }
  if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
    // blind guess
    LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
  }
#endif // COMPILER2
}