/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_GC_SHARED_GC_GLOBALS_HPP
#define SHARE_GC_SHARED_GC_GLOBALS_HPP

#include "runtime/globals_shared.hpp"
#include "utilities/macros.hpp"
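// The INCLUDE_<gc>GC guards and the <GC>GC_ONLY() wrappers used further down
// come from utilities/macros.hpp; each per-collector flag header (and its
// GC_<gc>_FLAGS list) is pulled in only when that collector is part of the
// build, so excluded collectors contribute no flags at all.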
#if INCLUDE_CMSGC
#include "gc/cms/cms_globals.hpp"
#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilon_globals.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1_globals.hpp"
#endif
#if INCLUDE_PARALLELGC
#include "gc/parallel/parallel_globals.hpp"
#endif
#if INCLUDE_SERIALGC
#include "gc/serial/serial_globals.hpp"
#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoah_globals.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/z_globals.hpp"
#endif

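/*
 * GC_FLAGS below is an "x-macro": every flag kind (develop, product,
 * diagnostic, experimental, ...) is passed in as a macro parameter, so the
 * same flag list can be expanded in different ways -- declaring the flag
 * globals, registering flag metadata, emitting range/constraint checks, and
 * so on.
 *
 * A minimal, purely illustrative sketch of the pattern (MY_FLAGS and
 * DECLARE_FLAG are hypothetical names, not the actual HotSpot expansion):
 *
 *   #define MY_FLAGS(develop, product)                                      \
 *     develop(bool,  TraceThing, false, "Trace the thing")                   \
 *     product(uintx, ThingLimit, 10,    "Upper bound for the thing")
 *
 *   #define DECLARE_FLAG(type, name, value, doc) extern type name;
 *
 *   MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)   // expands to two extern decls
 */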
#define GC_FLAGS(develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable) \
\
CMSGC_ONLY(GC_CMS_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
EPSILONGC_ONLY(GC_EPSILON_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
G1GC_ONLY(GC_G1_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
PARALLELGC_ONLY(GC_PARALLEL_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
SERIALGC_ONLY(GC_SERIAL_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
SHENANDOAHGC_ONLY(GC_SHENANDOAH_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
ZGC_ONLY(GC_Z_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
/* gc */ \
\
product(bool, UseConcMarkSweepGC, false, \
"Use Concurrent Mark-Sweep GC in the old generation") \
\
product(bool, UseSerialGC, false, \
"Use the Serial garbage collector") \
\
product(bool, UseG1GC, false, \
"Use the Garbage-First garbage collector") \
\
product(bool, UseParallelGC, false, \
"Use the Parallel Scavenge garbage collector") \
\
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
experimental(bool, UseEpsilonGC, false, \
"Use the Epsilon (no-op) garbage collector") \
\
experimental(bool, UseZGC, false, \
"Use the Z garbage collector") \
\
experimental(bool, UseShenandoahGC, false, \
"Use the Shenandoah garbage collector") \
\
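/* The collector-selection flags above are normally set on the command */ \
/* line, e.g. -XX:+UseG1GC or -XX:+UseSerialGC; the experimental */ \
/* collectors additionally require -XX:+UnlockExperimentalVMOptions, and */ \
/* selecting more than one collector is rejected during argument */ \
/* processing. */ \
\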
product(uint, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \
\
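/* A range(min, max) or constraint(func, when) entry applies to the flag */ \
/* declared immediately above it: ranges bound the values the flag may */ \
/* take, and constraint functions run at the named phase (AfterErgo, */ \
/* AfterMemoryInit), once the inputs they depend on are known. */ \
\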
diagnostic(bool, UseSemaphoreGCThreadsSynchronization, true, \
"Use semaphore synchronization for the GC Threads, " \
"instead of synchronization based on mutexes") \
\
product(bool, UseDynamicNumberOfGCThreads, true, \
"Dynamically choose the number of GC threads (up to a maximum of " \
"ParallelGCThreads) that parallel collectors will use for " \
"garbage collection work") \
\
diagnostic(bool, InjectGCWorkerCreationFailure, false, \
"Inject thread creation failures for " \
"UseDynamicNumberOfGCThreads") \
\
diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \
"Force dynamic selection of the number of " \
"parallel threads parallel gc will use to aid debugging") \
\
product(size_t, HeapSizePerGCThread, ScaleForWordSize(32*M), \
"Size of heap (bytes) per GC thread used in calculating the " \
"number of GC threads") \
range((size_t)os::vm_page_size(), (size_t)max_uintx) \
\
product(uint, ConcGCThreads, 0, \
"Number of threads concurrent gc will use") \
constraint(ConcGCThreadsConstraintFunc,AfterErgo) \
\
product(uint, GCTaskTimeStampEntries, 200, \
"Number of time stamp entries per gc worker thread") \
range(1, max_jint) \
\
product(bool, AlwaysTenure, false, \
"Always tenure objects in eden (ParallelGC only)") \
\
product(bool, NeverTenure, false, \
"Never tenure objects in eden, may tenure on overflow " \
"(ParallelGC only)") \
\
product(bool, ScavengeBeforeFullGC, true, \
"Scavenge youngest generation before each full GC.") \
\
product(bool, ExplicitGCInvokesConcurrent, false, \
"A System.gc() request invokes a concurrent collection " \
"(effective only when using concurrent collectors)") \
\
product(bool, GCLockerInvokesConcurrent, false, \
"The exit of a JNI critical section necessitating a scavenge " \
"also kicks off a background concurrent collection") \
\
product(uintx, GCLockerEdenExpansionPercent, 5, \
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
range(0, 100) \
\
diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
"Number of times to retry allocations when " \
"blocked by the GC locker") \
range(0, max_uintx) \
\
product(uintx, ParallelGCBufferWastePct, 10, \
"Wasted fraction of parallel allocation buffer") \
range(0, 100) \
\
product(uintx, TargetPLABWastePct, 10, \
"Target wasted space in last buffer as percent of overall " \
"allocation") \
range(1, 100) \
\
product(uintx, PLABWeight, 75, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for ResizePLAB") \
range(0, 100) \
\
product(bool, ResizePLAB, true, \
"Dynamically resize (survivor space) promotion LAB's") \
\
product(int, ParGCArrayScanChunk, 50, \
"Scan a subset of an object array and push the remainder if the " \
"array is bigger than this") \
range(1, max_jint/3) \
\
product(uintx, OldPLABWeight, 50, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for resizing " \
"OldPLABSize") \
range(0, 100) \
\
product(bool, ResizeOldPLAB, true, \
"Dynamically resize (old gen) promotion LAB's") \
\
product(bool, AlwaysPreTouch, false, \
"Force all freshly committed pages to be pre-touched") \
\
product(size_t, PreTouchParallelChunkSize, 1 * G, \
"Per-thread chunk size for parallel memory pre-touch.") \
range(1, SIZE_MAX / 2) \
\
/* where does the range max value of (max_jint - 1) come from? */ \
product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
"Maximum size of marking stack") \
range(1, (max_jint - 1)) \
\
product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
"Size of marking stack") \
constraint(MarkStackSizeConstraintFunc,AfterErgo) \
\
develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive) block offset array verification") \
\
diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray " \
"(currently applicable only to CMS collector)") \
\
product(intx, RefDiscoveryPolicy, 0, \
"Select type of reference discovery policy: " \
"reference-based(0) or referent-based(1)") \
range(ReferenceProcessor::DiscoveryPolicyMin, \
ReferenceProcessor::DiscoveryPolicyMax) \
\
product(bool, ParallelRefProcEnabled, false, \
"Enable parallel reference processing whenever possible") \
\
product(bool, ParallelRefProcBalancingEnabled, true, \
"Enable balancing of reference processing queues") \
\
experimental(size_t, ReferencesPerThread, 1000, \
"Ergonomically start one thread for this amount of " \
"references for reference processing if " \
"ParallelRefProcEnabled is true. Specify 0 to disable and " \
"use all threads.") \
\
product(uintx, InitiatingHeapOccupancyPercent, 45, \
"The percent occupancy (IHOP) of the current old generation " \
"capacity above which a concurrent mark cycle will be initiated. " \
"Its value may change over time if adaptive IHOP is enabled, " \
"otherwise the value remains constant. " \
"In the latter case a value of 0 results in concurrent marking " \
"cycles occurring as frequently as possible. A value of 100 " \
"disables concurrent marking. " \
"Fragmentation waste in the old generation is not considered " \
"free space in this calculation. (G1 collector only)") \
range(0, 100) \
\
notproduct(bool, ScavengeALot, false, \
"Force scavenge at every Nth exit from the runtime system " \
"(N=ScavengeALotInterval)") \
\
develop(bool, FullGCALot, false, \
"Force full gc at every Nth exit from the runtime system " \
"(N=FullGCALotInterval)") \
\
notproduct(bool, GCALotAtAllSafepoints, false, \
"Enforce ScavengeALot/GCALot at all potential safepoints") \
\
notproduct(bool, PromotionFailureALot, false, \
"Use promotion failure handling on every youngest generation " \
"collection") \
\
develop(uintx, PromotionFailureALotCount, 1000, \
"Number of promotion failures occurring at PLAB " \
"refill attempts (ParNew) or promotion attempts " \
"(other young collectors)") \
\
develop(uintx, PromotionFailureALotInterval, 5, \
"Total collections between promotion failures forced by PromotionFailureALot") \
\
diagnostic(bool, UseOWSTTaskTerminator, true, \
"Use Optimized Work Stealing Threads task termination " \
"protocol") \
\
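/* The four work-stealing knobs below tune how idle GC workers back off */ \
/* while waiting for work or termination: hard spinning first */ \
/* (WorkStealingHardSpins, WorkStealingSpinToYieldRatio), then yielding, */ \
/* and finally sleeping (WorkStealingYieldsBeforeSleep, */ \
/* WorkStealingSleepMillis). */ \
\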
experimental(uintx, WorkStealingSleepMillis, 1, \
"Sleep time when sleep is used for yields") \
\
experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \
"Number of yields before a sleep is done during work stealing") \
\
experimental(uintx, WorkStealingHardSpins, 4096, \
"Number of iterations in a spin loop between checks on " \
"time out of hard spin") \
\
experimental(uintx, WorkStealingSpinToYieldRatio, 10, \
"Ratio of hard spins to calls to yield") \
\
develop(uintx, ObjArrayMarkingStride, 2048, \
"Number of object array elements to push onto the marking stack " \
"before pushing a continuation entry") \
\
develop(bool, MetadataAllocationFailALot, false, \
"Fail metadata allocations at intervals controlled by " \
"MetadataAllocationFailALotInterval") \
\
develop(uintx, MetadataAllocationFailALotInterval, 1000, \
"Metadata allocation failure a lot interval") \
\
product(bool, ExecutingUnitTests, false, \
"Whether the JVM is running unit tests or not") \
\
product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\
product_pd(bool, ResizeTLAB, \
"Dynamically resize TLAB size for threads") \
\
product(bool, ZeroTLAB, false, \
"Zero out the newly created TLAB") \
\
product(bool, TLABStats, true, \
"Provide more detailed and expensive TLAB statistics.") \
\
product_pd(bool, NeverActAsServerClassMachine, \
"Never act like a server-class machine") \
\
product(bool, AlwaysActAsServerClassMachine, false, \
"Always act like a server-class machine") \
\
product_pd(uint64_t, MaxRAM, \
"Real memory size (in bytes) used to set maximum heap size") \
range(0, 0XFFFFFFFFFFFFFFFF) \
\
product(bool, AggressiveHeap, false, \
"Optimize heap options for long-running memory intensive apps") \
\
product(size_t, ErgoHeapSizeLimit, 0, \
"Maximum ergonomically set heap size (in bytes); zero means use " \
"MaxRAM * MaxRAMPercentage / 100") \
range(0, max_uintx) \
\
product(uintx, MaxRAMFraction, 4, \
"Maximum fraction (1/n) of real memory used for maximum heap " \
"size. " \
"Deprecated, use MaxRAMPercentage instead") \
range(1, max_uintx) \
\
product(uintx, MinRAMFraction, 2, \
"Minimum fraction (1/n) of real memory used for maximum heap " \
"size on systems with small physical memory size. " \
"Deprecated, use MinRAMPercentage instead") \
range(1, max_uintx) \
\
product(uintx, InitialRAMFraction, 64, \
"Fraction (1/n) of real memory used for initial heap size. " \
"Deprecated, use InitialRAMPercentage instead") \
range(1, max_uintx) \
\
product(double, MaxRAMPercentage, 25.0, \
"Maximum percentage of real memory used for maximum heap size") \
range(0.0, 100.0) \
\
product(double, MinRAMPercentage, 50.0, \
"Minimum percentage of real memory used for maximum heap " \
"size on systems with small physical memory size") \
range(0.0, 100.0) \
\
product(double, InitialRAMPercentage, 1.5625, \
"Percentage of real memory used for initial heap size") \
range(0.0, 100.0) \
\
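/* Rough example of the ergonomic defaults above: on a machine with 8G of */ \
/* physical memory and no explicit -Xmx/-Xms, MaxRAMPercentage=25 yields a */ \
/* default maximum heap of about 2G and InitialRAMPercentage=1.5625 an */ \
/* initial heap of about 128M; MinRAMPercentage is used instead of */ \
/* MaxRAMPercentage only on machines with little physical memory. */ \
\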
product(int, ActiveProcessorCount, -1, \
"Specify the CPU count the VM should use and report as active") \
\
develop(uintx, MaxVirtMemFraction, 2, \
"Maximum fraction (1/n) of virtual memory used for ergonomically "\
"determining maximum heap size") \
\
product(bool, UseAdaptiveSizePolicy, true, \
"Use adaptive generation sizing policies") \
\
product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \
"Use adaptive survivor sizing policies") \
\
product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \
"Use adaptive young-old sizing policies at minor collections") \
\
product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \
"Use adaptive young-old sizing policies at major collections") \
\
product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \
"Include statistics from System.gc() for adaptive size policy") \
\
product(bool, UseAdaptiveGCBoundary, false, \
"Allow young-old boundary to move") \
\
develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \
"Resize the virtual spaces of the young or old generations") \
range(-1, 1) \
\
product(uintx, AdaptiveSizeThroughPutPolicy, 0, \
"Policy for changing generation size for throughput goals") \
range(0, 1) \
\
product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \
"Number of steps where heuristics are used before data is used") \
range(0, max_uintx) \
\
develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \
"Number of collections before the adaptive sizing is started") \
\
product(uintx, AdaptiveSizePolicyOutputInterval, 0, \
"Collection interval for printing information; zero means never") \
range(0, max_uintx) \
\
product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \
"Use adaptive minimum footprint as a goal") \
\
product(uintx, AdaptiveSizePolicyWeight, 10, \
"Weight given to exponential resizing, between 0 and 100") \
range(0, 100) \
\
product(uintx, AdaptiveTimeWeight, 25, \
"Weight given to time in adaptive policy, between 0 and 100") \
range(0, 100) \
\
product(uintx, PausePadding, 1, \
"How much buffer to keep for pause time") \
range(0, max_juint) \
\
product(uintx, PromotedPadding, 3, \
"How much buffer to keep for promotion failure") \
range(0, max_juint) \
\
product(uintx, SurvivorPadding, 3, \
"How much buffer to keep for survivor overflow") \
range(0, max_juint) \
\
product(uintx, ThresholdTolerance, 10, \
"Allowed collection cost difference between generations") \
range(0, 100) \
\
product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \
"If collection costs are within margin, reduce both by full " \
"delta") \
range(0, 100) \
\
product(uintx, YoungGenerationSizeIncrement, 20, \
"Adaptive size percentage change in young generation") \
range(0, 100) \
\
product(uintx, YoungGenerationSizeSupplement, 80, \
"Supplement to YoungGenerationSizeIncrement used at startup") \
range(0, 100) \
\
product(uintx, YoungGenerationSizeSupplementDecay, 8, \
"Decay factor to YoungGenerationSizeSupplement") \
range(1, max_uintx) \
\
product(uintx, TenuredGenerationSizeIncrement, 20, \
"Adaptive size percentage change in tenured generation") \
range(0, 100) \
\
product(uintx, TenuredGenerationSizeSupplement, 80, \
"Supplement to TenuredGenerationSizeIncrement used at startup") \
range(0, 100) \
\
product(uintx, TenuredGenerationSizeSupplementDecay, 2, \
"Decay factor to TenuredGenerationSizeSupplement") \
range(1, max_uintx) \
\
product(uintx, MaxGCPauseMillis, max_uintx - 1, \
"Adaptive size policy maximum GC pause time goal in milliseconds, "\
"or (G1 Only) the maximum GC time per MMU time slice") \
range(1, max_uintx - 1) \
constraint(MaxGCPauseMillisConstraintFunc,AfterErgo) \
\
product(uintx, GCPauseIntervalMillis, 0, \
"Time slice for MMU specification") \
constraint(GCPauseIntervalMillisConstraintFunc,AfterErgo) \
\
product(uintx, MaxGCMinorPauseMillis, max_uintx, \
"Adaptive size policy maximum GC minor pause time goal " \
"in milliseconds") \
range(0, max_uintx) \
\
product(uintx, GCTimeRatio, 99, \
"Adaptive size policy application time to GC time ratio") \
range(0, max_juint) \
\
product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \
"Adaptive size scale down factor for shrinking") \
range(1, max_uintx) \
\
product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \
"Adaptive size decays the major cost for long major intervals") \
\
product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \
"Time scale over which major costs decay") \
range(0, max_uintx) \
\
product(uintx, MinSurvivorRatio, 3, \
"Minimum ratio of young generation/survivor space size") \
range(3, max_uintx) \
\
product(uintx, InitialSurvivorRatio, 8, \
"Initial ratio of young generation/survivor space size") \
range(0, max_uintx) \
\
product(size_t, BaseFootPrintEstimate, 256*M, \
"Estimate of footprint other than Java Heap") \
range(0, max_uintx) \
\
product(bool, UseGCOverheadLimit, true, \
"Use policy to limit the proportion of time spent in GC " \
"before an OutOfMemoryError is thrown") \
\
product(uintx, GCTimeLimit, 98, \
"Limit of the proportion of time spent in GC before " \
"an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \
range(0, 100) \
\
product(uintx, GCHeapFreeLimit, 2, \
"Minimum percentage of free space after a full GC before an " \
"OutOfMemoryError is thrown (used with GCTimeLimit)") \
range(0, 100) \
\
develop(uintx, GCOverheadLimitThreshold, 5, \
"Number of consecutive collections before gc time limit fires") \
range(1, max_uintx) \
\
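/* Taken together, UseGCOverheadLimit, GCTimeLimit, GCHeapFreeLimit and */ \
/* GCOverheadLimitThreshold above implement the GC overhead limit: an */ \
/* OutOfMemoryError is raised when more than GCTimeLimit percent of time */ \
/* is spent in GC while less than GCHeapFreeLimit percent of the heap is */ \
/* recovered, sustained for GCOverheadLimitThreshold consecutive */ \
/* collections. */ \
\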
product(intx, PrefetchCopyIntervalInBytes, -1, \
"How far ahead to prefetch destination area (<= 0 means off)") \
range(-1, max_jint) \
\
product(intx, PrefetchScanIntervalInBytes, -1, \
"How far ahead to prefetch scan area (<= 0 means off)") \
range(-1, max_jint) \
\
product(intx, PrefetchFieldsAhead, -1, \
"How many fields ahead to prefetch in oop scan (<= 0 means off)") \
range(-1, max_jint) \
\
diagnostic(bool, VerifyDuringStartup, false, \
"Verify memory system before executing any Java code " \
"during VM initialization") \
\
diagnostic(bool, VerifyBeforeExit, trueInDebug, \
"Verify system before exiting") \
\
diagnostic(bool, VerifyBeforeGC, false, \
"Verify memory system before GC") \
\
diagnostic(bool, VerifyAfterGC, false, \
"Verify memory system after GC") \
\
diagnostic(bool, VerifyDuringGC, false, \
"Verify memory system during GC (between phases)") \
\
diagnostic(ccstrlist, VerifyGCType, "", \
"GC type(s) to verify when Verify*GC is enabled. " \
"Available types are collector specific.") \
\
diagnostic(ccstrlist, VerifySubSet, "", \
"Memory sub-systems to verify when Verify*GC flag(s) " \
"are enabled. One or more sub-systems can be specified " \
"in a comma separated string. Sub-systems are: " \
"threads, heap, symbol_table, string_table, codecache, " \
"dictionary, classloader_data_graph, metaspace, jni_handles, " \
"codecache_oops") \
\
diagnostic(bool, GCParallelVerificationEnabled, true, \
"Enable parallel memory system verification") \
\
diagnostic(bool, DeferInitialCardMark, false, \
"When +ReduceInitialCardMarks, explicitly defer any that " \
"may arise from new_pre_store_barrier") \
\
product(bool, UseCondCardMark, false, \
"Check for already marked card before updating card table") \
\
diagnostic(bool, VerifyRememberedSets, false, \
"Verify GC remembered sets") \
\
diagnostic(bool, VerifyObjectStartArray, true, \
"Verify GC object start array if verify before/after") \
\
product(bool, DisableExplicitGC, false, \
"Ignore calls to System.gc()") \
\
product(bool, BindGCTaskThreadsToCPUs, false, \
"Bind GCTaskThreads to CPUs if possible") \
\
product(bool, UseGCTaskAffinity, false, \
"Use worker affinity when asking for GCTasks") \
\
product(bool, PrintGC, false, \
"Print message at garbage collection. " \
"Deprecated, use -Xlog:gc instead.") \
\
product(bool, PrintGCDetails, false, \
"Print more details at garbage collection. " \
"Deprecated, use -Xlog:gc* instead.") \
\
develop(intx, ConcGCYieldTimeout, 0, \
"If non-zero, assert that GC threads yield within this " \
"number of milliseconds") \
range(0, max_intx) \
\
notproduct(intx, ScavengeALotInterval, 1, \
"Interval at which scavenges will occur with +ScavengeALot") \
\
notproduct(intx, FullGCALotInterval, 1, \
"Interval at which full gcs will occur with +FullGCALot") \
\
notproduct(intx, FullGCALotStart, 0, \
"For which invocation to start FullGCAlot") \
\
notproduct(intx, FullGCALotDummies, 32*K, \
"Number of dummy objects allocated with +FullGCALot, forcing " \
"all objects to move") \
\
/* gc parameters */ \
product(size_t, MinHeapSize, 0, \
"Minimum heap size (in bytes); zero means use ergonomics") \
constraint(MinHeapSizeConstraintFunc,AfterErgo) \
\
product(size_t, InitialHeapSize, 0, \
"Initial heap size (in bytes); zero means use ergonomics") \
constraint(InitialHeapSizeConstraintFunc,AfterErgo) \
\
product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \
"Maximum heap size (in bytes)") \
constraint(MaxHeapSizeConstraintFunc,AfterErgo) \
\
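/* The familiar command-line switches map onto the heap size flags above: */ \
/* -Xmx sets MaxHeapSize and -Xms sets MinHeapSize and InitialHeapSize, */ \
/* so e.g. -Xms2g -Xmx2g fixes the overall heap capacity at 2G. */ \
\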
manageable(size_t, SoftMaxHeapSize, 0, \
"Soft limit for maximum heap size (in bytes)") \
constraint(SoftMaxHeapSizeConstraintFunc,AfterMemoryInit) \
\
product(size_t, OldSize, ScaleForWordSize(4*M), \
"Initial tenured generation size (in bytes)") \
range(0, max_uintx) \
\
product(size_t, NewSize, ScaleForWordSize(1*M), \
"Initial new generation size (in bytes)") \
constraint(NewSizeConstraintFunc,AfterErgo) \
\
product(size_t, MaxNewSize, max_uintx, \
"Maximum new generation size (in bytes), max_uintx means set " \
"ergonomically") \
range(0, max_uintx) \
\
product_pd(size_t, HeapBaseMinAddress, \
"OS specific low limit for heap base address") \
constraint(HeapBaseMinAddressConstraintFunc,AfterErgo) \
\
product(size_t, PretenureSizeThreshold, 0, \
"Maximum size in bytes of objects allocated in DefNew " \
"generation; zero means no maximum") \
range(0, max_uintx) \
\
product(size_t, MinTLABSize, 2*K, \
"Minimum allowed TLAB size (in bytes)") \
range(1, max_uintx/2) \
constraint(MinTLABSizeConstraintFunc,AfterMemoryInit) \
\
product(size_t, TLABSize, 0, \
"Starting TLAB size (in bytes); zero means set ergonomically") \
constraint(TLABSizeConstraintFunc,AfterMemoryInit) \
\
product(size_t, YoungPLABSize, 4096, \
"Size of young gen promotion LAB's (in HeapWords)") \
constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \
\
product(size_t, OldPLABSize, 1024, \
"Size of old gen promotion LAB's (in HeapWords), or number " \
"of blocks to attempt to claim when refilling CMS LAB's") \
constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \
\
product(uintx, TLABAllocationWeight, 35, \
"Allocation averaging weight") \
range(0, 100) \
\
/* Limit the lower bound of this flag to 1 as it is used */ \
/* in a division expression. */ \
product(uintx, TLABWasteTargetPercent, 1, \
"Percentage of Eden that can be wasted") \
range(1, 100) \
\
product(uintx, TLABRefillWasteFraction, 64, \
"Maximum TLAB waste at a refill (internal fragmentation)") \
range(1, max_juint) \
\
product(uintx, TLABWasteIncrement, 4, \
"Increment allowed waste at slow allocation") \
range(0, max_jint) \
constraint(TLABWasteIncrementConstraintFunc,AfterMemoryInit) \
\
product(uintx, SurvivorRatio, 8, \
"Ratio of eden/survivor space size") \
range(1, max_uintx-2) \
constraint(SurvivorRatioConstraintFunc,AfterMemoryInit) \
\
product(uintx, NewRatio, 2, \
"Ratio of old/new generation sizes") \
range(0, max_uintx-1) \
\
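/* Sizing arithmetic for the two ratios above: with NewRatio=2 the old */ \
/* generation is twice the young generation (young = 1/3 of the heap), */ \
/* and with SurvivorRatio=8 eden:survivor:survivor is sized 8:1:1, so */ \
/* each survivor space is 1/10 of the young generation. */ \
\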
product_pd(size_t, NewSizeThreadIncrease, \
"Additional size added to desired new generation size per " \
"non-daemon thread (in bytes)") \
range(0, max_uintx) \
\
product(uintx, QueuedAllocationWarningCount, 0, \
"Number of times an allocation that queues behind a GC " \
"will retry before printing a warning") \
range(0, max_uintx) \
\
diagnostic(uintx, VerifyGCStartAt, 0, \
"GC invoke count where +VerifyBefore/AfterGC kicks in") \
range(0, max_uintx) \
\
diagnostic(intx, VerifyGCLevel, 0, \
"Generation level at which to start +VerifyBefore/AfterGC") \
range(0, 1) \
\
product(uintx, MaxTenuringThreshold, 15, \
"Maximum value for tenuring threshold") \
range(0, markOopDesc::max_age + 1) \
constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, InitialTenuringThreshold, 7, \
"Initial value for tenuring threshold") \
range(0, markOopDesc::max_age + 1) \
constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \
\
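/* An object's age is kept in a few bits of its mark word, which is why */ \
/* both tenuring thresholds above are capped at markOopDesc::max_age + 1 */ \
/* rather than at the full range of the flag type. */ \
\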
product(uintx, TargetSurvivorRatio, 50, \
"Desired percentage of survivor space used after scavenge") \
range(0, 100) \
\
product(uintx, MarkSweepDeadRatio, 5, \
"Percentage (0-100) of the old gen allowed as dead wood. " \
"Serial mark sweep treats this as both the minimum and maximum " \
"value. " \
"CMS uses this value only if it falls back to mark sweep. " \
"Par compact uses a variable scale based on the density of the " \
"generation and treats this as the maximum value when the heap " \
"is either completely full or completely empty. Par compact " \
"also has a smaller default value; see arguments.cpp.") \
range(0, 100) \
\
product(uint, MarkSweepAlwaysCompactCount, 4, \
"How often should we fully compact the heap (ignoring the dead " \
"space parameters)") \
range(1, max_juint) \
\
develop(uintx, GCExpandToAllocateDelayMillis, 0, \
"Delay between expansion and allocation (in milliseconds)") \
\
product(uintx, GCDrainStackTargetSize, 64, \
"Number of entries we will try to leave on the stack " \
"during parallel gc") \
range(0, max_juint)

#endif // SHARE_GC_SHARED_GC_GLOBALS_HPP
