/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

static uint calculate_nworkers_based_on_ncpus(double cpu_share_in_percent) {
  return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0);
}

static uint calculate_nworkers_based_on_heap_size(double reserve_share_in_percent) {
  const int nworkers = ((MaxHeapSize * (reserve_share_in_percent / 100.0)) - ZPageSizeMedium) / ZPageSizeSmall;
  return MAX2(nworkers, 1);
}
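// Worked example (hypothetical numbers): with MaxHeapSize = 512M, a 10% reserve
// share, and the typical 32M medium / 2M small page sizes, this gives
// (51.2M - 32M) / 2M ≈ 9 workers (the quotient is truncated to an int). The
// MAX2 clamp guarantees at least one worker even when the heap is too small
// for the subtraction to stay positive.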

static uint calculate_nworkers(double cpu_share_in_percent) {
  // Cap number of workers so that we never use more than 10% of the max heap
  // for the reserve. This is useful when using small heaps on large machines.
  return MIN2(calculate_nworkers_based_on_ncpus(cpu_share_in_percent),
              calculate_nworkers_based_on_heap_size(10.0));
}
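// For example (hypothetical numbers), a 128-CPU machine with a 256M max heap
// and the typical 32M medium / 2M small page sizes would get 77 workers from
// the CPU-based formula at a 60% share, but the heap-based cap limits this to
// a single worker, keeping the reserve within 10% of the heap.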

uint ZWorkers::calculate_nparallel() {
  // Use 60% of the CPUs, rounded up. We would like to use as many threads as
  // possible to increase parallelism. However, using a thread count that is
  // close to the number of processors tends to lead to over-provisioning and
  // scheduling latency issues. Using 60% of the active processors appears to
  // be a fairly good balance.
  return calculate_nworkers(60.0);
}
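// For example, a machine with 16 active processors at VM startup requests
// ceil(16 * 0.60) = 10 parallel workers, before the heap-size cap above.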

uint ZWorkers::calculate_nconcurrent() {
  // Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
  // would like to use heavily depends on the type of workload we are running.
  // Using too many threads will have a negative impact on the application
  // throughput, while using too few threads will prolong the GC-cycle and
  // we then risk being out-run by the application. Using 12.5% of the active
  // processors appears to be a fairly good balance.
  return calculate_nworkers(12.5);
}
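// For example, 16 active processors give ceil(16 * 0.125) = 2 concurrent
// workers, and rounding up means even a 4-CPU machine still gets one
// concurrent worker, again subject to the heap-size cap.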
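// One-shot task executed on every worker thread at startup: each thread
// registers itself as a ZGC worker and then waits on a shared monitor until
// all _nworkers threads have started, so thread startup costs are paid during
// initialization rather than in the first GC pauses.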
class ZWorkersInitializeTask : public ZTask {
private:
  const uint _nworkers;
  uint       _started;
  Monitor    _monitor;

public:
  ZWorkersInitializeTask(uint nworkers) :
      ZTask("ZWorkersInitializeTask"),
      _nworkers(nworkers),
      _started(0),
      _monitor(Monitor::leaf,
               "ZWorkersInitialize",
               false /* allow_vm_block */,
               Monitor::_safepoint_check_never) {}

  virtual void work() {
    // Register as worker
    ZThread::set_worker();

    // Wait for all threads to start
    MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
    if (++_started == _nworkers) {
      // All threads started
      ml.notify_all();
    } else {
      while (_started != _nworkers) {
        ml.wait();
      }
    }
  }
};

ZWorkers::ZWorkers() :
    _boost(false),
    _workers("ZWorker",
             nworkers(),
             true /* are_GC_task_threads */,
             true /* are_ConcurrentGC_threads */) {

  log_info(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());

  // Initialize worker threads
  _workers.initialize_workers();
  _workers.update_active_workers(nworkers());
  if (_workers.active_workers() != nworkers()) {
    vm_exit_during_initialization("Failed to create ZWorkers");
  }

  // Execute task to register threads as workers. This also helps
  // reduce latency in early GC pauses, which otherwise would have
  // to take on any warmup costs.
  ZWorkersInitializeTask task(nworkers());
  run(&task, nworkers());
}

void ZWorkers::set_boost(bool boost) {
  if (boost) {
    log_debug(gc)("Boosting workers");
  }

  _boost = boost;
}

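// Resizes the gang's active worker count for the requested phase and dispatches
// the task to the underlying worker gang. Both the parallel (pause) and
// concurrent paths funnel through here with their respective worker counts.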
void ZWorkers::run(ZTask* task, uint nworkers) {
  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), nworkers);
  _workers.update_active_workers(nworkers);
  _workers.run_task(task->gang_task());
}

void ZWorkers::run_parallel(ZTask* task) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
  run(task, nparallel());
}

void ZWorkers::run_concurrent(ZTask* task) {
  run(task, nconcurrent());
}

void ZWorkers::threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZWorkers::print_threads_on(outputStream* st) const {
  _workers.print_worker_threads_on(st);
}