/*
 * Copyright (c) 2020 - 2023 the ThorVG project. All rights reserved.

 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:

 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.

 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <deque>
#include <thread>
#include <vector>
#include <atomic>
#include <mutex>
#include <cstdint>
#include <condition_variable>
#include "tvgTaskScheduler.h"

/************************************************************************/
/* Internal Class Implementation                                        */
/************************************************************************/

namespace tvg {

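//A mutex-guarded task deque. One queue is assigned to each worker thread:
//the non-blocking tryPop()/tryPush() let callers probe a queue without
//waiting on a contended lock, while pop()/push() are the blocking fallbacks.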
struct TaskQueue {
    deque<Task*> taskDeque;
    mutex mtx;
    condition_variable ready;
    bool done = false;

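    //Non-blocking pop: gives up immediately if the lock is contended or the
    //queue is empty. Used by workers to steal tasks from any queue.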
    bool tryPop(Task** task)
    {
        unique_lock<mutex> lock{mtx, try_to_lock};
        if (!lock || taskDeque.empty()) return false;
        *task = taskDeque.front();
        taskDeque.pop_front();

        return true;
    }

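    //Non-blocking push: fails if the lock is contended so the caller can try
    //the next queue instead of waiting here.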
    bool tryPush(Task* task)
    {
        {
            unique_lock<mutex> lock{mtx, try_to_lock};
            if (!lock) return false;
            taskDeque.push_back(task);
        }

        ready.notify_one();

        return true;
    }

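    //Marks the queue as finished and wakes every waiting worker so it can
    //drain the remaining tasks and exit.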
    void complete()
    {
        {
            unique_lock<mutex> lock{mtx};
            done = true;
        }
        ready.notify_all();
    }

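    //Blocking pop: waits until a task arrives or the queue is completed.
    //Returns false only when the queue is done and fully drained.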
    bool pop(Task** task)
    {
        unique_lock<mutex> lock{mtx};

        while (taskDeque.empty() && !done) {
            ready.wait(lock);
        }

        if (taskDeque.empty()) return false;

        *task = taskDeque.front();
        taskDeque.pop_front();

        return true;
    }

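    //Blocking push: always succeeds. Used as the fallback when tryPush()
    //failed on every queue.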
    void push(Task* task)
    {
        {
            unique_lock<mutex> lock{mtx};
            taskDeque.push_back(task);
        }

        ready.notify_one();
    }

};


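//The scheduler keeps one TaskQueue per worker thread. request() distributes
//tasks round-robin through an atomic index, and idle workers steal from the
//other queues via tryPop() before blocking on their own queue.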
struct TaskSchedulerImpl
{
    uint32_t threadCnt;
    vector<thread> threads;
    vector<TaskQueue> taskQueues;
    atomic<uint32_t> idx{0};

    TaskSchedulerImpl(unsigned threadCnt) : threadCnt(threadCnt), taskQueues(threadCnt)
    {
        for (unsigned i = 0; i < threadCnt; ++i) {
            threads.emplace_back([&, i] { run(i); });
        }
    }

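    //Signal every queue to finish, then wait for all workers to exit.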
    ~TaskSchedulerImpl()
    {
        for (auto& queue : taskQueues) queue.complete();
        for (auto& thread : threads) thread.join();
    }

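    //Worker loop for thread i: first probe all queues (starting with its own)
    //with the non-blocking tryPop() to steal pending work, then block on its
    //own queue. The loop ends once that queue is completed and empty. Each
    //task is executed with id i + 1; id 0 is reserved for synchronous
    //execution in request().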
    void run(unsigned i)
    {
        Task* task;

        //Thread Loop
        while (true) {
            auto success = false;
            for (unsigned x = 0; x < threadCnt * 2; ++x) {
                if (taskQueues[(i + x) % threadCnt].tryPop(&task)) {
                    success = true;
                    break;
                }
            }

            if (!success && !taskQueues[i].pop(&task)) break;
            (*task)(i + 1);
        }
    }

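    //Entry point for new tasks: in async mode the task is prepared and handed
    //to a queue chosen round-robin, trying a non-blocking push on each queue
    //before falling back to a blocking push; with zero threads the task runs
    //immediately on the caller's thread.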
    void request(Task* task)
    {
        //Async
        if (threadCnt > 0) {
            task->prepare();
            auto i = idx++;
            for (unsigned n = 0; n < threadCnt; ++n) {
                if (taskQueues[(i + n) % threadCnt].tryPush(task)) return;
            }
            taskQueues[i % threadCnt].push(task);
        //Sync
        } else {
            task->run(0);
        }
    }
};

}  //namespace tvg

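//Process-wide scheduler instance, created by TaskScheduler::init() and
//destroyed by TaskScheduler::term().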
static TaskSchedulerImpl* inst = nullptr;

/************************************************************************/
/* External Class Implementation                                        */
/************************************************************************/

void TaskScheduler::init(unsigned threads)
{
    if (inst) return;
    inst = new TaskSchedulerImpl(threads);
}


void TaskScheduler::term()
{
    if (!inst) return;
    delete inst;
    inst = nullptr;
}


void TaskScheduler::request(Task* task)
{
    if (inst) inst->request(task);
}


unsigned TaskScheduler::threads()
{
    if (inst) return inst->threadCnt;
    return 0;
}