1 | /* |
2 | * Copyright 2015 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "src/core/SkSharedMutex.h" |
9 | |
10 | #include "include/core/SkTypes.h" |
11 | #include "include/private/SkSemaphore.h" |
12 | |
13 | #if !defined(__has_feature) |
14 | #define __has_feature(x) 0 |
15 | #endif |
16 | |
17 | #if __has_feature(thread_sanitizer) |
18 | |
19 | /* Report that a lock has been created at address "lock". */ |
20 | #define ANNOTATE_RWLOCK_CREATE(lock) \ |
21 | AnnotateRWLockCreate(__FILE__, __LINE__, lock) |
22 | |
23 | /* Report that the lock at address "lock" is about to be destroyed. */ |
24 | #define ANNOTATE_RWLOCK_DESTROY(lock) \ |
25 | AnnotateRWLockDestroy(__FILE__, __LINE__, lock) |
26 | |
27 | /* Report that the lock at address "lock" has been acquired. |
28 | is_w=1 for writer lock, is_w=0 for reader lock. */ |
29 | #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ |
30 | AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) |
31 | |
32 | /* Report that the lock at address "lock" is about to be released. */ |
33 | #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ |
34 | AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) |
35 | |
36 | #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK) |
37 | #if defined(__GNUC__) |
38 | #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak)) |
39 | #else |
40 | /* TODO(glider): for Windows support we may want to change this macro in order |
41 | to prepend __declspec(selectany) to the annotations' declarations. */ |
42 | #error weak annotations are not supported for your compiler |
43 | #endif |
44 | #else |
45 | #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK |
46 | #endif |
47 | |
48 | extern "C" { |
49 | void AnnotateRWLockCreate( |
50 | const char *file, int line, |
51 | const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
52 | void AnnotateRWLockDestroy( |
53 | const char *file, int line, |
54 | const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
55 | void AnnotateRWLockAcquired( |
56 | const char *file, int line, |
57 | const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
58 | void AnnotateRWLockReleased( |
59 | const char *file, int line, |
60 | const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
61 | } |
62 | |
63 | #else |
64 | |
65 | #define ANNOTATE_RWLOCK_CREATE(lock) |
66 | #define ANNOTATE_RWLOCK_DESTROY(lock) |
67 | #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) |
68 | #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) |
69 | |
70 | #endif |
71 | |
72 | #ifdef SK_DEBUG |
73 | |
74 | #include "include/private/SkTDArray.h" |
75 | #include "include/private/SkThreadID.h" |
76 | |
77 | class SkSharedMutex::ThreadIDSet { |
78 | public: |
79 | // Returns true if threadID is in the set. |
80 | bool find(SkThreadID threadID) const { |
81 | for (auto& t : fThreadIDs) { |
82 | if (t == threadID) return true; |
83 | } |
84 | return false; |
85 | } |
86 | |
87 | // Returns true if did not already exist. |
88 | bool tryAdd(SkThreadID threadID) { |
89 | for (auto& t : fThreadIDs) { |
90 | if (t == threadID) return false; |
91 | } |
92 | fThreadIDs.append(1, &threadID); |
93 | return true; |
94 | } |
95 | // Returns true if already exists in Set. |
96 | bool tryRemove(SkThreadID threadID) { |
97 | for (int i = 0; i < fThreadIDs.count(); ++i) { |
98 | if (fThreadIDs[i] == threadID) { |
99 | fThreadIDs.remove(i); |
100 | return true; |
101 | } |
102 | } |
103 | return false; |
104 | } |
105 | |
106 | void swap(ThreadIDSet& other) { |
107 | fThreadIDs.swap(other.fThreadIDs); |
108 | } |
109 | |
110 | int count() const { |
111 | return fThreadIDs.count(); |
112 | } |
113 | |
114 | private: |
115 | SkTDArray<SkThreadID> fThreadIDs; |
116 | }; |
117 | |
118 | SkSharedMutex::SkSharedMutex() |
119 | : fCurrentShared(new ThreadIDSet) |
120 | , fWaitingExclusive(new ThreadIDSet) |
121 | , fWaitingShared(new ThreadIDSet){ |
122 | ANNOTATE_RWLOCK_CREATE(this); |
123 | } |
124 | |
125 | SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
126 | |
127 | void SkSharedMutex::acquire() { |
128 | SkThreadID threadID(SkGetThreadID()); |
129 | int currentSharedCount; |
130 | int waitingExclusiveCount; |
131 | { |
132 | SkAutoMutexExclusive l(fMu); |
133 | |
134 | SkASSERTF(!fCurrentShared->find(threadID), |
135 | "Thread %lx already has an shared lock\n" , threadID); |
136 | |
137 | if (!fWaitingExclusive->tryAdd(threadID)) { |
138 | SkDEBUGFAILF("Thread %lx already has an exclusive lock\n" , threadID); |
139 | } |
140 | |
141 | currentSharedCount = fCurrentShared->count(); |
142 | waitingExclusiveCount = fWaitingExclusive->count(); |
143 | } |
144 | |
145 | if (currentSharedCount > 0 || waitingExclusiveCount > 1) { |
146 | fExclusiveQueue.wait(); |
147 | } |
148 | |
149 | ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
150 | } |
151 | |
152 | // Implementation Detail: |
153 | // The shared threads need two seperate queues to keep the threads that were added after the |
154 | // exclusive lock separate from the threads added before. |
155 | void SkSharedMutex::release() { |
156 | ANNOTATE_RWLOCK_RELEASED(this, 1); |
157 | SkThreadID threadID(SkGetThreadID()); |
158 | int sharedWaitingCount; |
159 | int exclusiveWaitingCount; |
160 | int sharedQueueSelect; |
161 | { |
162 | SkAutoMutexExclusive l(fMu); |
163 | SkASSERT(0 == fCurrentShared->count()); |
164 | if (!fWaitingExclusive->tryRemove(threadID)) { |
165 | SkDEBUGFAILF("Thread %lx did not have the lock held.\n" , threadID); |
166 | } |
167 | exclusiveWaitingCount = fWaitingExclusive->count(); |
168 | sharedWaitingCount = fWaitingShared->count(); |
169 | fWaitingShared.swap(fCurrentShared); |
170 | sharedQueueSelect = fSharedQueueSelect; |
171 | if (sharedWaitingCount > 0) { |
172 | fSharedQueueSelect = 1 - fSharedQueueSelect; |
173 | } |
174 | } |
175 | |
176 | if (sharedWaitingCount > 0) { |
177 | fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount); |
178 | } else if (exclusiveWaitingCount > 0) { |
179 | fExclusiveQueue.signal(); |
180 | } |
181 | } |
182 | |
183 | void SkSharedMutex::assertHeld() const { |
184 | SkThreadID threadID(SkGetThreadID()); |
185 | SkAutoMutexExclusive l(fMu); |
186 | SkASSERT(0 == fCurrentShared->count()); |
187 | SkASSERT(fWaitingExclusive->find(threadID)); |
188 | } |
189 | |
190 | void SkSharedMutex::acquireShared() { |
191 | SkThreadID threadID(SkGetThreadID()); |
192 | int exclusiveWaitingCount; |
193 | int sharedQueueSelect; |
194 | { |
195 | SkAutoMutexExclusive l(fMu); |
196 | exclusiveWaitingCount = fWaitingExclusive->count(); |
197 | if (exclusiveWaitingCount > 0) { |
198 | if (!fWaitingShared->tryAdd(threadID)) { |
199 | SkDEBUGFAILF("Thread %lx was already waiting!\n" , threadID); |
200 | } |
201 | } else { |
202 | if (!fCurrentShared->tryAdd(threadID)) { |
203 | SkDEBUGFAILF("Thread %lx already holds a shared lock!\n" , threadID); |
204 | } |
205 | } |
206 | sharedQueueSelect = fSharedQueueSelect; |
207 | } |
208 | |
209 | if (exclusiveWaitingCount > 0) { |
210 | fSharedQueue[sharedQueueSelect].wait(); |
211 | } |
212 | |
213 | ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
214 | } |
215 | |
216 | void SkSharedMutex::releaseShared() { |
217 | ANNOTATE_RWLOCK_RELEASED(this, 0); |
218 | SkThreadID threadID(SkGetThreadID()); |
219 | |
220 | int currentSharedCount; |
221 | int waitingExclusiveCount; |
222 | { |
223 | SkAutoMutexExclusive l(fMu); |
224 | if (!fCurrentShared->tryRemove(threadID)) { |
225 | SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n" , threadID); |
226 | } |
227 | currentSharedCount = fCurrentShared->count(); |
228 | waitingExclusiveCount = fWaitingExclusive->count(); |
229 | } |
230 | |
231 | if (0 == currentSharedCount && waitingExclusiveCount > 0) { |
232 | fExclusiveQueue.signal(); |
233 | } |
234 | } |
235 | |
236 | void SkSharedMutex::assertHeldShared() const { |
237 | SkThreadID threadID(SkGetThreadID()); |
238 | SkAutoMutexExclusive l(fMu); |
239 | SkASSERT(fCurrentShared->find(threadID)); |
240 | } |
241 | |
242 | #else |
243 | |
244 | // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. |
245 | // These three counts must be the same size, so each gets 10 bits. The 10 bits represent |
246 | // the log of the count which is 1024. |
247 | // |
248 | // The three counts held in fQueueCounts are: |
249 | // * Shared - the number of shared lock holders currently running. |
250 | // * WaitingExclusive - the number of threads waiting for an exclusive lock. |
251 | // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread |
252 | // to finish. |
253 | static const int kLogThreadCount = 10; |
254 | |
255 | enum { |
256 | kSharedOffset = (0 * kLogThreadCount), |
257 | kWaitingExlusiveOffset = (1 * kLogThreadCount), |
258 | kWaitingSharedOffset = (2 * kLogThreadCount), |
259 | kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, |
260 | kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, |
261 | kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, |
262 | }; |
263 | |
264 | SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } |
265 | SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
266 | void SkSharedMutex::acquire() { |
267 | // Increment the count of exclusive queue waiters. |
268 | int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, |
269 | std::memory_order_acquire); |
270 | |
271 | // If there are no other exclusive waiters and no shared threads are running then run |
272 | // else wait. |
273 | if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { |
274 | fExclusiveQueue.wait(); |
275 | } |
276 | ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
277 | } |
278 | |
279 | void SkSharedMutex::release() { |
280 | ANNOTATE_RWLOCK_RELEASED(this, 1); |
281 | |
282 | int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed); |
283 | int32_t waitingShared; |
284 | int32_t newQueueCounts; |
285 | do { |
286 | newQueueCounts = oldQueueCounts; |
287 | |
288 | // Decrement exclusive waiters. |
289 | newQueueCounts -= 1 << kWaitingExlusiveOffset; |
290 | |
291 | // The number of threads waiting to acquire a shared lock. |
292 | waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset; |
293 | |
294 | // If there are any move the counts of all the shared waiters to actual shared. They are |
295 | // going to run next. |
296 | if (waitingShared > 0) { |
297 | |
298 | // Set waiting shared to zero. |
299 | newQueueCounts &= ~kWaitingSharedMask; |
300 | |
301 | // Because this is the exclusive release, then there are zero readers. So, the bits |
302 | // for shared locks should be zero. Since those bits are zero, we can just |= in the |
303 | // waitingShared count instead of clearing with an &= and then |= the count. |
304 | newQueueCounts |= waitingShared << kSharedOffset; |
305 | } |
306 | |
307 | } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts, |
308 | std::memory_order_release, |
309 | std::memory_order_relaxed)); |
310 | |
311 | if (waitingShared > 0) { |
312 | // Run all the shared. |
313 | fSharedQueue.signal(waitingShared); |
314 | } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
315 | // Run a single exclusive waiter. |
316 | fExclusiveQueue.signal(); |
317 | } |
318 | } |
319 | |
320 | void SkSharedMutex::acquireShared() { |
321 | int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed); |
322 | int32_t newQueueCounts; |
323 | do { |
324 | newQueueCounts = oldQueueCounts; |
325 | // If there are waiting exclusives then this shared lock waits else it runs. |
326 | if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
327 | newQueueCounts += 1 << kWaitingSharedOffset; |
328 | } else { |
329 | newQueueCounts += 1 << kSharedOffset; |
330 | } |
331 | } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts, |
332 | std::memory_order_acquire, |
333 | std::memory_order_relaxed)); |
334 | |
335 | // If there are waiting exclusives, then this shared waits until after it runs. |
336 | if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
337 | fSharedQueue.wait(); |
338 | } |
339 | ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
340 | |
341 | } |
342 | |
343 | void SkSharedMutex::releaseShared() { |
344 | ANNOTATE_RWLOCK_RELEASED(this, 0); |
345 | |
346 | // Decrement the shared count. |
347 | int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, |
348 | std::memory_order_release); |
349 | |
350 | // If shared count is going to zero (because the old count == 1) and there are exclusive |
351 | // waiters, then run a single exclusive waiter. |
352 | if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
353 | && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
354 | fExclusiveQueue.signal(); |
355 | } |
356 | } |
357 | |
358 | #endif |
359 | |