1 | #ifndef QEMU_THREAD_H |
2 | #define QEMU_THREAD_H |
3 | |
4 | #include "qemu/processor.h" |
5 | #include "qemu/atomic.h" |
6 | |
7 | typedef struct QemuCond QemuCond; |
8 | typedef struct QemuSemaphore QemuSemaphore; |
9 | typedef struct QemuEvent QemuEvent; |
10 | typedef struct QemuLockCnt QemuLockCnt; |
11 | typedef struct QemuThread QemuThread; |
12 | |
13 | #ifdef _WIN32 |
14 | #include "qemu/thread-win32.h" |
15 | #else |
16 | #include "qemu/thread-posix.h" |
17 | #endif |
18 | |
19 | /* include QSP header once QemuMutex, QemuCond etc. are defined */ |
20 | #include "qemu/qsp.h" |
21 | |
22 | #define QEMU_THREAD_JOINABLE 0 |
23 | #define QEMU_THREAD_DETACHED 1 |
24 | |
void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
/*
 * The *_impl variants take the caller's source location so that lock
 * usage can be attributed by the synchronization profiler (see
 * "qemu/qsp.h", included above).  They are normally reached through
 * the qemu_mutex_lock()/qemu_mutex_trylock()/qemu_mutex_unlock()
 * macros below, which fill in __FILE__/__LINE__ automatically.
 */
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
30 | |
/*
 * Function-pointer types for the pluggable lock/wait hooks below.
 * Signatures mirror the corresponding *_impl functions: the object
 * plus the caller's file and line.
 */
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);

/*
 * Global hooks, loaded with atomic_read() by the macros below so an
 * instrumented implementation (see "qemu/qsp.h") can be installed at
 * run time.  qemu_bql_mutex_lock_func is a separate hook so that BQL
 * acquisitions can be tracked independently of other mutexes.
 */
extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
44 | |
/*
 * Convenience macros to bypass the profiler: these call the *_impl
 * functions directly instead of dispatching through the pluggable
 * function pointers above.
 */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
50 | |
#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 *
 * Note: no trailing semicolons in the expansions, so that the macros
 * can be used in expression context just like the statement-expression
 * versions below, e.g. "if (qemu_mutex_trylock(m))".  A trailing
 * semicolon here would be a syntax error in such uses.
 */
#define qemu_mutex_lock(m)                                              \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#else
/*
 * Profiled variants: load the current hook with atomic_read() and call
 * it with the caller's source location.  GCC/Clang statement
 * expressions make the macro yield the callee's return value, which
 * qemu_mutex_trylock() and qemu_rec_mutex_trylock() rely on.
 */
#define qemu_mutex_lock(m) ({                                           \
        QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);      \
        _f(m, __FILE__, __LINE__);                                      \
    })

#define qemu_mutex_trylock(m) ({                                        \
        QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
        _f(m, __FILE__, __LINE__);                                      \
    })

#define qemu_rec_mutex_lock(m) ({                                       \
        QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
        _f(m, __FILE__, __LINE__);                                      \
    })

#define qemu_rec_mutex_trylock(m) ({                                    \
        QemuRecMutexTrylockFunc _f;                                     \
        _f = atomic_read(&qemu_rec_mutex_trylock_func);                 \
        _f(m, __FILE__, __LINE__);                                      \
    })

#define qemu_cond_wait(c, m) ({                                         \
        QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);        \
        _f(c, m, __FILE__, __LINE__);                                   \
    })
#endif
93 | |
/*
 * Unlock has no pluggable hook (see the extern list above), so it
 * always dispatches straight to the implementation, passing the
 * caller's location.
 */
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
96 | |
/*
 * Real-function version of the qemu_mutex_lock() macro.  Parenthesizing
 * the name suppresses macro expansion, so this defines an actual
 * function whose address can be taken; the body does use the macro and
 * therefore still goes through the pluggable hook.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}
101 | |
/*
 * Real-function version of the qemu_mutex_trylock() macro (the
 * parenthesized name prevents macro expansion of this definition).
 */
static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}
106 | |
/*
 * Real-function version of the qemu_mutex_unlock() macro (the
 * parenthesized name prevents macro expansion of this definition).
 */
static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}
111 | |
/*
 * Real-function version of the qemu_rec_mutex_lock() macro (the
 * parenthesized name prevents macro expansion of this definition).
 */
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}
116 | |
/*
 * Real-function version of the qemu_rec_mutex_trylock() macro (the
 * parenthesized name prevents macro expansion of this definition).
 */
static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}
121 | |
/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
/*
 * Normally reached via the qemu_cond_wait() macro or function wrapper,
 * which supply the caller's file and line for the profiler.
 */
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
137 | |
/*
 * Real-function version of the qemu_cond_wait() macro (the
 * parenthesized name prevents macro expansion of this definition).
 */
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}
142 | |
/* Counting semaphore; the counter starts at @init. */
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
/*
 * Like qemu_sem_wait() but gives up after @ms milliseconds.  The return
 * value distinguishes timeout from success -- see the thread-posix/
 * thread-win32 implementation for the exact convention.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);
148 | |
/*
 * Binary event: qemu_event_set() releases waiters, qemu_event_reset()
 * rearms the event, qemu_event_wait() blocks until it is set.
 * @init selects the initial state.
 */
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);
154 | |
/*
 * Create a thread running start_routine(arg).  @name is a debug name
 * for the thread (presumably only applied when naming is enabled via
 * qemu_thread_naming() -- confirm in the implementation); @mode is
 * QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED (see above).
 */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
/* Wait for a joinable thread to finish; returns its exit value. */
void *qemu_thread_join(QemuThread *thread);
/* Fill in @thread with a handle for the calling thread. */
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
/* Globally enable or disable thread naming. */
void qemu_thread_naming(bool enable);
163 | |
/* Only used by pointer below, so a forward declaration suffices. */
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list.  It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);
189 | |
/*
 * Simple test-and-set spinlock.  value == 0 means unlocked, nonzero
 * means held; initialize with qemu_spin_init() before first use.
 */
struct QemuSpin {
    int value;
};
193 | |
/*
 * Initialize the lock to the released (0) state.
 * __sync_lock_release() writes 0 with release semantics, which is more
 * than enough here since no other thread may use the lock concurrently
 * with its initialization.
 */
static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}
198 | |
/*
 * Acquire the spinlock (test-and-test-and-set).  On contention the
 * inner loop spins with plain reads plus cpu_relax() until the lock
 * looks free, and only then retries the atomic test-and-set; this
 * keeps the cache line shared while waiting instead of bouncing it
 * between CPUs.
 */
static inline void qemu_spin_lock(QemuSpin *spin)
{
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}
207 | |
/*
 * Single acquisition attempt, no spinning.  __sync_lock_test_and_set()
 * returns the *previous* contents of the lock word, so this returns
 * false when the lock was acquired and true when it was already held
 * (i.e. nonzero means failure, matching the qemu_mutex_trylock()
 * convention rather than a "did it succeed" boolean).
 */
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
    return __sync_lock_test_and_set(&spin->value, true);
}
212 | |
/*
 * Racy snapshot of the lock state: true if some thread currently holds
 * the lock.  The answer may be stale by the time the caller uses it.
 */
static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return atomic_read(&spin->value);
}
217 | |
/* Release the lock: store 0 with release semantics. */
static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}
222 | |
/*
 * QemuLockCnt: a mutex paired with a visitor counter; see the API
 * documentation below.  Fields are private to the lockcnt
 * implementation -- use only the qemu_lockcnt_* functions.
 */
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    /*
     * On Linux there is no separate mutex; the implementation
     * presumably encodes the lock state in 'count' itself (e.g. via
     * futex) -- confirm in the lockcnt implementation.
     */
    unsigned count;
};
229 | |
230 | /** |
231 | * qemu_lockcnt_init: initialize a QemuLockcnt |
232 | * @lockcnt: the lockcnt to initialize |
233 | * |
234 | * Initialize lockcnt's counter to zero and prepare its mutex |
235 | * for usage. |
236 | */ |
237 | void qemu_lockcnt_init(QemuLockCnt *lockcnt); |
238 | |
239 | /** |
240 | * qemu_lockcnt_destroy: destroy a QemuLockcnt |
241 | * @lockcnt: the lockcnt to destruct |
242 | * |
243 | * Destroy lockcnt's mutex. |
244 | */ |
245 | void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); |
246 | |
247 | /** |
248 | * qemu_lockcnt_inc: increment a QemuLockCnt's counter |
249 | * @lockcnt: the lockcnt to operate on |
250 | * |
251 | * If the lockcnt's count is zero, wait for critical sections |
252 | * to finish and increment lockcnt's count to 1. If the count |
253 | * is not zero, just increment it. |
254 | * |
255 | * Because this function can wait on the mutex, it must not be |
256 | * called while the lockcnt's mutex is held by the current thread. |
257 | * For the same reason, qemu_lockcnt_inc can also contribute to |
258 | * AB-BA deadlocks. This is a sample deadlock scenario: |
259 | * |
260 | * thread 1 thread 2 |
261 | * ------------------------------------------------------- |
262 | * qemu_lockcnt_lock(&lc1); |
263 | * qemu_lockcnt_lock(&lc2); |
264 | * qemu_lockcnt_inc(&lc2); |
265 | * qemu_lockcnt_inc(&lc1); |
266 | */ |
267 | void qemu_lockcnt_inc(QemuLockCnt *lockcnt); |
268 | |
269 | /** |
270 | * qemu_lockcnt_dec: decrement a QemuLockCnt's counter |
271 | * @lockcnt: the lockcnt to operate on |
272 | */ |
273 | void qemu_lockcnt_dec(QemuLockCnt *lockcnt); |
274 | |
275 | /** |
276 | * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and |
277 | * possibly lock it. |
278 | * @lockcnt: the lockcnt to operate on |
279 | * |
280 | * Decrement lockcnt's count. If the new count is zero, lock |
281 | * the mutex and return true. Otherwise, return false. |
282 | */ |
283 | bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); |
284 | |
285 | /** |
286 | * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and |
287 | * lock it. |
288 | * @lockcnt: the lockcnt to operate on |
289 | * |
290 | * If the count is 1, decrement the count to zero, lock |
291 | * the mutex and return true. Otherwise, return false. |
292 | */ |
293 | bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); |
294 | |
295 | /** |
296 | * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. |
297 | * @lockcnt: the lockcnt to operate on |
298 | * |
299 | * Remember that concurrent visits are not blocked unless the count is |
300 | * also zero. You can use qemu_lockcnt_count to check for this inside a |
301 | * critical section. |
302 | */ |
303 | void qemu_lockcnt_lock(QemuLockCnt *lockcnt); |
304 | |
305 | /** |
306 | * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. |
307 | * @lockcnt: the lockcnt to operate on. |
308 | */ |
309 | void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); |
310 | |
311 | /** |
312 | * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. |
313 | * @lockcnt: the lockcnt to operate on. |
314 | * |
315 | * This is the same as |
316 | * |
317 | * qemu_lockcnt_unlock(lockcnt); |
318 | * qemu_lockcnt_inc(lockcnt); |
319 | * |
320 | * but more efficient. |
321 | */ |
322 | void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); |
323 | |
324 | /** |
325 | * qemu_lockcnt_count: query a LockCnt's count. |
326 | * @lockcnt: the lockcnt to query. |
327 | * |
328 | * Note that the count can change at any time. Still, while the |
329 | * lockcnt is locked, one can usefully check whether the count |
330 | * is non-zero. |
331 | */ |
332 | unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); |
333 | |
334 | #endif |
335 | |