1 | /* |
2 | * Wrappers around mutex/cond/thread functions |
3 | * |
4 | * Copyright Red Hat, Inc. 2009 |
5 | * |
6 | * Author: |
7 | * Marcelo Tosatti <mtosatti@redhat.com> |
8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
10 | * See the COPYING file in the top-level directory. |
11 | * |
12 | */ |
13 | #include "qemu/osdep.h" |
14 | #include "qemu/thread.h" |
15 | #include "qemu/atomic.h" |
16 | #include "qemu/notify.h" |
17 | #include "qemu-thread-common.h" |
18 | |
19 | static bool name_threads; |
20 | |
21 | void qemu_thread_naming(bool enable) |
22 | { |
23 | name_threads = enable; |
24 | |
25 | #ifndef CONFIG_THREAD_SETNAME_BYTHREAD |
26 | /* This is a debugging option, not fatal */ |
27 | if (enable) { |
28 | fprintf(stderr, "qemu: thread naming not supported on this host\n" ); |
29 | } |
30 | #endif |
31 | } |
32 | |
/* Report a fatal error from the pthread wrapper identified by @msg
 * (normally __func__) together with the text for error code @err,
 * then abort the process.  Never returns.
 */
static void error_exit(int err, const char *msg)
{
    const char *reason = strerror(err);

    fprintf(stderr, "qemu: %s: %s\n", msg, reason);
    abort();
}
38 | |
39 | void qemu_mutex_init(QemuMutex *mutex) |
40 | { |
41 | int err; |
42 | |
43 | err = pthread_mutex_init(&mutex->lock, NULL); |
44 | if (err) |
45 | error_exit(err, __func__); |
46 | qemu_mutex_post_init(mutex); |
47 | } |
48 | |
49 | void qemu_mutex_destroy(QemuMutex *mutex) |
50 | { |
51 | int err; |
52 | |
53 | assert(mutex->initialized); |
54 | mutex->initialized = false; |
55 | err = pthread_mutex_destroy(&mutex->lock); |
56 | if (err) |
57 | error_exit(err, __func__); |
58 | } |
59 | |
60 | void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line) |
61 | { |
62 | int err; |
63 | |
64 | assert(mutex->initialized); |
65 | qemu_mutex_pre_lock(mutex, file, line); |
66 | err = pthread_mutex_lock(&mutex->lock); |
67 | if (err) |
68 | error_exit(err, __func__); |
69 | qemu_mutex_post_lock(mutex, file, line); |
70 | } |
71 | |
72 | int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line) |
73 | { |
74 | int err; |
75 | |
76 | assert(mutex->initialized); |
77 | err = pthread_mutex_trylock(&mutex->lock); |
78 | if (err == 0) { |
79 | qemu_mutex_post_lock(mutex, file, line); |
80 | return 0; |
81 | } |
82 | if (err != EBUSY) { |
83 | error_exit(err, __func__); |
84 | } |
85 | return -EBUSY; |
86 | } |
87 | |
88 | void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line) |
89 | { |
90 | int err; |
91 | |
92 | assert(mutex->initialized); |
93 | qemu_mutex_pre_unlock(mutex, file, line); |
94 | err = pthread_mutex_unlock(&mutex->lock); |
95 | if (err) |
96 | error_exit(err, __func__); |
97 | } |
98 | |
99 | void qemu_rec_mutex_init(QemuRecMutex *mutex) |
100 | { |
101 | int err; |
102 | pthread_mutexattr_t attr; |
103 | |
104 | pthread_mutexattr_init(&attr); |
105 | pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); |
106 | err = pthread_mutex_init(&mutex->lock, &attr); |
107 | pthread_mutexattr_destroy(&attr); |
108 | if (err) { |
109 | error_exit(err, __func__); |
110 | } |
111 | mutex->initialized = true; |
112 | } |
113 | |
114 | void qemu_cond_init(QemuCond *cond) |
115 | { |
116 | int err; |
117 | |
118 | err = pthread_cond_init(&cond->cond, NULL); |
119 | if (err) |
120 | error_exit(err, __func__); |
121 | cond->initialized = true; |
122 | } |
123 | |
124 | void qemu_cond_destroy(QemuCond *cond) |
125 | { |
126 | int err; |
127 | |
128 | assert(cond->initialized); |
129 | cond->initialized = false; |
130 | err = pthread_cond_destroy(&cond->cond); |
131 | if (err) |
132 | error_exit(err, __func__); |
133 | } |
134 | |
135 | void qemu_cond_signal(QemuCond *cond) |
136 | { |
137 | int err; |
138 | |
139 | assert(cond->initialized); |
140 | err = pthread_cond_signal(&cond->cond); |
141 | if (err) |
142 | error_exit(err, __func__); |
143 | } |
144 | |
145 | void qemu_cond_broadcast(QemuCond *cond) |
146 | { |
147 | int err; |
148 | |
149 | assert(cond->initialized); |
150 | err = pthread_cond_broadcast(&cond->cond); |
151 | if (err) |
152 | error_exit(err, __func__); |
153 | } |
154 | |
/* Atomically release @mutex and wait on @cond; on return the mutex is
 * held again.  @file/@line identify the call site for the
 * contention-tracking hooks in qemu-thread-common.h.
 */
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    /* Waiting drops the mutex, so run the unlock-side hook before the
     * wait and the lock-side hook after, mirroring
     * qemu_mutex_unlock_impl()/qemu_mutex_lock_impl(). */
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    /* The mutex is reacquired before pthread_cond_wait() returns, so
     * the lock hook runs even ahead of the error check. */
    qemu_mutex_post_lock(mutex, file, line);
    if (err)
        error_exit(err, __func__);
}
166 | |
/* Initialize @sem with initial count @init (must be >= 0).
 * Aborts the process on failure.
 */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    /* Hosts without a usable sem_timedwait() emulate the semaphore
     * with a mutex/condvar pair protecting a counter. */
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    /* sem_init() reports failure via -1/errno, unlike pthread calls
     * which return the error number directly. */
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}
192 | |
193 | void qemu_sem_destroy(QemuSemaphore *sem) |
194 | { |
195 | int rc; |
196 | |
197 | assert(sem->initialized); |
198 | sem->initialized = false; |
199 | #ifndef CONFIG_SEM_TIMEDWAIT |
200 | rc = pthread_cond_destroy(&sem->cond); |
201 | if (rc < 0) { |
202 | error_exit(rc, __func__); |
203 | } |
204 | rc = pthread_mutex_destroy(&sem->lock); |
205 | if (rc < 0) { |
206 | error_exit(rc, __func__); |
207 | } |
208 | #else |
209 | rc = sem_destroy(&sem->sem); |
210 | if (rc < 0) { |
211 | error_exit(errno, __func__); |
212 | } |
213 | #endif |
214 | } |
215 | |
/* Increment @sem and wake one waiter.  Aborts on error, including
 * counter overflow in the emulated path.
 */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        /* The counter would wrap; treat it as a fatal usage error. */
        rc = EINVAL;
    } else {
        sem->count++;
        /* Only one unit was added, so waking a single waiter suffices. */
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
240 | |
/* Fill *ts with the absolute CLOCK_REALTIME time @ms milliseconds from
 * now, in the form expected by pthread_cond_timedwait()/sem_timedwait().
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Carry at most once: each addend above is below one second. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec += 1;
    }
}
252 | |
/* Decrement @sem, waiting up to @ms milliseconds for the count to
 * become positive.  Returns 0 on success, -1 on timeout; aborts on
 * any other error.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    /* Loop: pthread_cond_timedwait() may wake spuriously or because of
     * a post that another waiter already consumed. */
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* On timeout the count is left alone, even if a concurrent post
     * has just made it non-zero; the caller sees -1 either way. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        /* Restart if interrupted by a signal. */
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
301 | |
/* Decrement @sem, blocking until the count is positive.
 * Aborts the process on error.
 */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    /* Loop guards against spurious wakeups and lost races with other
     * waiters for the same post. */
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    /* Restart if interrupted by a signal. */
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
326 | |
#ifdef __linux__
#include "qemu/futex.h"
#else
/* Fallback for hosts without Linux futexes: emulate futex wait/wake
 * for QemuEvent using its ev->lock/ev->cond pair.
 */
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    /* n == 1 wakes a single waiter; any other value wakes them all
     * (qemu_event_set() passes INT_MAX). */
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

/* Sleep only if ev->value still equals @val; the check is done under
 * ev->lock so a concurrent qemu_futex_wake() cannot be missed.
 * NOTE(review): the single pthread_cond_wait() can return on a
 * spurious wakeup, making this return before the value changes;
 * callers appear to tolerate an early return -- confirm against
 * qemu_event_wait().
 */
static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
352 | |
353 | /* Valid transitions: |
354 | * - free->set, when setting the event |
355 | * - busy->set, when setting the event, followed by qemu_futex_wake |
356 | * - set->free, when resetting the event |
357 | * - free->busy, when waiting |
358 | * |
359 | * set->busy does not happen (it can be observed from the outside but |
360 | * it really is set->free->busy). |
361 | * |
362 | * busy->free provably cannot happen; to enforce it, the set->free transition |
363 | * is done with an OR, which becomes a no-op if the event has concurrently |
364 | * transitioned to free or busy. |
365 | */ |
366 | |
/* States of QemuEvent.value; see the transition table above. */
#define EV_SET 0
#define EV_FREE 1
#define EV_BUSY -1
370 | |
/* Initialize @ev; it starts in the "set" state if @init is true,
 * otherwise in the "free" (unset) state.
 */
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    /* Non-Linux hosts use the condvar-based futex emulation above. */
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}
381 | |
/* Destroy @ev; no thread may be waiting on it. */
void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    /* Tear down the futex-emulation primitives created in init. */
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
391 | |
/* Move @ev to the "set" state and wake all waiters (free/busy -> set
 * in the transition table above).
 */
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        /* The xchg makes exactly one setter observe EV_BUSY and take
         * responsibility for waking the waiters. */
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}
406 | |
/* Move @ev from "set" back to "free"; a no-op if it is not set
 * (set -> free in the transition table above).
 */
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
422 | |
/* Block until @ev is set; returns immediately if it already is.
 * Announces itself as a waiter via the free -> busy transition.
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        /* Sleep while the event is still busy (i.e. not yet set). */
        qemu_futex_wait(ev, EV_BUSY);
    }
}
445 | |
/* Per-thread list of notifiers to run when the thread exits. */
static __thread NotifierList thread_exit;

/*
 * Note that in this implementation you can register a thread-exit
 * notifier for the main thread, but it will never be called.
 * This is OK because main thread exit can only happen when the
 * entire process is exiting, and the API allows notifiers to not
 * be called on process exit.
 */
void qemu_thread_atexit_add(Notifier *notifier)
{
    /* Registers on the *calling* thread's list (thread-local). */
    notifier_list_add(&thread_exit, notifier);
}
459 | |
/* Unregister a notifier added with qemu_thread_atexit_add();
 * presumably it must be called from the thread that registered it,
 * since the list is thread-local -- confirm with callers.
 */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
464 | |
/* pthread cleanup handler installed by qemu_thread_start(); runs the
 * exiting thread's registered notifiers.  @arg is unused.
 */
static void qemu_thread_atexit_notify(void *arg)
{
    /*
     * Called when non-main thread exits (via qemu_thread_exit()
     * or by returning from its start routine.)
     */
    notifier_list_notify(&thread_exit, NULL);
}
473 | |
/* Bundle handed from qemu_thread_create() to qemu_thread_start();
 * allocated by the creator and freed by the new thread.
 */
typedef struct {
    void *(*start_routine)(void *); /* user entry point */
    void *arg;                      /* opaque argument for start_routine */
    char *name;                     /* g_strdup()ed debug name, freed by the new thread */
} QemuThreadArgs;
479 | |
/* Trampoline run as the new thread's entry point: applies the debug
 * name if enabled, frees the argument bundle, and arranges for
 * thread-exit notifiers to run whether the routine returns normally
 * or the thread calls qemu_thread_exit().
 */
static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;
    void *r;

#ifdef CONFIG_THREAD_SETNAME_BYTHREAD
    /* Attempt to set the threads name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(qemu_thread_args->name);
# endif
    }
#endif
    /* The bundle is no longer needed once the locals above are copied. */
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    /* cleanup_push/pop(1) runs the notifiers on pthread_exit() too. */
    pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
    r = start_routine(arg);
    pthread_cleanup_pop(1);
    return r;
}
506 | |
507 | void qemu_thread_create(QemuThread *thread, const char *name, |
508 | void *(*start_routine)(void*), |
509 | void *arg, int mode) |
510 | { |
511 | sigset_t set, oldset; |
512 | int err; |
513 | pthread_attr_t attr; |
514 | QemuThreadArgs *qemu_thread_args; |
515 | |
516 | err = pthread_attr_init(&attr); |
517 | if (err) { |
518 | error_exit(err, __func__); |
519 | } |
520 | |
521 | if (mode == QEMU_THREAD_DETACHED) { |
522 | pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); |
523 | } |
524 | |
525 | /* Leave signal handling to the iothread. */ |
526 | sigfillset(&set); |
527 | /* Blocking the signals can result in undefined behaviour. */ |
528 | sigdelset(&set, SIGSEGV); |
529 | sigdelset(&set, SIGFPE); |
530 | sigdelset(&set, SIGILL); |
531 | /* TODO avoid SIGBUS loss on macOS */ |
532 | pthread_sigmask(SIG_SETMASK, &set, &oldset); |
533 | |
534 | qemu_thread_args = g_new0(QemuThreadArgs, 1); |
535 | qemu_thread_args->name = g_strdup(name); |
536 | qemu_thread_args->start_routine = start_routine; |
537 | qemu_thread_args->arg = arg; |
538 | |
539 | err = pthread_create(&thread->thread, &attr, |
540 | qemu_thread_start, qemu_thread_args); |
541 | |
542 | if (err) |
543 | error_exit(err, __func__); |
544 | |
545 | pthread_sigmask(SIG_SETMASK, &oldset, NULL); |
546 | |
547 | pthread_attr_destroy(&attr); |
548 | } |
549 | |
/* Initialize *thread to refer to the calling thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
554 | |
555 | bool qemu_thread_is_self(QemuThread *thread) |
556 | { |
557 | return pthread_equal(pthread_self(), thread->thread); |
558 | } |
559 | |
/* Terminate the calling thread with result @retval (visible to
 * qemu_thread_join()).  Thread-exit notifiers run via the cleanup
 * handler pushed in qemu_thread_start().  Never returns.
 */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
564 | |
565 | void *qemu_thread_join(QemuThread *thread) |
566 | { |
567 | int err; |
568 | void *ret; |
569 | |
570 | err = pthread_join(thread->thread, &ret); |
571 | if (err) { |
572 | error_exit(err, __func__); |
573 | } |
574 | return ret; |
575 | } |
576 | |