/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <pthread.h>
#include <assert.h>
#include <errno.h>

#include <sys/time.h>
#include <sys/resource.h>  /* getrlimit() */
#include <unistd.h>  /* getpagesize() */

#include <limits.h>

#ifdef __MVS__
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#ifdef __GLIBC__
#include <gnu/libc-version.h>  /* gnu_get_libc_version() */
#endif

#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)

#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
    defined(__OpenBSD__) || \
    !defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  struct _uv_barrier* b;
  int rc;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;

  b->in = 0;
  b->out = 0;
  b->threshold = count;

  rc = uv_mutex_init(&b->mutex);
  if (rc != 0)
    goto error2;

  rc = uv_cond_init(&b->cond);
  if (rc != 0)
    goto error;

  barrier->b = b;
  return 0;

error:
  uv_mutex_destroy(&b->mutex);
error2:
  uv__free(b);
  return rc;
}


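/* `in` counts threads that have arrived and `out` counts threads that have
 * not yet left. The last arrival resets `in`, arms `out` with the threshold
 * and wakes one waiter; every departing thread except the last then wakes
 * the next one in turn.
 */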
int uv_barrier_wait(uv_barrier_t* barrier) {
  struct _uv_barrier* b;
  int last;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  if (++b->in == b->threshold) {
    b->in = 0;
    b->out = b->threshold;
    uv_cond_signal(&b->cond);
  } else {
    do
      uv_cond_wait(&b->cond, &b->mutex);
    while (b->in != 0);
  }

  last = (--b->out == 0);
  if (!last)
    uv_cond_signal(&b->cond);  /* Not needed for last thread. */

  uv_mutex_unlock(&b->mutex);
  return last;
}


void uv_barrier_destroy(uv_barrier_t* barrier) {
  struct _uv_barrier* b;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  assert(b->in == 0);
  assert(b->out == 0);

  if (b->in != 0 || b->out != 0)
    abort();

  uv_mutex_unlock(&b->mutex);
  uv_mutex_destroy(&b->mutex);
  uv_cond_destroy(&b->cond);

  uv__free(barrier->b);
  barrier->b = NULL;
}

#else

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}


int uv_barrier_wait(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_wait(barrier);
  if (rc != 0)
    if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
      abort();

  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}


void uv_barrier_destroy(uv_barrier_t* barrier) {
  if (pthread_barrier_destroy(barrier))
    abort();
}

#endif
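
/* Example (sketch, not part of the library): N threads rendezvous at a
 * barrier; the single thread for which uv_barrier_wait() returns non-zero
 * performs the cleanup.
 *
 *   uv_barrier_t barrier;
 *   if (uv_barrier_init(&barrier, n_threads))
 *     abort();
 *   ...each of the n_threads threads eventually calls:
 *   if (uv_barrier_wait(&barrier) > 0)
 *     uv_barrier_destroy(&barrier);
 */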


/* On macOS, threads other than the main thread are created with a reduced
 * stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
 *
 * On Linux, threads created by musl have a much smaller stack than threads
 * created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
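 *
 * A return value of zero means "keep the pthread default";
 * uv_thread_create_ex() below then passes a NULL attribute to
 * pthread_create().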
 */
static size_t thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;

  if (getrlimit(RLIMIT_STACK, &lim))
    abort();

  if (lim.rlim_cur != RLIM_INFINITY) {
    /* pthread_attr_setstacksize() expects page-aligned values. */
    lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

    /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
     * too small to safely receive signals on.
     *
     * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
     * the largest MINSIGSTKSZ of the architectures that musl supports) so
     * let's use that as a lower bound.
     *
     * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
     * is between 28 and 133 KB when compiling against glibc, depending
     * on the architecture.
     */
    if (lim.rlim_cur >= 8192)
      if (lim.rlim_cur >= PTHREAD_STACK_MIN)
        return lim.rlim_cur;
  }
#endif

#if !defined(__linux__)
  return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 << 20;  /* glibc default. */
#else
  return 2 << 20;  /* glibc default. */
#endif
}


int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t params;
  params.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &params, entry, arg);
}

int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. */
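    /* (The mask trick below relies on pagesize being a power of two.) */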
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
#ifdef PTHREAD_STACK_MIN
    if (stack_size < PTHREAD_STACK_MIN)
      stack_size = PTHREAD_STACK_MIN;
#endif
  }

  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  err = pthread_create(tid, attr, (void*(*)(void*)) entry, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}


uv_thread_t uv_thread_self(void) {
  return pthread_self();
}

int uv_thread_join(uv_thread_t *tid) {
  return UV__ERR(pthread_join(*tid, NULL));
}


int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}


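/* In debug builds, prefer an error-checking mutex when the platform provides
 * one: misuse such as relocking an already held mutex or unlocking a mutex
 * the caller does not own then fails visibly instead of deadlocking or
 * invoking undefined behavior.
 */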
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
#endif
}


int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
}


void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}


void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}


int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  err = pthread_mutex_trylock(mutex);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}


int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return UV__ERR(pthread_rwlock_init(rwlock, NULL));
}


void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}


void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}


int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}


void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}


int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}


void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}

#if defined(__APPLE__) && defined(__MACH__)

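/* macOS does not implement unnamed POSIX semaphores (sem_init() fails with
 * ENOSYS), so Mach semaphores are used instead.
 */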
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return UV_EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return UV_ENOMEM;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}


void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}


void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}


void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}


int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return UV_EAGAIN;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

#ifdef __GLIBC__

/* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
 * by providing a custom implementation for glibc < 2.21 in terms of other
 * concurrency primitives.
 * Refs: https://github.com/nodejs/node/issues/19903 */

/* To preserve ABI compatibility, we treat the uv_sem_t as storage for
 * a pointer to the actual struct we're using underneath. */

static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
static int platform_needs_custom_semaphore = 0;

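/* gnu_get_libc_version() returns a string like "2.19"; parse the minor
 * version and enable the workaround for anything older than 2.21.
 */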
static void glibc_version_check(void) {
  const char* version = gnu_get_libc_version();
  platform_needs_custom_semaphore =
      version[0] == '2' && version[1] == '.' &&
      atoi(version + 2) < 21;
}

#elif defined(__MVS__)

#define platform_needs_custom_semaphore 1

#else /* !defined(__GLIBC__) && !defined(__MVS__) */

#define platform_needs_custom_semaphore 0

#endif

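/* A counting semaphore built from a mutex and a condition variable; `value`
 * is the number of available units.
 */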
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;
  uv_cond_t cond;
  unsigned int value;
} uv_semaphore_t;

#if defined(__GLIBC__) || platform_needs_custom_semaphore
STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
#endif

static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  int err;
  uv_semaphore_t* sem;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  if ((err = uv_mutex_init(&sem->mutex)) != 0) {
    uv__free(sem);
    return err;
  }

  if ((err = uv_cond_init(&sem->cond)) != 0) {
    uv_mutex_destroy(&sem->mutex);
    uv__free(sem);
    return err;
  }

  sem->value = value;
  *(uv_semaphore_t**)sem_ = sem;
  return 0;
}


static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}


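/* Wake a waiter only on the 0 -> 1 transition; waiters block solely while
 * the count is zero.
 */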
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  if (sem->value == 1)
    uv_cond_signal(&sem->cond);
  uv_mutex_unlock(&sem->mutex);
}


static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}


static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;

  if (sem->value == 0) {
    uv_mutex_unlock(&sem->mutex);
    return UV_EAGAIN;
  }

  sem->value--;
  uv_mutex_unlock(&sem->mutex);

  return 0;
}

static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return UV__ERR(errno);
  return 0;
}


static void uv__sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}


static void uv__sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}


static void uv__sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}


static int uv__sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r) {
    if (errno == EAGAIN)
      return UV_EAGAIN;
    abort();
  }

  return 0;
}

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#ifdef __GLIBC__
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (platform_needs_custom_semaphore)
    return uv__custom_sem_init(sem, value);
  else
    return uv__sem_init(sem, value);
}


void uv_sem_destroy(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_destroy(sem);
  else
    uv__sem_destroy(sem);
}


void uv_sem_post(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_post(sem);
  else
    uv__sem_post(sem);
}


void uv_sem_wait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_wait(sem);
  else
    uv__sem_wait(sem);
}


int uv_sem_trywait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    return uv__custom_sem_trywait(sem);
  else
    return uv__sem_trywait(sem);
}

#endif /* defined(__APPLE__) && defined(__MACH__) */
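
/* Example (sketch, not part of the library): a binary semaphore handing a
 * "ready" signal from one thread to another.
 *
 *   uv_sem_t sem;
 *   if (uv_sem_init(&sem, 0))
 *     abort();
 *   uv_sem_post(&sem);     (producer: release one unit)
 *   uv_sem_wait(&sem);     (consumer: blocks until a unit is available)
 *   uv_sem_destroy(&sem);
 */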


#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)

int uv_cond_init(uv_cond_t* cond) {
  return UV__ERR(pthread_cond_init(cond, NULL));
}

#else /* !(defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)) */

int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

#if !(defined(__ANDROID_API__) && __ANDROID_API__ < 21)
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
#endif

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}

#endif /* defined(__APPLE__) && defined(__MACH__) || defined(__MVS__) */

void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}

void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}

void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}

void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}

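/* `timeout` is a relative interval in nanoseconds. Platforms whose
 * pthread_cond_timedwait() expects an absolute deadline get the current time
 * added in first; Apple's pthread_cond_timedwait_relative_np() takes the
 * relative value directly.
 */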
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21

  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID_API__ */
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}


int uv_key_create(uv_key_t* key) {
  return UV__ERR(pthread_key_create(key, NULL));
}


void uv_key_delete(uv_key_t* key) {
  if (pthread_key_delete(*key))
    abort();
}


void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}


void uv_key_set(uv_key_t* key, void* value) {
  if (pthread_setspecific(*key, value))
    abort();
}