/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <sys/utsname.h>
#include <sys/time.h>

#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif

#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# if defined(O_CLOEXEC)
#  define UV__O_CLOEXEC O_CLOEXEC
# endif
#endif

#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__FreeBSD_kernel__) || \
    defined(__NetBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# define UV__O_CLOEXEC O_CLOEXEC
# if defined(__FreeBSD__) && __FreeBSD__ >= 10
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
# if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
#  define UV__SOCK_NONBLOCK SOCK_NONBLOCK
#  define UV__SOCK_CLOEXEC  SOCK_CLOEXEC
# endif
# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
#  define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
# endif
#endif

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h>  /* for dlsym */
#endif

#if defined(__MVS__)
#include <sys/ioctl.h>
#endif

#if defined(__linux__)
#include <sys/syscall.h>
#endif

static int uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
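
/* A consequence of the asserts above, and the reason they exist: an array of
 * uv_buf_t can be handed to writev()-style calls with a plain cast. A minimal
 * sketch, for illustration only (the placeholder buffers are not part of this
 * file's logic):
 *
 *   uv_buf_t bufs[2] = { uv_buf_init(hdr, hdrlen), uv_buf_init(body, bodylen) };
 *   ssize_t n = writev(fd, (struct iovec*) bufs, 2);
 *
 * The stream and fs code rely on this layout equivalence when writing.
 */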


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * itself call uv__make_close_pending() whenever appropriate. */
    return;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}
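
/* Calling convention for uv__socket_sockopt(), sketched here for clarity: a
 * *value of zero means "get", a non-zero *value means "set". Reading
 * SO_RCVBUF from a TCP handle looks roughly like:
 *
 *   int value = 0;
 *   uv__socket_sockopt((uv_handle_t*) &tcp_handle, SO_RCVBUF, &value);
 *
 * while setting it means storing the desired size in value first. The public
 * uv_send_buffer_size() and uv_recv_buffer_size() wrappers follow this rule.
 */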

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax = -1;
  if (iovmax == -1) {
    iovmax = sysconf(_SC_IOV_MAX);
    /* On some embedded devices (arm-linux-uclibc based IP cameras),
     * sysconf(_SC_IOV_MAX) cannot report the correct value; it returns -1
     * with errno set to EINPROGRESS. Degrade the value to 1 in that case.
     */
    if (iovmax == -1) iovmax = 1;
  }
  return iovmax;
#else
  return 1024;
#endif
}
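
/* Sketch of how callers are expected to use uv__getiovmax(): clamp the number
 * of buffers passed to writev()/sendmsg() and write the remainder on a later
 * iteration. Roughly:
 *
 *   int iovmax = uv__getiovmax();
 *   if (iovcnt > iovmax)
 *     iovcnt = iovmax;
 */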


static void uv__finish_close(uv_handle_t* handle) {
  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
    case UV_SIGNAL:
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (!QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}
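
/* uv_backend_fd() and uv_backend_timeout() exist so the loop can be embedded
 * in an external event loop. A rough sketch, where wait_for_readiness() is a
 * hypothetical outer poller and the timeout is in milliseconds (-1 meaning
 * "block indefinitely"):
 *
 *   int fd = uv_backend_fd(loop);
 *   int timeout = uv_backend_timeout(loop);
 *   wait_for_readiness(fd, timeout);
 *   uv_run(loop, UV_RUN_NOWAIT);  // process whatever became ready
 */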


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
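
/* Typical use of uv_run() in its default mode, error handling elided:
 *
 *   uv_loop_t loop;
 *   uv_loop_init(&loop);
 *   // ... start handles and requests on the loop ...
 *   uv_run(&loop, UV_RUN_DEFAULT);  // returns once no work remains
 *   uv_loop_close(&loop);
 */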


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* Get a file pointer to a file in read-only and close-on-exec mode. */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


int uv__accept(int sockfd) {
  int peerfd;
  int err;

  assert(sockfd >= 0);

  while (1) {
#if defined(__linux__) || \
    (defined(__FreeBSD__) && __FreeBSD__ >= 10) || \
    defined(__NetBSD__)
    static int no_accept4;

    if (no_accept4)
      goto skip;

    peerfd = uv__accept4(sockfd,
                         NULL,
                         NULL,
                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
    if (peerfd != -1)
      return peerfd;

    if (errno == EINTR)
      continue;

    if (errno != ENOSYS)
      return UV__ERR(errno);

    no_accept4 = 1;
skip:
#endif

    peerfd = accept(sockfd, NULL, NULL);
    if (peerfd == -1) {
      if (errno == EINTR)
        continue;
      return UV__ERR(errno);
    }

    err = uv__cloexec(peerfd, 1);
    if (err == 0)
      err = uv__nonblock(peerfd, 1);

    if (err) {
      uv__close(peerfd);
      return err;
    }

    return peerfd;
  }
}

/* close() on macOS has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on Linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__)
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;  /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}


int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
int uv__cloexec_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (rc != -1)
      return rc;
    if (errno != EINVAL)
      return UV__ERR(errno);
    rc = recvmsg(fd, msg, flags);
    if (rc == -1)
      return UV__ERR(errno);
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
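
/* Callers that get UV_ENOBUFS from uv_cwd() are expected to retry with a
 * larger buffer; on that path *size has been updated to the required length,
 * including the terminating nul. A sketch:
 *
 *   char small[8];
 *   size_t len = sizeof(small);
 *   if (uv_cwd(small, &len) == UV_ENOBUFS) {
 *     char* big = uv__malloc(len);
 *     uv_cwd(big, &len);  // now fits; *size excludes the nul on success
 *   }
 */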


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }

  return 1;
}


static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}
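
/* next_power_of_two() smears the highest set bit into every lower position,
 * then adds one. Worked example: 1000 (0b1111101000) becomes 0b1111111111
 * (1023) after the shifts and ORs, and 1023 + 1 = 1024. The initial
 * "val -= 1" keeps exact powers of two mapping to themselves.
 */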

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__realloc(loop->watchers,
                         (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}


void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
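
/* The usual watcher lifecycle as the handle implementations use it, shown
 * here only as a sketch:
 *
 *   uv__io_init(&w, cb, fd);          // once, after the fd is known
 *   uv__io_start(loop, &w, POLLIN);   // arm; cb runs when the fd is readable
 *   uv__io_stop(loop, &w, POLLIN);    // disarm some or all events
 *   uv__io_close(loop, &w);           // detach fully before closing the fd
 */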


void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
  int err;
  int fd;

#if defined(UV__O_CLOEXEC)
  static int no_cloexec;

  if (!no_cloexec) {
    fd = open(path, flags | UV__O_CLOEXEC);
    if (fd != -1)
      return fd;

    if (errno != EINVAL)
      return UV__ERR(errno);

    /* O_CLOEXEC not supported. */
    no_cloexec = 1;
  }
#endif

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}


int uv__dup2_cloexec(int oldfd, int newfd) {
  int r;
#if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);
  return r;
#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
  r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
  if (r != -1)
    return r;
  if (errno != EINVAL)
    return UV__ERR(errno);
  /* Fall through. */
#elif defined(__linux__)
  static int no_dup3;
  if (!no_dup3) {
    do
      r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
    while (r == -1 && errno == EBUSY);
    if (r != -1)
      return r;
    if (errno != ENOSYS)
      return UV__ERR(errno);
    /* Fall through. */
    no_dup3 = 1;
  }
#endif
  {
    int err;
    do
      r = dup2(oldfd, newfd);
#if defined(__linux__)
    while (r == -1 && errno == EBUSY);
#else
    while (0);  /* Never retry. */
#endif

    if (r == -1)
      return UV__ERR(errno);

    err = uv__cloexec(newfd, 1);
    if (err) {
      uv__close(newfd);
      return err;
    }

    return r;
  }
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
   * performing input validation on buffer and size is taken care of by
   * uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  long initsize;
  int r;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return UV_ENOSYS;
#endif

  if (pwd == NULL)
    return UV_EINVAL;

  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);

  if (initsize <= 0)
    bufsize = 4096;
  else
    bufsize = (size_t) initsize;

  uid = geteuid();
  buf = NULL;

  for (;;) {
    uv__free(buf);
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    r = getpwuid_r(uid, &pw, buf, bufsize, &result);

    if (r != ERANGE)
      break;

    bufsize *= 2;
  }

  if (r != 0) {
    uv__free(buf);
    return -r;
  }

  if (result == NULL) {
    uv__free(buf);
    return UV_ENOENT;
  }

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /* The memory for the name, shell, and home directory is allocated in a
   * single uv__malloc() call. The returned pointer is stored in
   * pwd->username, so that is the only field that needs to be freed.
   */
  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}
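
/* Example: on Unix, uv_translate_sys_error(ENOENT) yields UV_ENOENT, since
 * libuv error codes here are simply negated errno values; zero and already
 * negative inputs pass through unchanged.
 */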


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}
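
/* uv_os_getenv() follows the same sizing convention as uv_cwd() above. A
 * short sketch of the retry-on-UV_ENOBUFS pattern:
 *
 *   char small[1];
 *   size_t len = sizeof(small);
 *   if (uv_os_getenv("PATH", small, &len) == UV_ENOBUFS) {
 *     char* big = uv__malloc(len);  // len now includes the nul
 *     uv_os_getenv("PATH", big, &len);
 *   }
 */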


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_gethostname(char* buffer, size_t* size) {
  /* On some platforms, if the input buffer is not large enough, gethostname()
   * succeeds, but truncates the result. libuv can detect this and return
   * ENOBUFS instead by creating a large enough buffer and comparing the
   * hostname length to the size input.
   */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0';  /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}


int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}