1 | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
3 | * of this software and associated documentation files (the "Software"), to |
4 | * deal in the Software without restriction, including without limitation the |
5 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
6 | * sell copies of the Software, and to permit persons to whom the Software is |
7 | * furnished to do so, subject to the following conditions: |
8 | * |
9 | * The above copyright notice and this permission notice shall be included in |
10 | * all copies or substantial portions of the Software. |
11 | * |
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
15 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
18 | * IN THE SOFTWARE. |
19 | */ |
20 | |
21 | #include "uv.h" |
22 | #include "internal.h" |
23 | |
24 | #include <stddef.h> /* NULL */ |
25 | #include <stdio.h> /* printf */ |
26 | #include <stdlib.h> |
27 | #include <string.h> /* strerror */ |
28 | #include <errno.h> |
29 | #include <assert.h> |
30 | #include <unistd.h> |
31 | #include <sys/types.h> |
32 | #include <sys/stat.h> |
33 | #include <fcntl.h> /* O_CLOEXEC */ |
34 | #include <sys/ioctl.h> |
35 | #include <sys/socket.h> |
36 | #include <sys/un.h> |
37 | #include <netinet/in.h> |
38 | #include <arpa/inet.h> |
39 | #include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */ |
40 | #include <sys/uio.h> /* writev */ |
41 | #include <sys/resource.h> /* getrusage */ |
42 | #include <pwd.h> |
43 | #include <sys/utsname.h> |
44 | #include <sys/time.h> |
45 | |
46 | #ifdef __sun |
47 | # include <sys/filio.h> |
48 | # include <sys/types.h> |
49 | # include <sys/wait.h> |
50 | #endif |
51 | |
52 | #if defined(__APPLE__) |
53 | # include <sys/filio.h> |
#endif /* defined(__APPLE__) */
55 | |
56 | |
57 | #if defined(__APPLE__) && !TARGET_OS_IPHONE |
58 | # include <crt_externs.h> |
59 | # include <mach-o/dyld.h> /* _NSGetExecutablePath */ |
60 | # define environ (*_NSGetEnviron()) |
61 | #else /* defined(__APPLE__) && !TARGET_OS_IPHONE */ |
62 | extern char** environ; |
63 | #endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */ |
64 | |
65 | |
66 | #if defined(__DragonFly__) || \ |
67 | defined(__FreeBSD__) || \ |
68 | defined(__FreeBSD_kernel__) || \ |
69 | defined(__NetBSD__) || \ |
70 | defined(__OpenBSD__) |
71 | # include <sys/sysctl.h> |
72 | # include <sys/filio.h> |
73 | # include <sys/wait.h> |
74 | # if defined(__FreeBSD__) |
75 | # define uv__accept4 accept4 |
76 | # endif |
77 | # if defined(__NetBSD__) |
78 | # define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d)) |
79 | # endif |
80 | #endif |
81 | |
82 | #if defined(__MVS__) |
83 | #include <sys/ioctl.h> |
84 | #endif |
85 | |
86 | #if defined(__linux__) |
87 | # include <sys/syscall.h> |
88 | # define uv__accept4 accept4 |
89 | #endif |
90 | |
91 | static int uv__run_pending(uv_loop_t* loop); |
92 | |
93 | /* Verify that uv_buf_t is ABI-compatible with struct iovec. */ |
94 | STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec)); |
95 | STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) == |
96 | sizeof(((struct iovec*) 0)->iov_base)); |
97 | STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) == |
98 | sizeof(((struct iovec*) 0)->iov_len)); |
99 | STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base)); |
100 | STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len)); |
101 | |
102 | |
103 | uint64_t uv_hrtime(void) { |
104 | return uv__hrtime(UV_CLOCK_PRECISE); |
105 | } |
106 | |
107 | |
108 | void uv_close(uv_handle_t* handle, uv_close_cb close_cb) { |
109 | assert(!uv__is_closing(handle)); |
110 | |
111 | handle->flags |= UV_HANDLE_CLOSING; |
112 | handle->close_cb = close_cb; |
113 | |
114 | switch (handle->type) { |
115 | case UV_NAMED_PIPE: |
116 | uv__pipe_close((uv_pipe_t*)handle); |
117 | break; |
118 | |
119 | case UV_TTY: |
120 | uv__stream_close((uv_stream_t*)handle); |
121 | break; |
122 | |
123 | case UV_TCP: |
124 | uv__tcp_close((uv_tcp_t*)handle); |
125 | break; |
126 | |
127 | case UV_UDP: |
128 | uv__udp_close((uv_udp_t*)handle); |
129 | break; |
130 | |
131 | case UV_PREPARE: |
132 | uv__prepare_close((uv_prepare_t*)handle); |
133 | break; |
134 | |
135 | case UV_CHECK: |
136 | uv__check_close((uv_check_t*)handle); |
137 | break; |
138 | |
139 | case UV_IDLE: |
140 | uv__idle_close((uv_idle_t*)handle); |
141 | break; |
142 | |
143 | case UV_ASYNC: |
144 | uv__async_close((uv_async_t*)handle); |
145 | break; |
146 | |
147 | case UV_TIMER: |
148 | uv__timer_close((uv_timer_t*)handle); |
149 | break; |
150 | |
151 | case UV_PROCESS: |
152 | uv__process_close((uv_process_t*)handle); |
153 | break; |
154 | |
155 | case UV_FS_EVENT: |
156 | uv__fs_event_close((uv_fs_event_t*)handle); |
157 | break; |
158 | |
159 | case UV_POLL: |
160 | uv__poll_close((uv_poll_t*)handle); |
161 | break; |
162 | |
163 | case UV_FS_POLL: |
164 | uv__fs_poll_close((uv_fs_poll_t*)handle); |
165 | /* Poll handles use file system requests, and one of them may still be |
166 | * running. The poll code will call uv__make_close_pending() for us. */ |
167 | return; |
168 | |
169 | case UV_SIGNAL: |
170 | uv__signal_close((uv_signal_t*) handle); |
171 | break; |
172 | |
173 | default: |
174 | assert(0); |
175 | } |
176 | |
177 | uv__make_close_pending(handle); |
178 | } |
179 | |
180 | int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { |
181 | int r; |
182 | int fd; |
183 | socklen_t len; |
184 | |
185 | if (handle == NULL || value == NULL) |
186 | return UV_EINVAL; |
187 | |
188 | if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE) |
189 | fd = uv__stream_fd((uv_stream_t*) handle); |
190 | else if (handle->type == UV_UDP) |
191 | fd = ((uv_udp_t *) handle)->io_watcher.fd; |
192 | else |
193 | return UV_ENOTSUP; |
194 | |
195 | len = sizeof(*value); |
196 | |
197 | if (*value == 0) |
198 | r = getsockopt(fd, SOL_SOCKET, optname, value, &len); |
199 | else |
200 | r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len); |
201 | |
202 | if (r < 0) |
203 | return UV__ERR(errno); |
204 | |
205 | return 0; |
206 | } |
207 | |
208 | void uv__make_close_pending(uv_handle_t* handle) { |
209 | assert(handle->flags & UV_HANDLE_CLOSING); |
210 | assert(!(handle->flags & UV_HANDLE_CLOSED)); |
211 | handle->next_closing = handle->loop->closing_handles; |
212 | handle->loop->closing_handles = handle; |
213 | } |
214 | |
215 | int uv__getiovmax(void) { |
216 | #if defined(IOV_MAX) |
217 | return IOV_MAX; |
218 | #elif defined(_SC_IOV_MAX) |
219 | static int iovmax_cached = -1; |
220 | int iovmax; |
221 | |
222 | iovmax = uv__load_relaxed(&iovmax_cached); |
223 | if (iovmax != -1) |
224 | return iovmax; |
225 | |
  /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
   * sysconf(_SC_IOV_MAX) cannot report the correct value: it returns -1
   * with errno set to EINPROGRESS. Fall back to 1 in that case.
   */
230 | iovmax = sysconf(_SC_IOV_MAX); |
231 | if (iovmax == -1) |
232 | iovmax = 1; |
233 | |
234 | uv__store_relaxed(&iovmax_cached, iovmax); |
235 | |
236 | return iovmax; |
237 | #else |
238 | return 1024; |
239 | #endif |
240 | } |
241 | |
242 | |
243 | static void uv__finish_close(uv_handle_t* handle) { |
244 | uv_signal_t* sh; |
245 | |
246 | /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still |
247 | * possible for it to be active in the sense that uv__is_active() returns |
248 | * true. |
249 | * |
250 | * A good example is when the user calls uv_shutdown(), immediately followed |
251 | * by uv_close(). The handle is considered active at this point because the |
252 | * completion of the shutdown req is still pending. |
253 | */ |
254 | assert(handle->flags & UV_HANDLE_CLOSING); |
255 | assert(!(handle->flags & UV_HANDLE_CLOSED)); |
256 | handle->flags |= UV_HANDLE_CLOSED; |
257 | |
258 | switch (handle->type) { |
259 | case UV_PREPARE: |
260 | case UV_CHECK: |
261 | case UV_IDLE: |
262 | case UV_ASYNC: |
263 | case UV_TIMER: |
264 | case UV_PROCESS: |
265 | case UV_FS_EVENT: |
266 | case UV_FS_POLL: |
267 | case UV_POLL: |
268 | break; |
269 | |
270 | case UV_SIGNAL: |
271 | /* If there are any caught signals "trapped" in the signal pipe, |
272 | * we can't call the close callback yet. Reinserting the handle |
273 | * into the closing queue makes the event loop spin but that's |
274 | * okay because we only need to deliver the pending events. |
275 | */ |
276 | sh = (uv_signal_t*) handle; |
277 | if (sh->caught_signals > sh->dispatched_signals) { |
278 | handle->flags ^= UV_HANDLE_CLOSED; |
279 | uv__make_close_pending(handle); /* Back into the queue. */ |
280 | return; |
281 | } |
282 | break; |
283 | |
284 | case UV_NAMED_PIPE: |
285 | case UV_TCP: |
286 | case UV_TTY: |
287 | uv__stream_destroy((uv_stream_t*)handle); |
288 | break; |
289 | |
290 | case UV_UDP: |
291 | uv__udp_finish_close((uv_udp_t*)handle); |
292 | break; |
293 | |
294 | default: |
295 | assert(0); |
296 | break; |
297 | } |
298 | |
299 | uv__handle_unref(handle); |
300 | QUEUE_REMOVE(&handle->handle_queue); |
301 | |
302 | if (handle->close_cb) { |
303 | handle->close_cb(handle); |
304 | } |
305 | } |
306 | |
307 | |
308 | static void uv__run_closing_handles(uv_loop_t* loop) { |
309 | uv_handle_t* p; |
310 | uv_handle_t* q; |
311 | |
312 | p = loop->closing_handles; |
313 | loop->closing_handles = NULL; |
314 | |
315 | while (p) { |
316 | q = p->next_closing; |
317 | uv__finish_close(p); |
318 | p = q; |
319 | } |
320 | } |
321 | |
322 | |
323 | int uv_is_closing(const uv_handle_t* handle) { |
324 | return uv__is_closing(handle); |
325 | } |
326 | |
327 | |
328 | int uv_backend_fd(const uv_loop_t* loop) { |
329 | return loop->backend_fd; |
330 | } |
331 | |
332 | |
333 | int uv_backend_timeout(const uv_loop_t* loop) { |
334 | if (loop->stop_flag != 0) |
335 | return 0; |
336 | |
337 | if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop)) |
338 | return 0; |
339 | |
340 | if (!QUEUE_EMPTY(&loop->idle_handles)) |
341 | return 0; |
342 | |
343 | if (!QUEUE_EMPTY(&loop->pending_queue)) |
344 | return 0; |
345 | |
346 | if (loop->closing_handles) |
347 | return 0; |
348 | |
349 | return uv__next_timeout(loop); |
350 | } |
351 | |
352 | |
353 | static int uv__loop_alive(const uv_loop_t* loop) { |
354 | return uv__has_active_handles(loop) || |
355 | uv__has_active_reqs(loop) || |
356 | loop->closing_handles != NULL; |
357 | } |
358 | |
359 | |
360 | int uv_loop_alive(const uv_loop_t* loop) { |
361 | return uv__loop_alive(loop); |
362 | } |
363 | |
364 | |
365 | int uv_run(uv_loop_t* loop, uv_run_mode mode) { |
366 | int timeout; |
367 | int r; |
368 | int ran_pending; |
369 | |
370 | r = uv__loop_alive(loop); |
371 | if (!r) |
372 | uv__update_time(loop); |
373 | |
374 | while (r != 0 && loop->stop_flag == 0) { |
375 | uv__update_time(loop); |
376 | uv__run_timers(loop); |
377 | ran_pending = uv__run_pending(loop); |
378 | uv__run_idle(loop); |
379 | uv__run_prepare(loop); |
380 | |
381 | timeout = 0; |
382 | if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT) |
383 | timeout = uv_backend_timeout(loop); |
384 | |
385 | uv__io_poll(loop, timeout); |
386 | |
    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated because an event was received.
     */
392 | uv__metrics_update_idle_time(loop); |
393 | |
394 | uv__run_check(loop); |
395 | uv__run_closing_handles(loop); |
396 | |
397 | if (mode == UV_RUN_ONCE) { |
398 | /* UV_RUN_ONCE implies forward progress: at least one callback must have |
399 | * been invoked when it returns. uv__io_poll() can return without doing |
400 | * I/O (meaning: no callbacks) when its timeout expires - which means we |
401 | * have pending timers that satisfy the forward progress constraint. |
402 | * |
403 | * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from |
404 | * the check. |
405 | */ |
406 | uv__update_time(loop); |
407 | uv__run_timers(loop); |
408 | } |
409 | |
410 | r = uv__loop_alive(loop); |
411 | if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) |
412 | break; |
413 | } |
414 | |
415 | /* The if statement lets gcc compile it to a conditional store. Avoids |
416 | * dirtying a cache line. |
417 | */ |
418 | if (loop->stop_flag != 0) |
419 | loop->stop_flag = 0; |
420 | |
421 | return r; |
422 | } |
423 | |
424 | |
425 | void uv_update_time(uv_loop_t* loop) { |
426 | uv__update_time(loop); |
427 | } |
428 | |
429 | |
430 | int uv_is_active(const uv_handle_t* handle) { |
431 | return uv__is_active(handle); |
432 | } |
433 | |
434 | |
435 | /* Open a socket in non-blocking close-on-exec mode, atomically if possible. */ |
436 | int uv__socket(int domain, int type, int protocol) { |
437 | int sockfd; |
438 | int err; |
439 | |
440 | #if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) |
441 | sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol); |
442 | if (sockfd != -1) |
443 | return sockfd; |
444 | |
445 | if (errno != EINVAL) |
446 | return UV__ERR(errno); |
447 | #endif |
448 | |
449 | sockfd = socket(domain, type, protocol); |
450 | if (sockfd == -1) |
451 | return UV__ERR(errno); |
452 | |
453 | err = uv__nonblock(sockfd, 1); |
454 | if (err == 0) |
455 | err = uv__cloexec(sockfd, 1); |
456 | |
457 | if (err) { |
458 | uv__close(sockfd); |
459 | return err; |
460 | } |
461 | |
462 | #if defined(SO_NOSIGPIPE) |
463 | { |
464 | int on = 1; |
465 | setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)); |
466 | } |
467 | #endif |
468 | |
469 | return sockfd; |
470 | } |
471 | |
/* Open a file in read-only, close-on-exec mode and return a FILE pointer. */
473 | FILE* uv__open_file(const char* path) { |
474 | int fd; |
475 | FILE* fp; |
476 | |
477 | fd = uv__open_cloexec(path, O_RDONLY); |
478 | if (fd < 0) |
479 | return NULL; |
480 | |
  fp = fdopen(fd, "r");
482 | if (fp == NULL) |
483 | uv__close(fd); |
484 | |
485 | return fp; |
486 | } |
487 | |
488 | |
489 | int uv__accept(int sockfd) { |
490 | int peerfd; |
491 | int err; |
492 | |
  (void) &err;  /* Unused when uv__accept4 is defined; silence the warning. */
494 | assert(sockfd >= 0); |
495 | |
496 | do |
497 | #ifdef uv__accept4 |
498 | peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC); |
499 | #else |
500 | peerfd = accept(sockfd, NULL, NULL); |
501 | #endif |
502 | while (peerfd == -1 && errno == EINTR); |
503 | |
504 | if (peerfd == -1) |
505 | return UV__ERR(errno); |
506 | |
507 | #ifndef uv__accept4 |
508 | err = uv__cloexec(peerfd, 1); |
509 | if (err == 0) |
510 | err = uv__nonblock(peerfd, 1); |
511 | |
512 | if (err != 0) { |
513 | uv__close(peerfd); |
514 | return err; |
515 | } |
516 | #endif |
517 | |
518 | return peerfd; |
519 | } |
520 | |
521 | |
/* close() on macOS has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on Linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
530 | int uv__close_nocancel(int fd) { |
531 | #if defined(__APPLE__) |
532 | #pragma GCC diagnostic push |
533 | #pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension" |
534 | #if defined(__LP64__) || TARGET_OS_IPHONE |
535 | extern int close$NOCANCEL(int); |
536 | return close$NOCANCEL(fd); |
537 | #else |
538 | extern int close$NOCANCEL$UNIX2003(int); |
539 | return close$NOCANCEL$UNIX2003(fd); |
540 | #endif |
541 | #pragma GCC diagnostic pop |
542 | #elif defined(__linux__) |
543 | return syscall(SYS_close, fd); |
544 | #else |
545 | return close(fd); |
546 | #endif |
547 | } |
548 | |
549 | |
550 | int uv__close_nocheckstdio(int fd) { |
551 | int saved_errno; |
552 | int rc; |
553 | |
554 | assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */ |
555 | |
556 | saved_errno = errno; |
557 | rc = uv__close_nocancel(fd); |
558 | if (rc == -1) { |
559 | rc = UV__ERR(errno); |
560 | if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS)) |
561 | rc = 0; /* The close is in progress, not an error. */ |
562 | errno = saved_errno; |
563 | } |
564 | |
565 | return rc; |
566 | } |
567 | |
568 | |
569 | int uv__close(int fd) { |
570 | assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */ |
571 | #if defined(__MVS__) |
572 | SAVE_ERRNO(epoll_file_close(fd)); |
573 | #endif |
574 | return uv__close_nocheckstdio(fd); |
575 | } |
576 | |
577 | |
578 | int uv__nonblock_ioctl(int fd, int set) { |
579 | int r; |
580 | |
581 | do |
582 | r = ioctl(fd, FIONBIO, &set); |
583 | while (r == -1 && errno == EINTR); |
584 | |
585 | if (r) |
586 | return UV__ERR(errno); |
587 | |
588 | return 0; |
589 | } |
590 | |
591 | |
592 | #if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__) |
593 | int uv__cloexec_ioctl(int fd, int set) { |
594 | int r; |
595 | |
596 | do |
597 | r = ioctl(fd, set ? FIOCLEX : FIONCLEX); |
598 | while (r == -1 && errno == EINTR); |
599 | |
600 | if (r) |
601 | return UV__ERR(errno); |
602 | |
603 | return 0; |
604 | } |
605 | #endif |
606 | |
607 | |
608 | int uv__nonblock_fcntl(int fd, int set) { |
609 | int flags; |
610 | int r; |
611 | |
612 | do |
613 | r = fcntl(fd, F_GETFL); |
614 | while (r == -1 && errno == EINTR); |
615 | |
616 | if (r == -1) |
617 | return UV__ERR(errno); |
618 | |
619 | /* Bail out now if already set/clear. */ |
620 | if (!!(r & O_NONBLOCK) == !!set) |
621 | return 0; |
622 | |
623 | if (set) |
624 | flags = r | O_NONBLOCK; |
625 | else |
626 | flags = r & ~O_NONBLOCK; |
627 | |
628 | do |
629 | r = fcntl(fd, F_SETFL, flags); |
630 | while (r == -1 && errno == EINTR); |
631 | |
632 | if (r) |
633 | return UV__ERR(errno); |
634 | |
635 | return 0; |
636 | } |
637 | |
638 | |
639 | int uv__cloexec_fcntl(int fd, int set) { |
640 | int flags; |
641 | int r; |
642 | |
643 | do |
644 | r = fcntl(fd, F_GETFD); |
645 | while (r == -1 && errno == EINTR); |
646 | |
647 | if (r == -1) |
648 | return UV__ERR(errno); |
649 | |
650 | /* Bail out now if already set/clear. */ |
651 | if (!!(r & FD_CLOEXEC) == !!set) |
652 | return 0; |
653 | |
654 | if (set) |
655 | flags = r | FD_CLOEXEC; |
656 | else |
657 | flags = r & ~FD_CLOEXEC; |
658 | |
659 | do |
660 | r = fcntl(fd, F_SETFD, flags); |
661 | while (r == -1 && errno == EINTR); |
662 | |
663 | if (r) |
664 | return UV__ERR(errno); |
665 | |
666 | return 0; |
667 | } |
668 | |
669 | |
670 | ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) { |
671 | struct cmsghdr* cmsg; |
672 | ssize_t rc; |
673 | int* pfd; |
674 | int* end; |
675 | #if defined(__linux__) |
676 | static int no_msg_cmsg_cloexec; |
677 | if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) { |
678 | rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */ |
679 | if (rc != -1) |
680 | return rc; |
681 | if (errno != EINVAL) |
682 | return UV__ERR(errno); |
683 | rc = recvmsg(fd, msg, flags); |
684 | if (rc == -1) |
685 | return UV__ERR(errno); |
686 | uv__store_relaxed(&no_msg_cmsg_cloexec, 1); |
687 | } else { |
688 | rc = recvmsg(fd, msg, flags); |
689 | } |
690 | #else |
691 | rc = recvmsg(fd, msg, flags); |
692 | #endif |
693 | if (rc == -1) |
694 | return UV__ERR(errno); |
695 | if (msg->msg_controllen == 0) |
696 | return rc; |
697 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) |
698 | if (cmsg->cmsg_type == SCM_RIGHTS) |
699 | for (pfd = (int*) CMSG_DATA(cmsg), |
700 | end = (int*) ((char*) cmsg + cmsg->cmsg_len); |
701 | pfd < end; |
702 | pfd += 1) |
703 | uv__cloexec(*pfd, 1); |
704 | return rc; |
705 | } |
706 | |
707 | |
708 | int uv_cwd(char* buffer, size_t* size) { |
709 | char scratch[1 + UV__PATH_MAX]; |
710 | |
711 | if (buffer == NULL || size == NULL) |
712 | return UV_EINVAL; |
713 | |
714 | /* Try to read directly into the user's buffer first... */ |
715 | if (getcwd(buffer, *size) != NULL) |
716 | goto fixup; |
717 | |
718 | if (errno != ERANGE) |
719 | return UV__ERR(errno); |
720 | |
721 | /* ...or into scratch space if the user's buffer is too small |
722 | * so we can report how much space to provide on the next try. |
723 | */ |
724 | if (getcwd(scratch, sizeof(scratch)) == NULL) |
725 | return UV__ERR(errno); |
726 | |
727 | buffer = scratch; |
728 | |
729 | fixup: |
730 | |
731 | *size = strlen(buffer); |
732 | |
733 | if (*size > 1 && buffer[*size - 1] == '/') { |
734 | *size -= 1; |
735 | buffer[*size] = '\0'; |
736 | } |
737 | |
738 | if (buffer == scratch) { |
739 | *size += 1; |
740 | return UV_ENOBUFS; |
741 | } |
742 | |
743 | return 0; |
744 | } |
745 | |
746 | |
747 | int uv_chdir(const char* dir) { |
748 | if (chdir(dir)) |
749 | return UV__ERR(errno); |
750 | |
751 | return 0; |
752 | } |
753 | |
754 | |
755 | void uv_disable_stdio_inheritance(void) { |
756 | int fd; |
757 | |
758 | /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the |
759 | * first 16 file descriptors. After that, bail out after the first error. |
760 | */ |
761 | for (fd = 0; ; fd++) |
762 | if (uv__cloexec(fd, 1) && fd > 15) |
763 | break; |
764 | } |
765 | |
766 | |
767 | int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { |
768 | int fd_out; |
769 | |
770 | switch (handle->type) { |
771 | case UV_TCP: |
772 | case UV_NAMED_PIPE: |
773 | case UV_TTY: |
774 | fd_out = uv__stream_fd((uv_stream_t*) handle); |
775 | break; |
776 | |
777 | case UV_UDP: |
778 | fd_out = ((uv_udp_t *) handle)->io_watcher.fd; |
779 | break; |
780 | |
781 | case UV_POLL: |
782 | fd_out = ((uv_poll_t *) handle)->io_watcher.fd; |
783 | break; |
784 | |
785 | default: |
786 | return UV_EINVAL; |
787 | } |
788 | |
789 | if (uv__is_closing(handle) || fd_out == -1) |
790 | return UV_EBADF; |
791 | |
792 | *fd = fd_out; |
793 | return 0; |
794 | } |
795 | |
796 | |
797 | static int uv__run_pending(uv_loop_t* loop) { |
798 | QUEUE* q; |
799 | QUEUE pq; |
800 | uv__io_t* w; |
801 | |
802 | if (QUEUE_EMPTY(&loop->pending_queue)) |
803 | return 0; |
804 | |
805 | QUEUE_MOVE(&loop->pending_queue, &pq); |
806 | |
807 | while (!QUEUE_EMPTY(&pq)) { |
808 | q = QUEUE_HEAD(&pq); |
809 | QUEUE_REMOVE(q); |
810 | QUEUE_INIT(q); |
811 | w = QUEUE_DATA(q, uv__io_t, pending_queue); |
812 | w->cb(loop, w, POLLOUT); |
813 | } |
814 | |
815 | return 1; |
816 | } |
817 | |
818 | |
819 | static unsigned int next_power_of_two(unsigned int val) { |
820 | val -= 1; |
821 | val |= val >> 1; |
822 | val |= val >> 2; |
823 | val |= val >> 4; |
824 | val |= val >> 8; |
825 | val |= val >> 16; |
826 | val += 1; |
827 | return val; |
828 | } |
829 | |
830 | static void maybe_resize(uv_loop_t* loop, unsigned int len) { |
831 | uv__io_t** watchers; |
832 | void* fake_watcher_list; |
833 | void* fake_watcher_count; |
834 | unsigned int nwatchers; |
835 | unsigned int i; |
836 | |
837 | if (len <= loop->nwatchers) |
838 | return; |
839 | |
840 | /* Preserve fake watcher list and count at the end of the watchers */ |
841 | if (loop->watchers != NULL) { |
842 | fake_watcher_list = loop->watchers[loop->nwatchers]; |
843 | fake_watcher_count = loop->watchers[loop->nwatchers + 1]; |
844 | } else { |
845 | fake_watcher_list = NULL; |
846 | fake_watcher_count = NULL; |
847 | } |
848 | |
849 | nwatchers = next_power_of_two(len + 2) - 2; |
850 | watchers = uv__reallocf(loop->watchers, |
851 | (nwatchers + 2) * sizeof(loop->watchers[0])); |
852 | |
853 | if (watchers == NULL) |
854 | abort(); |
855 | for (i = loop->nwatchers; i < nwatchers; i++) |
856 | watchers[i] = NULL; |
857 | watchers[nwatchers] = fake_watcher_list; |
858 | watchers[nwatchers + 1] = fake_watcher_count; |
859 | |
860 | loop->watchers = watchers; |
861 | loop->nwatchers = nwatchers; |
862 | } |
863 | |
864 | |
865 | void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) { |
866 | assert(cb != NULL); |
867 | assert(fd >= -1); |
868 | QUEUE_INIT(&w->pending_queue); |
869 | QUEUE_INIT(&w->watcher_queue); |
870 | w->cb = cb; |
871 | w->fd = fd; |
872 | w->events = 0; |
873 | w->pevents = 0; |
874 | |
875 | #if defined(UV_HAVE_KQUEUE) |
876 | w->rcount = 0; |
877 | w->wcount = 0; |
878 | #endif /* defined(UV_HAVE_KQUEUE) */ |
879 | } |
880 | |
881 | |
882 | void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) { |
883 | assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI))); |
884 | assert(0 != events); |
885 | assert(w->fd >= 0); |
886 | assert(w->fd < INT_MAX); |
887 | |
888 | w->pevents |= events; |
889 | maybe_resize(loop, w->fd + 1); |
890 | |
891 | #if !defined(__sun) |
892 | /* The event ports backend needs to rearm all file descriptors on each and |
893 | * every tick of the event loop but the other backends allow us to |
894 | * short-circuit here if the event mask is unchanged. |
895 | */ |
896 | if (w->events == w->pevents) |
897 | return; |
898 | #endif |
899 | |
900 | if (QUEUE_EMPTY(&w->watcher_queue)) |
901 | QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); |
902 | |
903 | if (loop->watchers[w->fd] == NULL) { |
904 | loop->watchers[w->fd] = w; |
905 | loop->nfds++; |
906 | } |
907 | } |
908 | |
909 | |
910 | void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) { |
911 | assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI))); |
912 | assert(0 != events); |
913 | |
914 | if (w->fd == -1) |
915 | return; |
916 | |
917 | assert(w->fd >= 0); |
918 | |
919 | /* Happens when uv__io_stop() is called on a handle that was never started. */ |
920 | if ((unsigned) w->fd >= loop->nwatchers) |
921 | return; |
922 | |
923 | w->pevents &= ~events; |
924 | |
925 | if (w->pevents == 0) { |
926 | QUEUE_REMOVE(&w->watcher_queue); |
927 | QUEUE_INIT(&w->watcher_queue); |
928 | w->events = 0; |
929 | |
930 | if (w == loop->watchers[w->fd]) { |
931 | assert(loop->nfds > 0); |
932 | loop->watchers[w->fd] = NULL; |
933 | loop->nfds--; |
934 | } |
935 | } |
936 | else if (QUEUE_EMPTY(&w->watcher_queue)) |
937 | QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); |
938 | } |
939 | |
940 | |
941 | void uv__io_close(uv_loop_t* loop, uv__io_t* w) { |
942 | uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI); |
943 | QUEUE_REMOVE(&w->pending_queue); |
944 | |
945 | /* Remove stale events for this file descriptor */ |
946 | if (w->fd != -1) |
947 | uv__platform_invalidate_fd(loop, w->fd); |
948 | } |
949 | |
950 | |
951 | void uv__io_feed(uv_loop_t* loop, uv__io_t* w) { |
952 | if (QUEUE_EMPTY(&w->pending_queue)) |
953 | QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue); |
954 | } |
955 | |
956 | |
957 | int uv__io_active(const uv__io_t* w, unsigned int events) { |
958 | assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI))); |
959 | assert(0 != events); |
960 | return 0 != (w->pevents & events); |
961 | } |
962 | |
963 | |
964 | int uv__fd_exists(uv_loop_t* loop, int fd) { |
965 | return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL; |
966 | } |
967 | |
968 | |
969 | int uv_getrusage(uv_rusage_t* rusage) { |
970 | struct rusage usage; |
971 | |
972 | if (getrusage(RUSAGE_SELF, &usage)) |
973 | return UV__ERR(errno); |
974 | |
975 | rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec; |
976 | rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec; |
977 | |
978 | rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec; |
979 | rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec; |
980 | |
981 | #if !defined(__MVS__) && !defined(__HAIKU__) |
982 | rusage->ru_maxrss = usage.ru_maxrss; |
983 | rusage->ru_ixrss = usage.ru_ixrss; |
984 | rusage->ru_idrss = usage.ru_idrss; |
985 | rusage->ru_isrss = usage.ru_isrss; |
986 | rusage->ru_minflt = usage.ru_minflt; |
987 | rusage->ru_majflt = usage.ru_majflt; |
988 | rusage->ru_nswap = usage.ru_nswap; |
989 | rusage->ru_inblock = usage.ru_inblock; |
990 | rusage->ru_oublock = usage.ru_oublock; |
991 | rusage->ru_msgsnd = usage.ru_msgsnd; |
992 | rusage->ru_msgrcv = usage.ru_msgrcv; |
993 | rusage->ru_nsignals = usage.ru_nsignals; |
994 | rusage->ru_nvcsw = usage.ru_nvcsw; |
995 | rusage->ru_nivcsw = usage.ru_nivcsw; |
996 | #endif |
997 | |
998 | return 0; |
999 | } |
1000 | |
1001 | |
1002 | int uv__open_cloexec(const char* path, int flags) { |
1003 | #if defined(O_CLOEXEC) |
1004 | int fd; |
1005 | |
1006 | fd = open(path, flags | O_CLOEXEC); |
1007 | if (fd == -1) |
1008 | return UV__ERR(errno); |
1009 | |
1010 | return fd; |
1011 | #else /* O_CLOEXEC */ |
1012 | int err; |
1013 | int fd; |
1014 | |
1015 | fd = open(path, flags); |
1016 | if (fd == -1) |
1017 | return UV__ERR(errno); |
1018 | |
1019 | err = uv__cloexec(fd, 1); |
1020 | if (err) { |
1021 | uv__close(fd); |
1022 | return err; |
1023 | } |
1024 | |
1025 | return fd; |
1026 | #endif /* O_CLOEXEC */ |
1027 | } |
1028 | |
1029 | |
1030 | int uv__dup2_cloexec(int oldfd, int newfd) { |
1031 | #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__) |
1032 | int r; |
1033 | |
1034 | r = dup3(oldfd, newfd, O_CLOEXEC); |
1035 | if (r == -1) |
1036 | return UV__ERR(errno); |
1037 | |
1038 | return r; |
1039 | #else |
1040 | int err; |
1041 | int r; |
1042 | |
1043 | r = dup2(oldfd, newfd); /* Never retry. */ |
1044 | if (r == -1) |
1045 | return UV__ERR(errno); |
1046 | |
1047 | err = uv__cloexec(newfd, 1); |
1048 | if (err != 0) { |
1049 | uv__close(newfd); |
1050 | return err; |
1051 | } |
1052 | |
1053 | return r; |
1054 | #endif |
1055 | } |
1056 | |
1057 | |
1058 | int uv_os_homedir(char* buffer, size_t* size) { |
1059 | uv_passwd_t pwd; |
1060 | size_t len; |
1061 | int r; |
1062 | |
1063 | /* Check if the HOME environment variable is set first. The task of |
1064 | performing input validation on buffer and size is taken care of by |
1065 | uv_os_getenv(). */ |
  r = uv_os_getenv("HOME", buffer, size);
1067 | |
1068 | if (r != UV_ENOENT) |
1069 | return r; |
1070 | |
1071 | /* HOME is not set, so call uv__getpwuid_r() */ |
1072 | r = uv__getpwuid_r(&pwd); |
1073 | |
1074 | if (r != 0) { |
1075 | return r; |
1076 | } |
1077 | |
1078 | len = strlen(pwd.homedir); |
1079 | |
1080 | if (len >= *size) { |
1081 | *size = len + 1; |
1082 | uv_os_free_passwd(&pwd); |
1083 | return UV_ENOBUFS; |
1084 | } |
1085 | |
1086 | memcpy(buffer, pwd.homedir, len + 1); |
1087 | *size = len; |
1088 | uv_os_free_passwd(&pwd); |
1089 | |
1090 | return 0; |
1091 | } |
1092 | |
1093 | |
1094 | int uv_os_tmpdir(char* buffer, size_t* size) { |
1095 | const char* buf; |
1096 | size_t len; |
1097 | |
1098 | if (buffer == NULL || size == NULL || *size == 0) |
1099 | return UV_EINVAL; |
1100 | |
1101 | #define CHECK_ENV_VAR(name) \ |
1102 | do { \ |
1103 | buf = getenv(name); \ |
1104 | if (buf != NULL) \ |
1105 | goto return_buffer; \ |
1106 | } \ |
1107 | while (0) |
1108 | |
1109 | /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */ |
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");
1114 | |
1115 | #undef CHECK_ENV_VAR |
1116 | |
1117 | /* No temp environment variables defined */ |
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif
1123 | |
1124 | return_buffer: |
1125 | len = strlen(buf); |
1126 | |
1127 | if (len >= *size) { |
1128 | *size = len + 1; |
1129 | return UV_ENOBUFS; |
1130 | } |
1131 | |
1132 | /* The returned directory should not have a trailing slash. */ |
1133 | if (len > 1 && buf[len - 1] == '/') { |
1134 | len--; |
1135 | } |
1136 | |
1137 | memcpy(buffer, buf, len + 1); |
1138 | buffer[len] = '\0'; |
1139 | *size = len; |
1140 | |
1141 | return 0; |
1142 | } |
1143 | |
1144 | |
1145 | int uv__getpwuid_r(uv_passwd_t* pwd) { |
1146 | struct passwd pw; |
1147 | struct passwd* result; |
1148 | char* buf; |
1149 | uid_t uid; |
1150 | size_t bufsize; |
1151 | size_t name_size; |
1152 | size_t homedir_size; |
1153 | size_t shell_size; |
1154 | long initsize; |
1155 | int r; |
1156 | |
1157 | if (pwd == NULL) |
1158 | return UV_EINVAL; |
1159 | |
1160 | initsize = sysconf(_SC_GETPW_R_SIZE_MAX); |
1161 | |
1162 | if (initsize <= 0) |
1163 | bufsize = 4096; |
1164 | else |
1165 | bufsize = (size_t) initsize; |
1166 | |
1167 | uid = geteuid(); |
1168 | buf = NULL; |
1169 | |
1170 | for (;;) { |
1171 | uv__free(buf); |
1172 | buf = uv__malloc(bufsize); |
1173 | |
1174 | if (buf == NULL) |
1175 | return UV_ENOMEM; |
1176 | |
1177 | r = getpwuid_r(uid, &pw, buf, bufsize, &result); |
1178 | |
1179 | if (r != ERANGE) |
1180 | break; |
1181 | |
1182 | bufsize *= 2; |
1183 | } |
1184 | |
1185 | if (r != 0) { |
1186 | uv__free(buf); |
1187 | return -r; |
1188 | } |
1189 | |
1190 | if (result == NULL) { |
1191 | uv__free(buf); |
1192 | return UV_ENOENT; |
1193 | } |
1194 | |
1195 | /* Allocate memory for the username, shell, and home directory */ |
1196 | name_size = strlen(pw.pw_name) + 1; |
1197 | homedir_size = strlen(pw.pw_dir) + 1; |
1198 | shell_size = strlen(pw.pw_shell) + 1; |
1199 | pwd->username = uv__malloc(name_size + homedir_size + shell_size); |
1200 | |
1201 | if (pwd->username == NULL) { |
1202 | uv__free(buf); |
1203 | return UV_ENOMEM; |
1204 | } |
1205 | |
1206 | /* Copy the username */ |
1207 | memcpy(pwd->username, pw.pw_name, name_size); |
1208 | |
1209 | /* Copy the home directory */ |
1210 | pwd->homedir = pwd->username + name_size; |
1211 | memcpy(pwd->homedir, pw.pw_dir, homedir_size); |
1212 | |
1213 | /* Copy the shell */ |
1214 | pwd->shell = pwd->homedir + homedir_size; |
1215 | memcpy(pwd->shell, pw.pw_shell, shell_size); |
1216 | |
1217 | /* Copy the uid and gid */ |
1218 | pwd->uid = pw.pw_uid; |
1219 | pwd->gid = pw.pw_gid; |
1220 | |
1221 | uv__free(buf); |
1222 | |
1223 | return 0; |
1224 | } |
1225 | |
1226 | |
1227 | void uv_os_free_passwd(uv_passwd_t* pwd) { |
1228 | if (pwd == NULL) |
1229 | return; |
1230 | |
1231 | /* |
1232 | The memory for name, shell, and homedir are allocated in a single |
1233 | uv__malloc() call. The base of the pointer is stored in pwd->username, so |
1234 | that is the field that needs to be freed. |
1235 | */ |
1236 | uv__free(pwd->username); |
1237 | pwd->username = NULL; |
1238 | pwd->shell = NULL; |
1239 | pwd->homedir = NULL; |
1240 | } |
1241 | |
1242 | |
1243 | int uv_os_get_passwd(uv_passwd_t* pwd) { |
1244 | return uv__getpwuid_r(pwd); |
1245 | } |
1246 | |
1247 | |
1248 | int uv_translate_sys_error(int sys_errno) { |
1249 | /* If < 0 then it's already a libuv error. */ |
1250 | return sys_errno <= 0 ? sys_errno : -sys_errno; |
1251 | } |
1252 | |
1253 | |
1254 | int uv_os_environ(uv_env_item_t** envitems, int* count) { |
1255 | int i, j, cnt; |
1256 | uv_env_item_t* envitem; |
1257 | |
1258 | *envitems = NULL; |
1259 | *count = 0; |
1260 | |
1261 | for (i = 0; environ[i] != NULL; i++); |
1262 | |
1263 | *envitems = uv__calloc(i, sizeof(**envitems)); |
1264 | |
1265 | if (*envitems == NULL) |
1266 | return UV_ENOMEM; |
1267 | |
1268 | for (j = 0, cnt = 0; j < i; j++) { |
1269 | char* buf; |
1270 | char* ptr; |
1271 | |
1272 | if (environ[j] == NULL) |
1273 | break; |
1274 | |
1275 | buf = uv__strdup(environ[j]); |
1276 | if (buf == NULL) |
1277 | goto fail; |
1278 | |
1279 | ptr = strchr(buf, '='); |
1280 | if (ptr == NULL) { |
1281 | uv__free(buf); |
1282 | continue; |
1283 | } |
1284 | |
1285 | *ptr = '\0'; |
1286 | |
1287 | envitem = &(*envitems)[cnt]; |
1288 | envitem->name = buf; |
1289 | envitem->value = ptr + 1; |
1290 | |
1291 | cnt++; |
1292 | } |
1293 | |
1294 | *count = cnt; |
1295 | return 0; |
1296 | |
1297 | fail: |
1298 | for (i = 0; i < cnt; i++) { |
    envitem = &(*envitems)[i];
1300 | uv__free(envitem->name); |
1301 | } |
1302 | uv__free(*envitems); |
1303 | |
1304 | *envitems = NULL; |
1305 | *count = 0; |
1306 | return UV_ENOMEM; |
1307 | } |
1308 | |
1309 | |
1310 | int uv_os_getenv(const char* name, char* buffer, size_t* size) { |
1311 | char* var; |
1312 | size_t len; |
1313 | |
1314 | if (name == NULL || buffer == NULL || size == NULL || *size == 0) |
1315 | return UV_EINVAL; |
1316 | |
1317 | var = getenv(name); |
1318 | |
1319 | if (var == NULL) |
1320 | return UV_ENOENT; |
1321 | |
1322 | len = strlen(var); |
1323 | |
1324 | if (len >= *size) { |
1325 | *size = len + 1; |
1326 | return UV_ENOBUFS; |
1327 | } |
1328 | |
1329 | memcpy(buffer, var, len + 1); |
1330 | *size = len; |
1331 | |
1332 | return 0; |
1333 | } |
1334 | |
1335 | |
1336 | int uv_os_setenv(const char* name, const char* value) { |
1337 | if (name == NULL || value == NULL) |
1338 | return UV_EINVAL; |
1339 | |
1340 | if (setenv(name, value, 1) != 0) |
1341 | return UV__ERR(errno); |
1342 | |
1343 | return 0; |
1344 | } |
1345 | |
1346 | |
1347 | int uv_os_unsetenv(const char* name) { |
1348 | if (name == NULL) |
1349 | return UV_EINVAL; |
1350 | |
1351 | if (unsetenv(name) != 0) |
1352 | return UV__ERR(errno); |
1353 | |
1354 | return 0; |
1355 | } |
1356 | |
1357 | |
1358 | int uv_os_gethostname(char* buffer, size_t* size) { |
1359 | /* |
1360 | On some platforms, if the input buffer is not large enough, gethostname() |
1361 | succeeds, but truncates the result. libuv can detect this and return ENOBUFS |
1362 | instead by creating a large enough buffer and comparing the hostname length |
1363 | to the size input. |
1364 | */ |
1365 | char buf[UV_MAXHOSTNAMESIZE]; |
1366 | size_t len; |
1367 | |
1368 | if (buffer == NULL || size == NULL || *size == 0) |
1369 | return UV_EINVAL; |
1370 | |
1371 | if (gethostname(buf, sizeof(buf)) != 0) |
1372 | return UV__ERR(errno); |
1373 | |
1374 | buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */ |
1375 | len = strlen(buf); |
1376 | |
1377 | if (len >= *size) { |
1378 | *size = len + 1; |
1379 | return UV_ENOBUFS; |
1380 | } |
1381 | |
1382 | memcpy(buffer, buf, len + 1); |
1383 | *size = len; |
1384 | return 0; |
1385 | } |
1386 | |
1387 | |
1388 | uv_os_fd_t uv_get_osfhandle(int fd) { |
1389 | return fd; |
1390 | } |
1391 | |
1392 | int uv_open_osfhandle(uv_os_fd_t os_fd) { |
1393 | return os_fd; |
1394 | } |
1395 | |
1396 | uv_pid_t uv_os_getpid(void) { |
1397 | return getpid(); |
1398 | } |
1399 | |
1400 | |
1401 | uv_pid_t uv_os_getppid(void) { |
1402 | return getppid(); |
1403 | } |
1404 | |
1405 | |
1406 | int uv_os_getpriority(uv_pid_t pid, int* priority) { |
1407 | int r; |
1408 | |
1409 | if (priority == NULL) |
1410 | return UV_EINVAL; |
1411 | |
1412 | errno = 0; |
1413 | r = getpriority(PRIO_PROCESS, (int) pid); |
1414 | |
1415 | if (r == -1 && errno != 0) |
1416 | return UV__ERR(errno); |
1417 | |
1418 | *priority = r; |
1419 | return 0; |
1420 | } |
1421 | |
1422 | |
1423 | int uv_os_setpriority(uv_pid_t pid, int priority) { |
1424 | if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW) |
1425 | return UV_EINVAL; |
1426 | |
1427 | if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0) |
1428 | return UV__ERR(errno); |
1429 | |
1430 | return 0; |
1431 | } |
1432 | |
1433 | |
1434 | int uv_os_uname(uv_utsname_t* buffer) { |
1435 | struct utsname buf; |
1436 | int r; |
1437 | |
1438 | if (buffer == NULL) |
1439 | return UV_EINVAL; |
1440 | |
1441 | if (uname(&buf) == -1) { |
1442 | r = UV__ERR(errno); |
1443 | goto error; |
1444 | } |
1445 | |
1446 | r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname)); |
1447 | if (r == UV_E2BIG) |
1448 | goto error; |
1449 | |
1450 | #ifdef _AIX |
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
1456 | if (r >= sizeof(buffer->release)) { |
1457 | r = UV_E2BIG; |
1458 | goto error; |
1459 | } |
1460 | #else |
1461 | r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release)); |
1462 | if (r == UV_E2BIG) |
1463 | goto error; |
1464 | #endif |
1465 | |
1466 | r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version)); |
1467 | if (r == UV_E2BIG) |
1468 | goto error; |
1469 | |
1470 | #if defined(_AIX) || defined(__PASE__) |
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
1472 | #else |
1473 | r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine)); |
1474 | #endif |
1475 | |
1476 | if (r == UV_E2BIG) |
1477 | goto error; |
1478 | |
1479 | return 0; |
1480 | |
1481 | error: |
1482 | buffer->sysname[0] = '\0'; |
1483 | buffer->release[0] = '\0'; |
1484 | buffer->version[0] = '\0'; |
1485 | buffer->machine[0] = '\0'; |
1486 | return r; |
1487 | } |
1488 | |
1489 | int uv__getsockpeername(const uv_handle_t* handle, |
1490 | uv__peersockfunc func, |
1491 | struct sockaddr* name, |
1492 | int* namelen) { |
1493 | socklen_t socklen; |
1494 | uv_os_fd_t fd; |
1495 | int r; |
1496 | |
1497 | r = uv_fileno(handle, &fd); |
1498 | if (r < 0) |
1499 | return r; |
1500 | |
1501 | /* sizeof(socklen_t) != sizeof(int) on some systems. */ |
1502 | socklen = (socklen_t) *namelen; |
1503 | |
1504 | if (func(fd, name, &socklen)) |
1505 | return UV__ERR(errno); |
1506 | |
1507 | *namelen = (int) socklen; |
1508 | return 0; |
1509 | } |
1510 | |
1511 | int uv_gettimeofday(uv_timeval64_t* tv) { |
1512 | struct timeval time; |
1513 | |
1514 | if (tv == NULL) |
1515 | return UV_EINVAL; |
1516 | |
1517 | if (gettimeofday(&time, NULL) != 0) |
1518 | return UV__ERR(errno); |
1519 | |
1520 | tv->tv_sec = (int64_t) time.tv_sec; |
1521 | tv->tv_usec = (int32_t) time.tv_usec; |
1522 | return 0; |
1523 | } |
1524 | |
1525 | void uv_sleep(unsigned int msec) { |
1526 | struct timespec timeout; |
1527 | int rc; |
1528 | |
1529 | timeout.tv_sec = msec / 1000; |
1530 | timeout.tv_nsec = (msec % 1000) * 1000 * 1000; |
1531 | |
1532 | do |
1533 | rc = nanosleep(&timeout, &timeout); |
1534 | while (rc == -1 && errno == EINTR); |
1535 | |
1536 | assert(rc == 0); |
1537 | } |
1538 | |
1539 | int uv__search_path(const char* prog, char* buf, size_t* buflen) { |
1540 | char abspath[UV__PATH_MAX]; |
1541 | size_t abspath_size; |
1542 | char trypath[UV__PATH_MAX]; |
1543 | char* cloned_path; |
1544 | char* path_env; |
1545 | char* token; |
1546 | |
1547 | if (buf == NULL || buflen == NULL || *buflen == 0) |
1548 | return UV_EINVAL; |
1549 | |
1550 | /* |
1551 | * Possibilities for prog: |
1552 | * i) an absolute path such as: /home/user/myprojects/nodejs/node |
1553 | * ii) a relative path such as: ./node or ../myprojects/nodejs/node |
1554 | * iii) a bare filename such as "node", after exporting PATH variable |
1555 | * to its location. |
1556 | */ |
1557 | |
1558 | /* Case i) and ii) absolute or relative paths */ |
1559 | if (strchr(prog, '/') != NULL) { |
1560 | if (realpath(prog, abspath) != abspath) |
1561 | return UV__ERR(errno); |
1562 | |
1563 | abspath_size = strlen(abspath); |
1564 | |
1565 | *buflen -= 1; |
1566 | if (*buflen > abspath_size) |
1567 | *buflen = abspath_size; |
1568 | |
1569 | memcpy(buf, abspath, *buflen); |
1570 | buf[*buflen] = '\0'; |
1571 | |
1572 | return 0; |
1573 | } |
1574 | |
1575 | /* Case iii). Search PATH environment variable */ |
1576 | cloned_path = NULL; |
1577 | token = NULL; |
  path_env = getenv("PATH");
1579 | |
1580 | if (path_env == NULL) |
1581 | return UV_EINVAL; |
1582 | |
1583 | cloned_path = uv__strdup(path_env); |
1584 | if (cloned_path == NULL) |
1585 | return UV_ENOMEM; |
1586 | |
  token = strtok(cloned_path, ":");
1588 | while (token != NULL) { |
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
1590 | if (realpath(trypath, abspath) == abspath) { |
1591 | /* Check the match is executable */ |
1592 | if (access(abspath, X_OK) == 0) { |
1593 | abspath_size = strlen(abspath); |
1594 | |
1595 | *buflen -= 1; |
1596 | if (*buflen > abspath_size) |
1597 | *buflen = abspath_size; |
1598 | |
1599 | memcpy(buf, abspath, *buflen); |
1600 | buf[*buflen] = '\0'; |
1601 | |
1602 | uv__free(cloned_path); |
1603 | return 0; |
1604 | } |
1605 | } |
    token = strtok(NULL, ":");
1607 | } |
1608 | uv__free(cloned_path); |
1609 | |
1610 | /* Out of tokens (path entries), and no match found */ |
1611 | return UV_EINVAL; |
1612 | } |
1613 | |