/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv-common.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h> /* NULL */
#include <stdio.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */

#if defined(_WIN32)
# include <malloc.h> /* malloc */
#else
# include <net/if.h> /* if_nametoindex */
# include <sys/un.h> /* AF_UNIX, sockaddr_un */
#endif


typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};

char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}

char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);
  if (n < len)
    len = n;
  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;
  m[len] = '\0';
  return memcpy(m, s, len);
}

void* uv__malloc(size_t size) {
  if (size > 0)
    return uv__allocator.local_malloc(size);
  return NULL;
}

void uv__free(void* ptr) {
  int saved_errno;

  /* Libuv expects that free() does not clobber errno. The system allocator
   * honors that assumption but custom allocators may not be so careful.
   */
  saved_errno = errno;
  uv__allocator.local_free(ptr);
  errno = saved_errno;
}

void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}

void* uv__realloc(void* ptr, size_t size) {
  if (size > 0)
    return uv__allocator.local_realloc(ptr, size);
  uv__free(ptr);
  return NULL;
}

void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);

  return newptr;
}

int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  if (malloc_func == NULL || realloc_func == NULL ||
      calloc_func == NULL || free_func == NULL) {
    return UV_EINVAL;
  }

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;

  return 0;
}
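
/* Illustrative usage sketch (not part of the library): an embedder would
 * install a custom allocator once, before any other libuv call, since the
 * replacement is not synchronized against concurrent allocations. The
 * my_malloc/my_realloc/my_calloc/my_free names below are hypothetical
 * functions matching the uv_malloc_func, uv_realloc_func, uv_calloc_func
 * and uv_free_func signatures:
 *
 *   if (uv_replace_allocator(my_malloc, my_realloc, my_calloc, my_free) != 0)
 *     abort();  /- returns UV_EINVAL if any of the four pointers is NULL -/
 */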

#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}

size_t uv_req_size(uv_req_type type) {
  switch (type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX


size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}


uv_buf_t uv_buf_init(char* base, unsigned int len) {
  uv_buf_t buf;
  buf.base = base;
  buf.len = len;
  return buf;
}


static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}

#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R


#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN


#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R


#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN


int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
#ifdef SIN6_LEN
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}


int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
#ifdef SIN6_LEN
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    addr->sin6_scope_id = atoi(zone_index);
#else
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}
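
/* Illustrative sketch (not part of the library): a link-local address with a
 * zone index is passed as a single string; the part after '%' is mapped to
 * sin6_scope_id as shown above. "eth0" is a placeholder interface name; on
 * Windows the zone must be the numeric interface index instead:
 *
 *   struct sockaddr_in6 addr;
 *   int r = uv_ip6_addr("fe80::1%eth0", 4242, &addr);
 */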


int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}


int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}


int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_bind(handle, addr, addrlen, flags);
}


int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
  unsigned extra_flags;
  int domain;
  int rc;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  /* Use the higher bits for extra flags. */
  extra_flags = flags & ~0xFF;
  if (extra_flags & ~UV_UDP_RECVMMSG)
    return UV_EINVAL;

  rc = uv__udp_init_ex(loop, handle, flags, domain);

  if (rc == 0)
    if (extra_flags & UV_UDP_RECVMMSG)
      handle->flags |= UV_HANDLE_UDP_RECVMMSG;

  return rc;
}
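
/* Illustrative sketch (not part of the library): the flags argument packs the
 * address family into the low byte and feature bits above it, so a caller can
 * combine them with a bitwise OR, assuming a libuv version that defines
 * UV_UDP_RECVMMSG:
 *
 *   uv_udp_t handle;
 *   int r = uv_udp_init_ex(loop, &handle, AF_INET | UV_UDP_RECVMMSG);
 */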


int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}


int uv_udp_bind(uv_udp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__udp_bind(handle, addr, addrlen, flags);
}


int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_connect(req, handle, addr, addrlen, cb);
}


int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;

    return uv__udp_disconnect(handle);
  }

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, addrlen);
}


int uv__udp_is_connected(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;
  if (handle->type != UV_UDP)
    return 0;

  addrlen = sizeof(addr);
  if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}


int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EISCONN;

  if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EDESTADDRREQ;

  if (addr != NULL) {
    if (addr->sa_family == AF_INET)
      addrlen = sizeof(struct sockaddr_in);
    else if (addr->sa_family == AF_INET6)
      addrlen = sizeof(struct sockaddr_in6);
#if defined(AF_UNIX) && !defined(_WIN32)
    else if (addr->sa_family == AF_UNIX)
      addrlen = sizeof(struct sockaddr_un);
#endif
    else
      return UV_EINVAL;
  } else {
    addrlen = 0;
  }

  return addrlen;
}


int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}


int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}


int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;
  else
    return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}


int uv_udp_recv_stop(uv_udp_t* handle) {
  if (handle->type != UV_UDP)
    return UV_EINVAL;
  else
    return uv__udp_recv_stop(handle);
}


void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  QUEUE queue;
  QUEUE* q;
  uv_handle_t* h;

  QUEUE_MOVE(&loop->handle_queue, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->handle_queue, q);

    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}
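
/* Illustrative sketch (not part of the library): uv_walk() is commonly used
 * by embedders to close every remaining handle before tearing down a loop:
 *
 *   static void close_walk_cb(uv_handle_t* handle, void* arg) {
 *     if (!uv_is_closing(handle))
 *       uv_close(handle, NULL);
 *   }
 *
 *   uv_walk(loop, close_walk_cb, NULL);
 *   uv_run(loop, UV_RUN_DEFAULT);  /- let the close callbacks run -/
 */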


static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  QUEUE* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}


void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}


void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}


void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}


void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}


int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}


void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}


uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}


size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
  unsigned int i;
  size_t bytes;

  bytes = 0;
  for (i = 0; i < nbufs; i++)
    bytes += (size_t) bufs[i].len;

  return bytes;
}

int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}

int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}

int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
  size_t required_len;

  if (!uv__is_active(handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  required_len = strlen(handle->path);
  if (required_len >= *size) {
    *size = required_len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, handle->path, required_len);
  *size = required_len;
  buffer[required_len] = '\0';

  return 0;
}
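
/* Illustrative sketch (not part of the library): callers typically probe for
 * the required buffer size first, since UV_ENOBUFS reports it through *size
 * (including the terminating NUL, as implemented above):
 *
 *   char small[8];
 *   size_t len = sizeof(small);
 *   int r = uv_fs_event_getpath(handle, small, &len);
 *   if (r == UV_ENOBUFS) {
 *     char* big = malloc(len);
 *     if (big != NULL) {
 *       r = uv_fs_event_getpath(handle, big, &len);
 *       /- ... use big, then free(big) ... -/
 *     }
 *   }
 */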

/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
 */
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}

/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
 */
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif

void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;

  unsigned int* nbufs = uv__get_nbufs(req);

  dents = req->ptr;
  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
    (*nbufs)--;
  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
    uv__fs_scandir_free(dents[*nbufs]);

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}


int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  dent = dents[(*nbufs)++];

  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}
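
/* Illustrative sketch (not part of the library): a synchronous scandir walk
 * drains entries with uv_fs_scandir_next() until it returns UV_EOF:
 *
 *   uv_fs_t req;
 *   uv_dirent_t ent;
 *   if (uv_fs_scandir(loop, &req, ".", 0, NULL) >= 0) {
 *     while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
 *       printf("%s\n", ent.name);
 *   }
 *   uv_fs_req_cleanup(&req);
 */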

uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}

void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirents;
  int i;

  if (req->ptr == NULL)
    return;

  dir = req->ptr;
  dirents = dir->dirents;
  req->ptr = NULL;

  if (dirents == NULL)
    return;

  for (i = 0; i < req->result; ++i) {
    uv__free((char*) dirents[i].name);
    dirents[i].name = NULL;
  }
}


int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}


static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;


uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}


uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop;

  loop = uv__malloc(sizeof(*loop));
  if (loop == NULL)
    return NULL;

  if (uv_loop_init(loop)) {
    uv__free(loop);
    return NULL;
  }

  return loop;
}


int uv_loop_close(uv_loop_t* loop) {
  QUEUE* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop))
    return UV_EBUSY;

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL))
      return UV_EBUSY;
  }

  uv__loop_close(loop);

#ifndef NDEBUG
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}


void uv_loop_delete(uv_loop_t* loop) {
  uv_loop_t* default_loop;
  int err;

  default_loop = default_loop_ptr;

  err = uv_loop_close(loop);
  (void) err; /* Squelch compiler warnings. */
  assert(err == 0);
  if (loop != default_loop)
    uv__free(loop);
}


int uv_read_start(uv_stream_t* stream,
                  uv_alloc_cb alloc_cb,
                  uv_read_cb read_cb) {
  if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_READING)
    return UV_EALREADY;

  if (!(stream->flags & UV_HANDLE_READABLE))
    return UV_ENOTCONN;

  return uv__read_start(stream, alloc_cb, read_cb);
}


void uv_os_free_environ(uv_env_item_t* envitems, int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(envitems[i].name);
  }

  uv__free(envitems);
}


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int i;

  for (i = 0; i < count; i++)
    uv__free(cpu_infos[i].model);

  uv__free(cpu_infos);
}


#ifdef __GNUC__ /* Also covers __clang__ and __INTEL_COMPILER. */
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
  uv__threadpool_cleanup();
  uv__store_relaxed(&was_shutdown, 1);
}


void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}


void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t now;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  now = uv_hrtime();
  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  loop_metrics->provider_entry_time = now;
  uv_mutex_unlock(&loop_metrics->lock);
}


uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t idle_time;

  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  idle_time = loop_metrics->provider_idle_time;
  entry_time = loop_metrics->provider_entry_time;
  uv_mutex_unlock(&loop_metrics->lock);

  if (entry_time > 0)
    idle_time += uv_hrtime() - entry_time;
  return idle_time;
}
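
/* Illustrative sketch (not part of the library): idle time is only collected
 * when the loop has been configured with UV_METRICS_IDLE_TIME, typically
 * before the loop is first run:
 *
 *   uv_loop_configure(loop, UV_METRICS_IDLE_TIME);
 *   uv_run(loop, UV_RUN_DEFAULT);
 *   printf("idle: %llu ns\n",
 *          (unsigned long long) uv_metrics_idle_time(loop));
 */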