1 | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | * |
3 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
4 | * of this software and associated documentation files (the "Software"), to |
5 | * deal in the Software without restriction, including without limitation the |
6 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
7 | * sell copies of the Software, and to permit persons to whom the Software is |
8 | * furnished to do so, subject to the following conditions: |
9 | * |
10 | * The above copyright notice and this permission notice shall be included in |
11 | * all copies or substantial portions of the Software. |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
16 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
17 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
18 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
19 | * IN THE SOFTWARE. |
20 | */ |
21 | |
22 | /* Caveat emptor: this file deviates from the libuv convention of returning |
23 | * negated errno codes. Most uv_fs_*() functions map directly to the system |
24 | * call of the same name. For more complex wrappers, it's easier to just |
25 | * return -1 with errno set. The dispatcher in uv__fs_work() takes care of |
 * getting the errno to the right place (req->result or the return value).
27 | */ |
28 | |
29 | #include "uv.h" |
30 | #include "internal.h" |
31 | |
32 | #include <errno.h> |
33 | #include <dlfcn.h> |
34 | #include <stdio.h> |
35 | #include <stdlib.h> |
36 | #include <string.h> |
37 | #include <limits.h> /* PATH_MAX */ |
38 | |
39 | #include <sys/types.h> |
40 | #include <sys/socket.h> |
41 | #include <sys/stat.h> |
42 | #include <sys/time.h> |
43 | #include <sys/uio.h> |
44 | #include <pthread.h> |
45 | #include <unistd.h> |
46 | #include <fcntl.h> |
47 | #include <poll.h> |
48 | |
49 | #if defined(__DragonFly__) || \ |
50 | defined(__FreeBSD__) || \ |
51 | defined(__FreeBSD_kernel__) || \ |
52 | defined(__OpenBSD__) || \ |
53 | defined(__NetBSD__) |
54 | # define HAVE_PREADV 1 |
55 | #else |
56 | # define HAVE_PREADV 0 |
57 | #endif |
58 | |
59 | #if defined(__linux__) || defined(__sun) |
60 | # include <sys/sendfile.h> |
61 | # include <sys/sysmacros.h> |
62 | #endif |
63 | |
64 | #if defined(__APPLE__) |
65 | # include <sys/sysctl.h> |
66 | #elif defined(__linux__) && !defined(FICLONE) |
67 | # include <sys/ioctl.h> |
68 | # define FICLONE _IOW(0x94, 9, int) |
69 | #endif |
70 | |
71 | #if defined(_AIX) && !defined(_AIX71) |
72 | # include <utime.h> |
73 | #endif |
74 | |
75 | #if defined(__APPLE__) || \ |
76 | defined(__DragonFly__) || \ |
77 | defined(__FreeBSD__) || \ |
78 | defined(__FreeBSD_kernel__) || \ |
79 | defined(__OpenBSD__) || \ |
80 | defined(__NetBSD__) |
81 | # include <sys/param.h> |
82 | # include <sys/mount.h> |
83 | #elif defined(__sun) || \ |
84 | defined(__MVS__) || \ |
85 | defined(__NetBSD__) || \ |
86 | defined(__HAIKU__) || \ |
87 | defined(__QNX__) |
88 | # include <sys/statvfs.h> |
89 | #else |
90 | # include <sys/statfs.h> |
91 | #endif |
92 | |
93 | #if defined(_AIX) && _XOPEN_SOURCE <= 600 |
94 | extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */ |
95 | #endif |
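
/* Helper macros shared by the uv_fs_*() entry points below:
 *   INIT(type)  validates the request and resets its fields,
 *   PATH/PATH2  stash the path(s); asynchronous calls get a private copy,
 *   POST        submits the request to the thread pool (async) or runs
 *               uv__fs_work() inline and returns the result (sync).
 */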
96 | |
97 | #define INIT(subtype) \ |
98 | do { \ |
99 | if (req == NULL) \ |
100 | return UV_EINVAL; \ |
101 | UV_REQ_INIT(req, UV_FS); \ |
102 | req->fs_type = UV_FS_ ## subtype; \ |
103 | req->result = 0; \ |
104 | req->ptr = NULL; \ |
105 | req->loop = loop; \ |
106 | req->path = NULL; \ |
107 | req->new_path = NULL; \ |
108 | req->bufs = NULL; \ |
109 | req->cb = cb; \ |
110 | } \ |
111 | while (0) |
112 | |
113 | #define PATH \ |
114 | do { \ |
115 | assert(path != NULL); \ |
116 | if (cb == NULL) { \ |
117 | req->path = path; \ |
118 | } else { \ |
119 | req->path = uv__strdup(path); \ |
120 | if (req->path == NULL) \ |
121 | return UV_ENOMEM; \ |
122 | } \ |
123 | } \ |
124 | while (0) |
125 | |
126 | #define PATH2 \ |
127 | do { \ |
128 | if (cb == NULL) { \ |
129 | req->path = path; \ |
130 | req->new_path = new_path; \ |
131 | } else { \ |
132 | size_t path_len; \ |
133 | size_t new_path_len; \ |
134 | path_len = strlen(path) + 1; \ |
135 | new_path_len = strlen(new_path) + 1; \ |
136 | req->path = uv__malloc(path_len + new_path_len); \ |
137 | if (req->path == NULL) \ |
138 | return UV_ENOMEM; \ |
139 | req->new_path = req->path + path_len; \ |
140 | memcpy((void*) req->path, path, path_len); \ |
141 | memcpy((void*) req->new_path, new_path, new_path_len); \ |
142 | } \ |
143 | } \ |
144 | while (0) |
145 | |
146 | #define POST \ |
147 | do { \ |
148 | if (cb != NULL) { \ |
149 | uv__req_register(loop, req); \ |
150 | uv__work_submit(loop, \ |
151 | &req->work_req, \ |
152 | UV__WORK_FAST_IO, \ |
153 | uv__fs_work, \ |
154 | uv__fs_done); \ |
155 | return 0; \ |
156 | } \ |
157 | else { \ |
158 | uv__fs_work(&req->work_req); \ |
159 | return req->result; \ |
160 | } \ |
161 | } \ |
162 | while (0) |
163 | |
164 | |
165 | static int uv__fs_close(int fd) { |
166 | int rc; |
167 | |
168 | rc = uv__close_nocancel(fd); |
169 | if (rc == -1) |
170 | if (errno == EINTR || errno == EINPROGRESS) |
171 | rc = 0; /* The close is in progress, not an error. */ |
172 | |
173 | return rc; |
174 | } |
175 | |
176 | |
177 | static ssize_t uv__fs_fsync(uv_fs_t* req) { |
178 | #if defined(__APPLE__) |
179 | /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache |
180 | * to the drive platters. This is in contrast to Linux's fdatasync and fsync |
181 | * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent |
182 | * for flushing buffered data to permanent storage. If F_FULLFSYNC is not |
183 | * supported by the file system we fall back to F_BARRIERFSYNC or fsync(). |
184 | * This is the same approach taken by sqlite, except sqlite does not issue |
185 | * an F_BARRIERFSYNC call. |
186 | */ |
187 | int r; |
188 | |
189 | r = fcntl(req->file, F_FULLFSYNC); |
190 | if (r != 0) |
191 | r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */ |
192 | if (r != 0) |
193 | r = fsync(req->file); |
194 | return r; |
195 | #else |
196 | return fsync(req->file); |
197 | #endif |
198 | } |
199 | |
200 | |
201 | static ssize_t uv__fs_fdatasync(uv_fs_t* req) { |
202 | #if defined(__linux__) || defined(__sun) || defined(__NetBSD__) |
203 | return fdatasync(req->file); |
204 | #elif defined(__APPLE__) |
205 | /* See the comment in uv__fs_fsync. */ |
206 | return uv__fs_fsync(req); |
207 | #else |
208 | return fsync(req->file); |
209 | #endif |
210 | } |
211 | |
212 | |
213 | UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) { |
214 | struct timespec ts; |
215 | ts.tv_sec = time; |
216 | ts.tv_nsec = (time - ts.tv_sec) * 1e9; |
217 | |
  /* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
219 | * stick to microsecond resolution for the sake of consistency with other |
220 | * platforms. I'm the original author of this compatibility hack but I'm |
221 | * less convinced it's useful nowadays. |
222 | */ |
223 | ts.tv_nsec -= ts.tv_nsec % 1000; |
224 | |
225 | if (ts.tv_nsec < 0) { |
226 | ts.tv_nsec += 1e9; |
227 | ts.tv_sec -= 1; |
228 | } |
229 | return ts; |
230 | } |
231 | |
232 | UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) { |
233 | struct timeval tv; |
234 | tv.tv_sec = time; |
235 | tv.tv_usec = (time - tv.tv_sec) * 1e6; |
236 | if (tv.tv_usec < 0) { |
237 | tv.tv_usec += 1e6; |
238 | tv.tv_sec -= 1; |
239 | } |
240 | return tv; |
241 | } |
242 | |
243 | static ssize_t uv__fs_futime(uv_fs_t* req) { |
244 | #if defined(__linux__) \ |
245 | || defined(_AIX71) \ |
246 | || defined(__HAIKU__) |
247 | struct timespec ts[2]; |
248 | ts[0] = uv__fs_to_timespec(req->atime); |
249 | ts[1] = uv__fs_to_timespec(req->mtime); |
250 | return futimens(req->file, ts); |
251 | #elif defined(__APPLE__) \ |
252 | || defined(__DragonFly__) \ |
253 | || defined(__FreeBSD__) \ |
254 | || defined(__FreeBSD_kernel__) \ |
255 | || defined(__NetBSD__) \ |
256 | || defined(__OpenBSD__) \ |
257 | || defined(__sun) |
258 | struct timeval tv[2]; |
259 | tv[0] = uv__fs_to_timeval(req->atime); |
260 | tv[1] = uv__fs_to_timeval(req->mtime); |
261 | # if defined(__sun) |
262 | return futimesat(req->file, NULL, tv); |
263 | # else |
264 | return futimes(req->file, tv); |
265 | # endif |
266 | #elif defined(__MVS__) |
267 | attrib_t atr; |
268 | memset(&atr, 0, sizeof(atr)); |
269 | atr.att_mtimechg = 1; |
270 | atr.att_atimechg = 1; |
271 | atr.att_mtime = req->mtime; |
272 | atr.att_atime = req->atime; |
273 | return __fchattr(req->file, &atr, sizeof(atr)); |
274 | #else |
275 | errno = ENOSYS; |
276 | return -1; |
277 | #endif |
278 | } |
279 | |
280 | |
281 | static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { |
282 | return mkdtemp((char*) req->path) ? 0 : -1; |
283 | } |
284 | |
285 | |
286 | static int (*uv__mkostemp)(char*, int); |
287 | |
288 | |
289 | static void uv__mkostemp_initonce(void) { |
290 | /* z/os doesn't have RTLD_DEFAULT but that's okay |
291 | * because it doesn't have mkostemp(O_CLOEXEC) either. |
292 | */ |
293 | #ifdef RTLD_DEFAULT |
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
295 | |
296 | /* We don't care about errors, but we do want to clean them up. |
297 | * If there has been no error, then dlerror() will just return |
298 | * NULL. |
299 | */ |
300 | dlerror(); |
301 | #endif /* RTLD_DEFAULT */ |
302 | } |
303 | |
304 | |
305 | static int uv__fs_mkstemp(uv_fs_t* req) { |
306 | static uv_once_t once = UV_ONCE_INIT; |
307 | int r; |
308 | #ifdef O_CLOEXEC |
309 | static int no_cloexec_support; |
310 | #endif |
  static const char pattern[] = "XXXXXX";
312 | static const size_t pattern_size = sizeof(pattern) - 1; |
313 | char* path; |
314 | size_t path_length; |
315 | |
316 | path = (char*) req->path; |
317 | path_length = strlen(path); |
318 | |
319 | /* EINVAL can be returned for 2 reasons: |
320 | 1. The template's last 6 characters were not XXXXXX |
321 | 2. open() didn't support O_CLOEXEC |
      We want to avoid taking the fallback path for reason 1, so the
      template is checked manually first. */
324 | if (path_length < pattern_size || |
325 | strcmp(path + path_length - pattern_size, pattern)) { |
326 | errno = EINVAL; |
327 | r = -1; |
328 | goto clobber; |
329 | } |
330 | |
331 | uv_once(&once, uv__mkostemp_initonce); |
332 | |
333 | #ifdef O_CLOEXEC |
334 | if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) { |
335 | r = uv__mkostemp(path, O_CLOEXEC); |
336 | |
337 | if (r >= 0) |
338 | return r; |
339 | |
340 | /* If mkostemp() returns EINVAL, it means the kernel doesn't |
       support O_CLOEXEC, so we just fall back to mkstemp() below. */
342 | if (errno != EINVAL) |
343 | goto clobber; |
344 | |
    /* Set the static flag so that subsequent calls don't even
       try to use mkostemp(). */
347 | uv__store_relaxed(&no_cloexec_support, 1); |
348 | } |
349 | #endif /* O_CLOEXEC */ |
350 | |
351 | if (req->cb != NULL) |
352 | uv_rwlock_rdlock(&req->loop->cloexec_lock); |
353 | |
354 | r = mkstemp(path); |
355 | |
  /* In case of failure `uv__cloexec` will leave the error in `errno`,
357 | * so it is enough to just set `r` to `-1`. |
358 | */ |
359 | if (r >= 0 && uv__cloexec(r, 1) != 0) { |
360 | r = uv__close(r); |
361 | if (r != 0) |
362 | abort(); |
363 | r = -1; |
364 | } |
365 | |
366 | if (req->cb != NULL) |
367 | uv_rwlock_rdunlock(&req->loop->cloexec_lock); |
368 | |
369 | clobber: |
370 | if (r < 0) |
371 | path[0] = '\0'; |
372 | return r; |
373 | } |
374 | |
375 | |
376 | static ssize_t uv__fs_open(uv_fs_t* req) { |
377 | #ifdef O_CLOEXEC |
378 | return open(req->path, req->flags | O_CLOEXEC, req->mode); |
379 | #else /* O_CLOEXEC */ |
380 | int r; |
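
  /* Take the loop's cloexec lock for reading so that a concurrently spawned
   * child process cannot inherit the descriptor before FD_CLOEXEC has been
   * set on it.
   */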
381 | |
382 | if (req->cb != NULL) |
383 | uv_rwlock_rdlock(&req->loop->cloexec_lock); |
384 | |
385 | r = open(req->path, req->flags, req->mode); |
386 | |
  /* In case of failure `uv__cloexec` will leave the error in `errno`,
388 | * so it is enough to just set `r` to `-1`. |
389 | */ |
390 | if (r >= 0 && uv__cloexec(r, 1) != 0) { |
391 | r = uv__close(r); |
392 | if (r != 0) |
393 | abort(); |
394 | r = -1; |
395 | } |
396 | |
397 | if (req->cb != NULL) |
398 | uv_rwlock_rdunlock(&req->loop->cloexec_lock); |
399 | |
400 | return r; |
401 | #endif /* O_CLOEXEC */ |
402 | } |
403 | |
404 | |
405 | #if !HAVE_PREADV |
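/* Emulate preadv() with consecutive pread() calls: fill each buffer in turn,
 * retrying on EINTR, and stop at EOF or on error. An error is only reported
 * when nothing has been read yet; otherwise the partial byte count is
 * returned.
 */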
406 | static ssize_t uv__fs_preadv(uv_file fd, |
407 | uv_buf_t* bufs, |
408 | unsigned int nbufs, |
409 | off_t off) { |
410 | uv_buf_t* buf; |
411 | uv_buf_t* end; |
412 | ssize_t result; |
413 | ssize_t rc; |
414 | size_t pos; |
415 | |
416 | assert(nbufs > 0); |
417 | |
418 | result = 0; |
419 | pos = 0; |
420 | buf = bufs + 0; |
421 | end = bufs + nbufs; |
422 | |
423 | for (;;) { |
424 | do |
425 | rc = pread(fd, buf->base + pos, buf->len - pos, off + result); |
426 | while (rc == -1 && errno == EINTR); |
427 | |
428 | if (rc == 0) |
429 | break; |
430 | |
431 | if (rc == -1 && result == 0) |
432 | return UV__ERR(errno); |
433 | |
434 | if (rc == -1) |
435 | break; /* We read some data so return that, ignore the error. */ |
436 | |
437 | pos += rc; |
438 | result += rc; |
439 | |
440 | if (pos < buf->len) |
441 | continue; |
442 | |
443 | pos = 0; |
444 | buf += 1; |
445 | |
446 | if (buf == end) |
447 | break; |
448 | } |
449 | |
450 | return result; |
451 | } |
452 | #endif |
453 | |
454 | |
455 | static ssize_t uv__fs_read(uv_fs_t* req) { |
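  /* Clamp the buffer count to the platform's IOV_MAX, then dispatch to
   * read()/readv() when no offset was given. Positional reads use pread()
   * for a single buffer and preadv() (or the emulation above) otherwise.
   */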
456 | #if defined(__linux__) |
457 | static int no_preadv; |
458 | #endif |
459 | unsigned int iovmax; |
460 | ssize_t result; |
461 | |
462 | iovmax = uv__getiovmax(); |
463 | if (req->nbufs > iovmax) |
464 | req->nbufs = iovmax; |
465 | |
466 | if (req->off < 0) { |
467 | if (req->nbufs == 1) |
468 | result = read(req->file, req->bufs[0].base, req->bufs[0].len); |
469 | else |
470 | result = readv(req->file, (struct iovec*) req->bufs, req->nbufs); |
471 | } else { |
472 | if (req->nbufs == 1) { |
473 | result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off); |
474 | goto done; |
475 | } |
476 | |
477 | #if HAVE_PREADV |
478 | result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); |
479 | #else |
480 | # if defined(__linux__) |
481 | if (uv__load_relaxed(&no_preadv)) retry: |
482 | # endif |
483 | { |
484 | result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off); |
485 | } |
486 | # if defined(__linux__) |
487 | else { |
488 | result = uv__preadv(req->file, |
489 | (struct iovec*)req->bufs, |
490 | req->nbufs, |
491 | req->off); |
492 | if (result == -1 && errno == ENOSYS) { |
493 | uv__store_relaxed(&no_preadv, 1); |
494 | goto retry; |
495 | } |
496 | } |
497 | # endif |
498 | #endif |
499 | } |
500 | |
501 | done: |
502 | /* Early cleanup of bufs allocation, since we're done with it. */ |
503 | if (req->bufs != req->bufsml) |
504 | uv__free(req->bufs); |
505 | |
506 | req->bufs = NULL; |
507 | req->nbufs = 0; |
508 | |
509 | #ifdef __PASE__ |
510 | /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */ |
511 | if (result == -1 && errno == EOPNOTSUPP) { |
512 | struct stat buf; |
513 | ssize_t rc; |
514 | rc = fstat(req->file, &buf); |
515 | if (rc == 0 && S_ISDIR(buf.st_mode)) { |
516 | errno = EISDIR; |
517 | } |
518 | } |
519 | #endif |
520 | |
521 | return result; |
522 | } |
523 | |
524 | |
525 | #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8) |
526 | #define UV_CONST_DIRENT uv__dirent_t |
527 | #else |
528 | #define UV_CONST_DIRENT const uv__dirent_t |
529 | #endif |
530 | |
531 | |
532 | static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) { |
  return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
534 | } |
535 | |
536 | |
537 | static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) { |
538 | return strcmp((*a)->d_name, (*b)->d_name); |
539 | } |
540 | |
541 | |
542 | static ssize_t uv__fs_scandir(uv_fs_t* req) { |
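  /* Collect the directory entries with scandir(3), filtered and sorted by the
   * helpers above. The resulting array is stored in req->ptr and consumed
   * entry by entry via uv_fs_scandir_next().
   */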
543 | uv__dirent_t** dents; |
544 | int n; |
545 | |
546 | dents = NULL; |
547 | n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort); |
548 | |
549 | /* NOTE: We will use nbufs as an index field */ |
550 | req->nbufs = 0; |
551 | |
552 | if (n == 0) { |
553 | /* OS X still needs to deallocate some memory. |
554 | * Memory was allocated using the system allocator, so use free() here. |
555 | */ |
556 | free(dents); |
557 | dents = NULL; |
558 | } else if (n == -1) { |
559 | return n; |
560 | } |
561 | |
562 | req->ptr = dents; |
563 | |
564 | return n; |
565 | } |
566 | |
567 | static int uv__fs_opendir(uv_fs_t* req) { |
568 | uv_dir_t* dir; |
569 | |
570 | dir = uv__malloc(sizeof(*dir)); |
571 | if (dir == NULL) |
572 | goto error; |
573 | |
574 | dir->dir = opendir(req->path); |
575 | if (dir->dir == NULL) |
576 | goto error; |
577 | |
578 | req->ptr = dir; |
579 | return 0; |
580 | |
581 | error: |
582 | uv__free(dir); |
583 | req->ptr = NULL; |
584 | return -1; |
585 | } |
586 | |
587 | static int uv__fs_readdir(uv_fs_t* req) { |
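  /* Fill dir->dirents with up to dir->nentries entries, skipping "." and "..",
   * duplicating each name. On error, free the names copied so far and return
   * -1; otherwise return the number of entries read.
   */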
588 | uv_dir_t* dir; |
589 | uv_dirent_t* dirent; |
590 | struct dirent* res; |
591 | unsigned int dirent_idx; |
592 | unsigned int i; |
593 | |
594 | dir = req->ptr; |
595 | dirent_idx = 0; |
596 | |
597 | while (dirent_idx < dir->nentries) { |
598 | /* readdir() returns NULL on end of directory, as well as on error. errno |
599 | is used to differentiate between the two conditions. */ |
600 | errno = 0; |
601 | res = readdir(dir->dir); |
602 | |
603 | if (res == NULL) { |
604 | if (errno != 0) |
605 | goto error; |
606 | break; |
607 | } |
608 | |
    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
610 | continue; |
611 | |
612 | dirent = &dir->dirents[dirent_idx]; |
613 | dirent->name = uv__strdup(res->d_name); |
614 | |
615 | if (dirent->name == NULL) |
616 | goto error; |
617 | |
618 | dirent->type = uv__fs_get_dirent_type(res); |
619 | ++dirent_idx; |
620 | } |
621 | |
622 | return dirent_idx; |
623 | |
624 | error: |
625 | for (i = 0; i < dirent_idx; ++i) { |
626 | uv__free((char*) dir->dirents[i].name); |
627 | dir->dirents[i].name = NULL; |
628 | } |
629 | |
630 | return -1; |
631 | } |
632 | |
633 | static int uv__fs_closedir(uv_fs_t* req) { |
634 | uv_dir_t* dir; |
635 | |
636 | dir = req->ptr; |
637 | |
638 | if (dir->dir != NULL) { |
639 | closedir(dir->dir); |
640 | dir->dir = NULL; |
641 | } |
642 | |
643 | uv__free(req->ptr); |
644 | req->ptr = NULL; |
645 | return 0; |
646 | } |
647 | |
648 | static int uv__fs_statfs(uv_fs_t* req) { |
649 | uv_statfs_t* stat_fs; |
650 | #if defined(__sun) || \ |
651 | defined(__MVS__) || \ |
652 | defined(__NetBSD__) || \ |
653 | defined(__HAIKU__) || \ |
654 | defined(__QNX__) |
655 | struct statvfs buf; |
656 | |
657 | if (0 != statvfs(req->path, &buf)) |
658 | #else |
659 | struct statfs buf; |
660 | |
661 | if (0 != statfs(req->path, &buf)) |
662 | #endif /* defined(__sun) */ |
663 | return -1; |
664 | |
665 | stat_fs = uv__malloc(sizeof(*stat_fs)); |
666 | if (stat_fs == NULL) { |
667 | errno = ENOMEM; |
668 | return -1; |
669 | } |
670 | |
671 | #if defined(__sun) || \ |
672 | defined(__MVS__) || \ |
673 | defined(__OpenBSD__) || \ |
674 | defined(__NetBSD__) || \ |
675 | defined(__HAIKU__) || \ |
676 | defined(__QNX__) |
677 | stat_fs->f_type = 0; /* f_type is not supported. */ |
678 | #else |
679 | stat_fs->f_type = buf.f_type; |
680 | #endif |
681 | stat_fs->f_bsize = buf.f_bsize; |
682 | stat_fs->f_blocks = buf.f_blocks; |
683 | stat_fs->f_bfree = buf.f_bfree; |
684 | stat_fs->f_bavail = buf.f_bavail; |
685 | stat_fs->f_files = buf.f_files; |
686 | stat_fs->f_ffree = buf.f_ffree; |
687 | req->ptr = stat_fs; |
688 | return 0; |
689 | } |
690 | |
691 | static ssize_t uv__fs_pathmax_size(const char* path) { |
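  /* Ask the file system for its path length limit; fall back to UV__PATH_MAX
   * when pathconf() has no answer.
   */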
692 | ssize_t pathmax; |
693 | |
694 | pathmax = pathconf(path, _PC_PATH_MAX); |
695 | |
696 | if (pathmax == -1) |
697 | pathmax = UV__PATH_MAX; |
698 | |
699 | return pathmax; |
700 | } |
701 | |
702 | static ssize_t uv__fs_readlink(uv_fs_t* req) { |
703 | ssize_t maxlen; |
704 | ssize_t len; |
705 | char* buf; |
706 | |
707 | #if defined(_POSIX_PATH_MAX) || defined(PATH_MAX) |
708 | maxlen = uv__fs_pathmax_size(req->path); |
709 | #else |
710 | /* We may not have a real PATH_MAX. Read size of link. */ |
711 | struct stat st; |
712 | int ret; |
713 | ret = lstat(req->path, &st); |
714 | if (ret != 0) |
715 | return -1; |
716 | if (!S_ISLNK(st.st_mode)) { |
717 | errno = EINVAL; |
718 | return -1; |
719 | } |
720 | |
721 | maxlen = st.st_size; |
722 | |
723 | /* According to readlink(2) lstat can report st_size == 0 |
724 | for some symlinks, such as those in /proc or /sys. */ |
725 | if (maxlen == 0) |
726 | maxlen = uv__fs_pathmax_size(req->path); |
727 | #endif |
728 | |
729 | buf = uv__malloc(maxlen); |
730 | |
731 | if (buf == NULL) { |
732 | errno = ENOMEM; |
733 | return -1; |
734 | } |
735 | |
736 | #if defined(__MVS__) |
737 | len = os390_readlink(req->path, buf, maxlen); |
738 | #else |
739 | len = readlink(req->path, buf, maxlen); |
740 | #endif |
741 | |
742 | if (len == -1) { |
743 | uv__free(buf); |
744 | return -1; |
745 | } |
746 | |
747 | /* Uncommon case: resize to make room for the trailing nul byte. */ |
748 | if (len == maxlen) { |
749 | buf = uv__reallocf(buf, len + 1); |
750 | |
751 | if (buf == NULL) |
752 | return -1; |
753 | } |
754 | |
755 | buf[len] = '\0'; |
756 | req->ptr = buf; |
757 | |
758 | return 0; |
759 | } |
760 | |
761 | static ssize_t uv__fs_realpath(uv_fs_t* req) { |
762 | char* buf; |
763 | |
764 | #if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L |
765 | buf = realpath(req->path, NULL); |
766 | if (buf == NULL) |
767 | return -1; |
768 | #else |
769 | ssize_t len; |
770 | |
771 | len = uv__fs_pathmax_size(req->path); |
772 | buf = uv__malloc(len + 1); |
773 | |
774 | if (buf == NULL) { |
775 | errno = ENOMEM; |
776 | return -1; |
777 | } |
778 | |
779 | if (realpath(req->path, buf) == NULL) { |
780 | uv__free(buf); |
781 | return -1; |
782 | } |
783 | #endif |
784 | |
785 | req->ptr = buf; |
786 | |
787 | return 0; |
788 | } |
789 | |
790 | static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) { |
791 | struct pollfd pfd; |
792 | int use_pread; |
793 | off_t offset; |
794 | ssize_t nsent; |
795 | ssize_t nread; |
796 | ssize_t nwritten; |
797 | size_t buflen; |
798 | size_t len; |
799 | ssize_t n; |
800 | int in_fd; |
801 | int out_fd; |
802 | char buf[8192]; |
803 | |
804 | len = req->bufsml[0].len; |
805 | in_fd = req->flags; |
806 | out_fd = req->file; |
807 | offset = req->off; |
808 | use_pread = 1; |
809 | |
810 | /* Here are the rules regarding errors: |
811 | * |
812 | * 1. Read errors are reported only if nsent==0, otherwise we return nsent. |
813 | * The user needs to know that some data has already been sent, to stop |
814 | * them from sending it twice. |
815 | * |
816 | * 2. Write errors are always reported. Write errors are bad because they |
817 | * mean data loss: we've read data but now we can't write it out. |
818 | * |
819 | * We try to use pread() and fall back to regular read() if the source fd |
820 | * doesn't support positional reads, for example when it's a pipe fd. |
821 | * |
822 | * If we get EAGAIN when writing to the target fd, we poll() on it until |
823 | * it becomes writable again. |
824 | * |
825 | * FIXME: If we get a write error when use_pread==1, it should be safe to |
826 | * return the number of sent bytes instead of an error because pread() |
827 | * is, in theory, idempotent. However, special files in /dev or /proc |
828 | * may support pread() but not necessarily return the same data on |
829 | * successive reads. |
830 | * |
   * FIXME: There is currently no way to signal that we managed to send *some* data
832 | * before a write error. |
833 | */ |
834 | for (nsent = 0; (size_t) nsent < len; ) { |
835 | buflen = len - nsent; |
836 | |
837 | if (buflen > sizeof(buf)) |
838 | buflen = sizeof(buf); |
839 | |
840 | do |
841 | if (use_pread) |
842 | nread = pread(in_fd, buf, buflen, offset); |
843 | else |
844 | nread = read(in_fd, buf, buflen); |
845 | while (nread == -1 && errno == EINTR); |
846 | |
847 | if (nread == 0) |
848 | goto out; |
849 | |
850 | if (nread == -1) { |
851 | if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) { |
852 | use_pread = 0; |
853 | continue; |
854 | } |
855 | |
856 | if (nsent == 0) |
857 | nsent = -1; |
858 | |
859 | goto out; |
860 | } |
861 | |
862 | for (nwritten = 0; nwritten < nread; ) { |
863 | do |
864 | n = write(out_fd, buf + nwritten, nread - nwritten); |
865 | while (n == -1 && errno == EINTR); |
866 | |
867 | if (n != -1) { |
868 | nwritten += n; |
869 | continue; |
870 | } |
871 | |
872 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
873 | nsent = -1; |
874 | goto out; |
875 | } |
876 | |
877 | pfd.fd = out_fd; |
878 | pfd.events = POLLOUT; |
879 | pfd.revents = 0; |
880 | |
881 | do |
882 | n = poll(&pfd, 1, -1); |
883 | while (n == -1 && errno == EINTR); |
884 | |
885 | if (n == -1 || (pfd.revents & ~POLLOUT) != 0) { |
886 | errno = EIO; |
887 | nsent = -1; |
888 | goto out; |
889 | } |
890 | } |
891 | |
892 | offset += nread; |
893 | nsent += nread; |
894 | } |
895 | |
896 | out: |
897 | if (nsent != -1) |
898 | req->off = offset; |
899 | |
900 | return nsent; |
901 | } |
902 | |
903 | |
904 | static ssize_t uv__fs_sendfile(uv_fs_t* req) { |
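  /* Note: uv_fs_sendfile() stores the source fd in req->flags and the
   * destination fd in req->file (see the "hack" comment there).
   */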
905 | int in_fd; |
906 | int out_fd; |
907 | |
908 | in_fd = req->flags; |
909 | out_fd = req->file; |
910 | |
911 | #if defined(__linux__) || defined(__sun) |
912 | { |
913 | off_t off; |
914 | ssize_t r; |
915 | |
916 | off = req->off; |
917 | |
918 | #ifdef __linux__ |
919 | { |
920 | static int copy_file_range_support = 1; |
921 | |
922 | if (copy_file_range_support) { |
923 | r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0); |
924 | |
925 | if (r == -1 && errno == ENOSYS) { |
926 | /* ENOSYS - it will never work */ |
927 | errno = 0; |
928 | copy_file_range_support = 0; |
929 | } else if (r == -1 && errno == ENOTSUP) { |
930 | /* ENOTSUP - it could work on another file system type */ |
931 | errno = 0; |
932 | } else { |
933 | goto ok; |
934 | } |
935 | } |
936 | } |
937 | #endif |
938 | |
939 | r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len); |
940 | |
941 | ok: |
942 | /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but |
943 | * it still writes out data. Fortunately, we can detect it by checking if |
944 | * the offset has been updated. |
945 | */ |
946 | if (r != -1 || off > req->off) { |
947 | r = off - req->off; |
948 | req->off = off; |
949 | return r; |
950 | } |
951 | |
952 | if (errno == EINVAL || |
953 | errno == EIO || |
954 | errno == ENOTSOCK || |
955 | errno == EXDEV) { |
956 | errno = 0; |
957 | return uv__fs_sendfile_emul(req); |
958 | } |
959 | |
960 | return -1; |
961 | } |
962 | #elif defined(__APPLE__) || \ |
963 | defined(__DragonFly__) || \ |
964 | defined(__FreeBSD__) || \ |
965 | defined(__FreeBSD_kernel__) |
966 | { |
967 | off_t len; |
968 | ssize_t r; |
969 | |
970 | /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in |
971 | * non-blocking mode and not all data could be written. If a non-zero |
972 | * number of bytes have been sent, we don't consider it an error. |
973 | */ |
974 | |
975 | #if defined(__FreeBSD__) || defined(__DragonFly__) |
976 | len = 0; |
977 | r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0); |
978 | #elif defined(__FreeBSD_kernel__) |
979 | len = 0; |
980 | r = bsd_sendfile(in_fd, |
981 | out_fd, |
982 | req->off, |
983 | req->bufsml[0].len, |
984 | NULL, |
985 | &len, |
986 | 0); |
987 | #else |
    /* The Darwin sendfile() takes len as an input for the length to send,
989 | * so make sure to initialize it with the caller's value. */ |
990 | len = req->bufsml[0].len; |
991 | r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0); |
992 | #endif |
993 | |
994 | /* |
995 | * The man page for sendfile(2) on DragonFly states that `len` contains |
996 | * a meaningful value ONLY in case of EAGAIN and EINTR. |
     * Nothing is said about its value in case of other errors, so it is better
     * not to depend on the potentially wrong assumption that it was not modified
999 | * by the syscall. |
1000 | */ |
1001 | if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) { |
1002 | req->off += len; |
1003 | return (ssize_t) len; |
1004 | } |
1005 | |
1006 | if (errno == EINVAL || |
1007 | errno == EIO || |
1008 | errno == ENOTSOCK || |
1009 | errno == EXDEV) { |
1010 | errno = 0; |
1011 | return uv__fs_sendfile_emul(req); |
1012 | } |
1013 | |
1014 | return -1; |
1015 | } |
1016 | #else |
1017 | /* Squelch compiler warnings. */ |
1018 | (void) &in_fd; |
1019 | (void) &out_fd; |
1020 | |
1021 | return uv__fs_sendfile_emul(req); |
1022 | #endif |
1023 | } |
1024 | |
1025 | |
1026 | static ssize_t uv__fs_utime(uv_fs_t* req) { |
1027 | #if defined(__linux__) \ |
1028 | || defined(_AIX71) \ |
1029 | || defined(__sun) \ |
1030 | || defined(__HAIKU__) |
1031 | struct timespec ts[2]; |
1032 | ts[0] = uv__fs_to_timespec(req->atime); |
1033 | ts[1] = uv__fs_to_timespec(req->mtime); |
1034 | return utimensat(AT_FDCWD, req->path, ts, 0); |
1035 | #elif defined(__APPLE__) \ |
1036 | || defined(__DragonFly__) \ |
1037 | || defined(__FreeBSD__) \ |
1038 | || defined(__FreeBSD_kernel__) \ |
1039 | || defined(__NetBSD__) \ |
1040 | || defined(__OpenBSD__) |
1041 | struct timeval tv[2]; |
1042 | tv[0] = uv__fs_to_timeval(req->atime); |
1043 | tv[1] = uv__fs_to_timeval(req->mtime); |
1044 | return utimes(req->path, tv); |
1045 | #elif defined(_AIX) \ |
1046 | && !defined(_AIX71) |
1047 | struct utimbuf buf; |
1048 | buf.actime = req->atime; |
1049 | buf.modtime = req->mtime; |
1050 | return utime(req->path, &buf); |
1051 | #elif defined(__MVS__) |
1052 | attrib_t atr; |
1053 | memset(&atr, 0, sizeof(atr)); |
1054 | atr.att_mtimechg = 1; |
1055 | atr.att_atimechg = 1; |
1056 | atr.att_mtime = req->mtime; |
1057 | atr.att_atime = req->atime; |
1058 | return __lchattr((char*) req->path, &atr, sizeof(atr)); |
1059 | #else |
1060 | errno = ENOSYS; |
1061 | return -1; |
1062 | #endif |
1063 | } |
1064 | |
1065 | |
1066 | static ssize_t uv__fs_lutime(uv_fs_t* req) { |
1067 | #if defined(__linux__) || \ |
1068 | defined(_AIX71) || \ |
1069 | defined(__sun) || \ |
1070 | defined(__HAIKU__) |
1071 | struct timespec ts[2]; |
1072 | ts[0] = uv__fs_to_timespec(req->atime); |
1073 | ts[1] = uv__fs_to_timespec(req->mtime); |
1074 | return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW); |
1075 | #elif defined(__APPLE__) || \ |
1076 | defined(__DragonFly__) || \ |
1077 | defined(__FreeBSD__) || \ |
1078 | defined(__FreeBSD_kernel__) || \ |
1079 | defined(__NetBSD__) |
1080 | struct timeval tv[2]; |
1081 | tv[0] = uv__fs_to_timeval(req->atime); |
1082 | tv[1] = uv__fs_to_timeval(req->mtime); |
1083 | return lutimes(req->path, tv); |
1084 | #else |
1085 | errno = ENOSYS; |
1086 | return -1; |
1087 | #endif |
1088 | } |
1089 | |
1090 | |
1091 | static ssize_t uv__fs_write(uv_fs_t* req) { |
1092 | #if defined(__linux__) |
1093 | static int no_pwritev; |
1094 | #endif |
1095 | ssize_t r; |
1096 | |
  /* Serialize writes on OS X; concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file-descriptor lock because the descriptor
   * may be a dup().
1100 | */ |
1101 | #if defined(__APPLE__) |
1102 | static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; |
1103 | |
1104 | if (pthread_mutex_lock(&lock)) |
1105 | abort(); |
1106 | #endif |
1107 | |
1108 | if (req->off < 0) { |
1109 | if (req->nbufs == 1) |
1110 | r = write(req->file, req->bufs[0].base, req->bufs[0].len); |
1111 | else |
1112 | r = writev(req->file, (struct iovec*) req->bufs, req->nbufs); |
1113 | } else { |
1114 | if (req->nbufs == 1) { |
1115 | r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); |
1116 | goto done; |
1117 | } |
1118 | #if HAVE_PREADV |
1119 | r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); |
1120 | #else |
1121 | # if defined(__linux__) |
1122 | if (no_pwritev) retry: |
1123 | # endif |
1124 | { |
1125 | r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); |
1126 | } |
1127 | # if defined(__linux__) |
1128 | else { |
1129 | r = uv__pwritev(req->file, |
1130 | (struct iovec*) req->bufs, |
1131 | req->nbufs, |
1132 | req->off); |
1133 | if (r == -1 && errno == ENOSYS) { |
1134 | no_pwritev = 1; |
1135 | goto retry; |
1136 | } |
1137 | } |
1138 | # endif |
1139 | #endif |
1140 | } |
1141 | |
1142 | done: |
1143 | #if defined(__APPLE__) |
1144 | if (pthread_mutex_unlock(&lock)) |
1145 | abort(); |
1146 | #endif |
1147 | |
1148 | return r; |
1149 | } |
1150 | |
1151 | static ssize_t uv__fs_copyfile(uv_fs_t* req) { |
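  /* Copy strategy: open the source read-only, create the destination with the
   * source's mode (truncating it if it already exists), optionally attempt an
   * FICLONE ioctl, and otherwise copy the remaining bytes with uv_fs_sendfile()
   * in chunks. On failure the destination file is removed again.
   */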
1152 | uv_fs_t fs_req; |
1153 | uv_file srcfd; |
1154 | uv_file dstfd; |
1155 | struct stat src_statsbuf; |
1156 | struct stat dst_statsbuf; |
1157 | int dst_flags; |
1158 | int result; |
1159 | int err; |
1160 | off_t bytes_to_send; |
1161 | off_t in_offset; |
1162 | off_t bytes_written; |
1163 | size_t bytes_chunk; |
1164 | |
1165 | dstfd = -1; |
1166 | err = 0; |
1167 | |
1168 | /* Open the source file. */ |
1169 | srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL); |
1170 | uv_fs_req_cleanup(&fs_req); |
1171 | |
1172 | if (srcfd < 0) |
1173 | return srcfd; |
1174 | |
1175 | /* Get the source file's mode. */ |
1176 | if (fstat(srcfd, &src_statsbuf)) { |
1177 | err = UV__ERR(errno); |
1178 | goto out; |
1179 | } |
1180 | |
1181 | dst_flags = O_WRONLY | O_CREAT; |
1182 | |
1183 | if (req->flags & UV_FS_COPYFILE_EXCL) |
1184 | dst_flags |= O_EXCL; |
1185 | |
1186 | /* Open the destination file. */ |
1187 | dstfd = uv_fs_open(NULL, |
1188 | &fs_req, |
1189 | req->new_path, |
1190 | dst_flags, |
1191 | src_statsbuf.st_mode, |
1192 | NULL); |
1193 | uv_fs_req_cleanup(&fs_req); |
1194 | |
1195 | if (dstfd < 0) { |
1196 | err = dstfd; |
1197 | goto out; |
1198 | } |
1199 | |
1200 | /* If the file is not being opened exclusively, verify that the source and |
1201 | destination are not the same file. If they are the same, bail out early. */ |
1202 | if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) { |
1203 | /* Get the destination file's mode. */ |
1204 | if (fstat(dstfd, &dst_statsbuf)) { |
1205 | err = UV__ERR(errno); |
1206 | goto out; |
1207 | } |
1208 | |
1209 | /* Check if srcfd and dstfd refer to the same file */ |
1210 | if (src_statsbuf.st_dev == dst_statsbuf.st_dev && |
1211 | src_statsbuf.st_ino == dst_statsbuf.st_ino) { |
1212 | goto out; |
1213 | } |
1214 | |
1215 | /* Truncate the file in case the destination already existed. */ |
1216 | if (ftruncate(dstfd, 0) != 0) { |
1217 | err = UV__ERR(errno); |
1218 | goto out; |
1219 | } |
1220 | } |
1221 | |
1222 | if (fchmod(dstfd, src_statsbuf.st_mode) == -1) { |
1223 | err = UV__ERR(errno); |
1224 | #ifdef __linux__ |
1225 | if (err != UV_EPERM) |
1226 | goto out; |
1227 | |
1228 | { |
1229 | struct statfs s; |
1230 | |
1231 | /* fchmod() on CIFS shares always fails with EPERM unless the share is |
1232 | * mounted with "noperm". As fchmod() is a meaningless operation on such |
1233 | * shares anyway, detect that condition and squelch the error. |
1234 | */ |
1235 | if (fstatfs(dstfd, &s) == -1) |
1236 | goto out; |
1237 | |
1238 | if ((unsigned) s.f_type != /* CIFS */ 0xFF534D42u) |
1239 | goto out; |
1240 | } |
1241 | |
1242 | err = 0; |
1243 | #else /* !__linux__ */ |
1244 | goto out; |
1245 | #endif /* !__linux__ */ |
1246 | } |
1247 | |
1248 | #ifdef FICLONE |
1249 | if (req->flags & UV_FS_COPYFILE_FICLONE || |
1250 | req->flags & UV_FS_COPYFILE_FICLONE_FORCE) { |
1251 | if (ioctl(dstfd, FICLONE, srcfd) == 0) { |
1252 | /* ioctl() with FICLONE succeeded. */ |
1253 | goto out; |
1254 | } |
1255 | /* If an error occurred and force was set, return the error to the caller; |
1256 | * fall back to sendfile() when force was not set. */ |
1257 | if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) { |
1258 | err = UV__ERR(errno); |
1259 | goto out; |
1260 | } |
1261 | } |
1262 | #else |
1263 | if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) { |
1264 | err = UV_ENOSYS; |
1265 | goto out; |
1266 | } |
1267 | #endif |
1268 | |
1269 | bytes_to_send = src_statsbuf.st_size; |
1270 | in_offset = 0; |
1271 | while (bytes_to_send != 0) { |
1272 | bytes_chunk = SSIZE_MAX; |
1273 | if (bytes_to_send < (off_t) bytes_chunk) |
1274 | bytes_chunk = bytes_to_send; |
1275 | uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL); |
1276 | bytes_written = fs_req.result; |
1277 | uv_fs_req_cleanup(&fs_req); |
1278 | |
1279 | if (bytes_written < 0) { |
1280 | err = bytes_written; |
1281 | break; |
1282 | } |
1283 | |
1284 | bytes_to_send -= bytes_written; |
1285 | in_offset += bytes_written; |
1286 | } |
1287 | |
1288 | out: |
1289 | if (err < 0) |
1290 | result = err; |
1291 | else |
1292 | result = 0; |
1293 | |
1294 | /* Close the source file. */ |
1295 | err = uv__close_nocheckstdio(srcfd); |
1296 | |
1297 | /* Don't overwrite any existing errors. */ |
1298 | if (err != 0 && result == 0) |
1299 | result = err; |
1300 | |
1301 | /* Close the destination file if it is open. */ |
1302 | if (dstfd >= 0) { |
1303 | err = uv__close_nocheckstdio(dstfd); |
1304 | |
1305 | /* Don't overwrite any existing errors. */ |
1306 | if (err != 0 && result == 0) |
1307 | result = err; |
1308 | |
1309 | /* Remove the destination file if something went wrong. */ |
1310 | if (result != 0) { |
1311 | uv_fs_unlink(NULL, &fs_req, req->new_path, NULL); |
1312 | /* Ignore the unlink return value, as an error already happened. */ |
1313 | uv_fs_req_cleanup(&fs_req); |
1314 | } |
1315 | } |
1316 | |
1317 | if (result == 0) |
1318 | return 0; |
1319 | |
1320 | errno = UV__ERR(result); |
1321 | return -1; |
1322 | } |
1323 | |
1324 | static void uv__to_stat(struct stat* src, uv_stat_t* dst) { |
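  /* Translate the platform's struct stat into libuv's uv_stat_t, including
   * sub-second timestamps and birth time / flags where the platform provides
   * them (zeroed otherwise).
   */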
1325 | dst->st_dev = src->st_dev; |
1326 | dst->st_mode = src->st_mode; |
1327 | dst->st_nlink = src->st_nlink; |
1328 | dst->st_uid = src->st_uid; |
1329 | dst->st_gid = src->st_gid; |
1330 | dst->st_rdev = src->st_rdev; |
1331 | dst->st_ino = src->st_ino; |
1332 | dst->st_size = src->st_size; |
1333 | dst->st_blksize = src->st_blksize; |
1334 | dst->st_blocks = src->st_blocks; |
1335 | |
1336 | #if defined(__APPLE__) |
1337 | dst->st_atim.tv_sec = src->st_atimespec.tv_sec; |
1338 | dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec; |
1339 | dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec; |
1340 | dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec; |
1341 | dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec; |
1342 | dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec; |
1343 | dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec; |
1344 | dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec; |
1345 | dst->st_flags = src->st_flags; |
1346 | dst->st_gen = src->st_gen; |
1347 | #elif defined(__ANDROID__) |
1348 | dst->st_atim.tv_sec = src->st_atime; |
1349 | dst->st_atim.tv_nsec = src->st_atimensec; |
1350 | dst->st_mtim.tv_sec = src->st_mtime; |
1351 | dst->st_mtim.tv_nsec = src->st_mtimensec; |
1352 | dst->st_ctim.tv_sec = src->st_ctime; |
1353 | dst->st_ctim.tv_nsec = src->st_ctimensec; |
1354 | dst->st_birthtim.tv_sec = src->st_ctime; |
1355 | dst->st_birthtim.tv_nsec = src->st_ctimensec; |
1356 | dst->st_flags = 0; |
1357 | dst->st_gen = 0; |
1358 | #elif !defined(_AIX) && ( \ |
1359 | defined(__DragonFly__) || \ |
1360 | defined(__FreeBSD__) || \ |
1361 | defined(__OpenBSD__) || \ |
1362 | defined(__NetBSD__) || \ |
1363 | defined(_GNU_SOURCE) || \ |
1364 | defined(_BSD_SOURCE) || \ |
1365 | defined(_SVID_SOURCE) || \ |
1366 | defined(_XOPEN_SOURCE) || \ |
1367 | defined(_DEFAULT_SOURCE)) |
1368 | dst->st_atim.tv_sec = src->st_atim.tv_sec; |
1369 | dst->st_atim.tv_nsec = src->st_atim.tv_nsec; |
1370 | dst->st_mtim.tv_sec = src->st_mtim.tv_sec; |
1371 | dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec; |
1372 | dst->st_ctim.tv_sec = src->st_ctim.tv_sec; |
1373 | dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec; |
1374 | # if defined(__FreeBSD__) || \ |
1375 | defined(__NetBSD__) |
1376 | dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec; |
1377 | dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec; |
1378 | dst->st_flags = src->st_flags; |
1379 | dst->st_gen = src->st_gen; |
1380 | # else |
1381 | dst->st_birthtim.tv_sec = src->st_ctim.tv_sec; |
1382 | dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec; |
1383 | dst->st_flags = 0; |
1384 | dst->st_gen = 0; |
1385 | # endif |
1386 | #else |
1387 | dst->st_atim.tv_sec = src->st_atime; |
1388 | dst->st_atim.tv_nsec = 0; |
1389 | dst->st_mtim.tv_sec = src->st_mtime; |
1390 | dst->st_mtim.tv_nsec = 0; |
1391 | dst->st_ctim.tv_sec = src->st_ctime; |
1392 | dst->st_ctim.tv_nsec = 0; |
1393 | dst->st_birthtim.tv_sec = src->st_ctime; |
1394 | dst->st_birthtim.tv_nsec = 0; |
1395 | dst->st_flags = 0; |
1396 | dst->st_gen = 0; |
1397 | #endif |
1398 | } |
1399 | |
1400 | |
1401 | static int uv__fs_statx(int fd, |
1402 | const char* path, |
1403 | int is_fstat, |
1404 | int is_lstat, |
1405 | uv_stat_t* buf) { |
1406 | STATIC_ASSERT(UV_ENOSYS != -1); |
1407 | #ifdef __linux__ |
1408 | static int no_statx; |
1409 | struct uv__statx statxbuf; |
1410 | int dirfd; |
1411 | int flags; |
1412 | int mode; |
1413 | int rc; |
1414 | |
1415 | if (uv__load_relaxed(&no_statx)) |
1416 | return UV_ENOSYS; |
1417 | |
1418 | dirfd = AT_FDCWD; |
1419 | flags = 0; /* AT_STATX_SYNC_AS_STAT */ |
1420 | mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */ |
1421 | |
1422 | if (is_fstat) { |
1423 | dirfd = fd; |
1424 | flags |= 0x1000; /* AT_EMPTY_PATH */ |
1425 | } |
1426 | |
1427 | if (is_lstat) |
1428 | flags |= AT_SYMLINK_NOFOLLOW; |
1429 | |
1430 | rc = uv__statx(dirfd, path, flags, mode, &statxbuf); |
1431 | |
1432 | switch (rc) { |
1433 | case 0: |
1434 | break; |
1435 | case -1: |
1436 | /* EPERM happens when a seccomp filter rejects the system call. |
1437 | * Has been observed with libseccomp < 2.3.3 and docker < 18.04. |
1438 | * EOPNOTSUPP is used on DVS exported filesystems |
1439 | */ |
1440 | if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP) |
1441 | return -1; |
1442 | /* Fall through. */ |
1443 | default: |
    /* Normally zero is returned on success and -1 on error. However, on
     * S390 RHEL running in a docker container without statx implemented,
     * the call has been observed to return 1 with an error code of 0, in
     * which case we return ENOSYS.
1448 | */ |
1449 | uv__store_relaxed(&no_statx, 1); |
1450 | return UV_ENOSYS; |
1451 | } |
1452 | |
1453 | buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor); |
1454 | buf->st_mode = statxbuf.stx_mode; |
1455 | buf->st_nlink = statxbuf.stx_nlink; |
1456 | buf->st_uid = statxbuf.stx_uid; |
1457 | buf->st_gid = statxbuf.stx_gid; |
1458 | buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor); |
1459 | buf->st_ino = statxbuf.stx_ino; |
1460 | buf->st_size = statxbuf.stx_size; |
1461 | buf->st_blksize = statxbuf.stx_blksize; |
1462 | buf->st_blocks = statxbuf.stx_blocks; |
1463 | buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec; |
1464 | buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec; |
1465 | buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec; |
1466 | buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec; |
1467 | buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec; |
1468 | buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec; |
1469 | buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec; |
1470 | buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec; |
1471 | buf->st_flags = 0; |
1472 | buf->st_gen = 0; |
1473 | |
1474 | return 0; |
1475 | #else |
1476 | return UV_ENOSYS; |
1477 | #endif /* __linux__ */ |
1478 | } |
1479 | |
1480 | |
1481 | static int uv__fs_stat(const char *path, uv_stat_t *buf) { |
1482 | struct stat pbuf; |
1483 | int ret; |
1484 | |
1485 | ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf); |
1486 | if (ret != UV_ENOSYS) |
1487 | return ret; |
1488 | |
1489 | ret = stat(path, &pbuf); |
1490 | if (ret == 0) |
1491 | uv__to_stat(&pbuf, buf); |
1492 | |
1493 | return ret; |
1494 | } |
1495 | |
1496 | |
1497 | static int uv__fs_lstat(const char *path, uv_stat_t *buf) { |
1498 | struct stat pbuf; |
1499 | int ret; |
1500 | |
1501 | ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf); |
1502 | if (ret != UV_ENOSYS) |
1503 | return ret; |
1504 | |
1505 | ret = lstat(path, &pbuf); |
1506 | if (ret == 0) |
1507 | uv__to_stat(&pbuf, buf); |
1508 | |
1509 | return ret; |
1510 | } |
1511 | |
1512 | |
1513 | static int uv__fs_fstat(int fd, uv_stat_t *buf) { |
1514 | struct stat pbuf; |
1515 | int ret; |
1516 | |
  ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1518 | if (ret != UV_ENOSYS) |
1519 | return ret; |
1520 | |
1521 | ret = fstat(fd, &pbuf); |
1522 | if (ret == 0) |
1523 | uv__to_stat(&pbuf, buf); |
1524 | |
1525 | return ret; |
1526 | } |
1527 | |
1528 | static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) { |
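  /* Return the number of buffers fully consumed by a transfer of `size` bytes,
   * adjusting the first partially consumed buffer in place so the next call
   * resumes where this one left off.
   */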
1529 | size_t offset; |
1530 | /* Figure out which bufs are done */ |
1531 | for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset) |
1532 | size -= bufs[offset].len; |
1533 | |
1534 | /* Fix a partial read/write */ |
1535 | if (size > 0) { |
1536 | bufs[offset].base += size; |
1537 | bufs[offset].len -= size; |
1538 | } |
1539 | return offset; |
1540 | } |
1541 | |
1542 | static ssize_t uv__fs_write_all(uv_fs_t* req) { |
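  /* Write out every buffer, at most IOV_MAX at a time, retrying on EINTR and
   * advancing req->off and req->bufs as data is written. Returns the total
   * number of bytes written, or the first error when nothing could be written.
   */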
1543 | unsigned int iovmax; |
1544 | unsigned int nbufs; |
1545 | uv_buf_t* bufs; |
1546 | ssize_t total; |
1547 | ssize_t result; |
1548 | |
1549 | iovmax = uv__getiovmax(); |
1550 | nbufs = req->nbufs; |
1551 | bufs = req->bufs; |
1552 | total = 0; |
1553 | |
1554 | while (nbufs > 0) { |
1555 | req->nbufs = nbufs; |
1556 | if (req->nbufs > iovmax) |
1557 | req->nbufs = iovmax; |
1558 | |
1559 | do |
1560 | result = uv__fs_write(req); |
1561 | while (result < 0 && errno == EINTR); |
1562 | |
1563 | if (result <= 0) { |
1564 | if (total == 0) |
1565 | total = result; |
1566 | break; |
1567 | } |
1568 | |
1569 | if (req->off >= 0) |
1570 | req->off += result; |
1571 | |
1572 | req->nbufs = uv__fs_buf_offset(req->bufs, result); |
1573 | req->bufs += req->nbufs; |
1574 | nbufs -= req->nbufs; |
1575 | total += result; |
1576 | } |
1577 | |
1578 | if (bufs != req->bufsml) |
1579 | uv__free(bufs); |
1580 | |
1581 | req->bufs = NULL; |
1582 | req->nbufs = 0; |
1583 | |
1584 | return total; |
1585 | } |
1586 | |
1587 | |
1588 | static void uv__fs_work(struct uv__work* w) { |
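  /* Thread pool entry point: dispatch on req->fs_type, retry on EINTR for
   * everything except close() and read(), and store the outcome in
   * req->result (negated errno on failure, see the note at the top of the
   * file).
   */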
1589 | int retry_on_eintr; |
1590 | uv_fs_t* req; |
1591 | ssize_t r; |
1592 | |
1593 | req = container_of(w, uv_fs_t, work_req); |
1594 | retry_on_eintr = !(req->fs_type == UV_FS_CLOSE || |
1595 | req->fs_type == UV_FS_READ); |
1596 | |
1597 | do { |
1598 | errno = 0; |
1599 | |
1600 | #define X(type, action) \ |
1601 | case UV_FS_ ## type: \ |
1602 | r = action; \ |
1603 | break; |
1604 | |
1605 | switch (req->fs_type) { |
1606 | X(ACCESS, access(req->path, req->flags)); |
1607 | X(CHMOD, chmod(req->path, req->mode)); |
1608 | X(CHOWN, chown(req->path, req->uid, req->gid)); |
1609 | X(CLOSE, uv__fs_close(req->file)); |
1610 | X(COPYFILE, uv__fs_copyfile(req)); |
1611 | X(FCHMOD, fchmod(req->file, req->mode)); |
1612 | X(FCHOWN, fchown(req->file, req->uid, req->gid)); |
1613 | X(LCHOWN, lchown(req->path, req->uid, req->gid)); |
1614 | X(FDATASYNC, uv__fs_fdatasync(req)); |
1615 | X(FSTAT, uv__fs_fstat(req->file, &req->statbuf)); |
1616 | X(FSYNC, uv__fs_fsync(req)); |
1617 | X(FTRUNCATE, ftruncate(req->file, req->off)); |
1618 | X(FUTIME, uv__fs_futime(req)); |
1619 | X(LUTIME, uv__fs_lutime(req)); |
1620 | X(LSTAT, uv__fs_lstat(req->path, &req->statbuf)); |
1621 | X(LINK, link(req->path, req->new_path)); |
1622 | X(MKDIR, mkdir(req->path, req->mode)); |
1623 | X(MKDTEMP, uv__fs_mkdtemp(req)); |
1624 | X(MKSTEMP, uv__fs_mkstemp(req)); |
1625 | X(OPEN, uv__fs_open(req)); |
1626 | X(READ, uv__fs_read(req)); |
1627 | X(SCANDIR, uv__fs_scandir(req)); |
1628 | X(OPENDIR, uv__fs_opendir(req)); |
1629 | X(READDIR, uv__fs_readdir(req)); |
1630 | X(CLOSEDIR, uv__fs_closedir(req)); |
1631 | X(READLINK, uv__fs_readlink(req)); |
1632 | X(REALPATH, uv__fs_realpath(req)); |
1633 | X(RENAME, rename(req->path, req->new_path)); |
1634 | X(RMDIR, rmdir(req->path)); |
1635 | X(SENDFILE, uv__fs_sendfile(req)); |
1636 | X(STAT, uv__fs_stat(req->path, &req->statbuf)); |
1637 | X(STATFS, uv__fs_statfs(req)); |
1638 | X(SYMLINK, symlink(req->path, req->new_path)); |
1639 | X(UNLINK, unlink(req->path)); |
1640 | X(UTIME, uv__fs_utime(req)); |
1641 | X(WRITE, uv__fs_write_all(req)); |
1642 | default: abort(); |
1643 | } |
1644 | #undef X |
1645 | } while (r == -1 && errno == EINTR && retry_on_eintr); |
1646 | |
1647 | if (r == -1) |
1648 | req->result = UV__ERR(errno); |
1649 | else |
1650 | req->result = r; |
1651 | |
1652 | if (r == 0 && (req->fs_type == UV_FS_STAT || |
1653 | req->fs_type == UV_FS_FSTAT || |
1654 | req->fs_type == UV_FS_LSTAT)) { |
1655 | req->ptr = &req->statbuf; |
1656 | } |
1657 | } |
1658 | |
1659 | |
1660 | static void uv__fs_done(struct uv__work* w, int status) { |
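  /* Runs on the event loop thread after uv__fs_work() has finished or the
   * request has been cancelled: unregister the request and invoke the user's
   * callback.
   */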
1661 | uv_fs_t* req; |
1662 | |
1663 | req = container_of(w, uv_fs_t, work_req); |
1664 | uv__req_unregister(req->loop, req); |
1665 | |
1666 | if (status == UV_ECANCELED) { |
1667 | assert(req->result == 0); |
1668 | req->result = UV_ECANCELED; |
1669 | } |
1670 | |
1671 | req->cb(req); |
1672 | } |
1673 | |
1674 | |
1675 | int uv_fs_access(uv_loop_t* loop, |
1676 | uv_fs_t* req, |
1677 | const char* path, |
1678 | int flags, |
1679 | uv_fs_cb cb) { |
1680 | INIT(ACCESS); |
1681 | PATH; |
1682 | req->flags = flags; |
1683 | POST; |
1684 | } |
1685 | |
1686 | |
1687 | int uv_fs_chmod(uv_loop_t* loop, |
1688 | uv_fs_t* req, |
1689 | const char* path, |
1690 | int mode, |
1691 | uv_fs_cb cb) { |
1692 | INIT(CHMOD); |
1693 | PATH; |
1694 | req->mode = mode; |
1695 | POST; |
1696 | } |
1697 | |
1698 | |
1699 | int uv_fs_chown(uv_loop_t* loop, |
1700 | uv_fs_t* req, |
1701 | const char* path, |
1702 | uv_uid_t uid, |
1703 | uv_gid_t gid, |
1704 | uv_fs_cb cb) { |
1705 | INIT(CHOWN); |
1706 | PATH; |
1707 | req->uid = uid; |
1708 | req->gid = gid; |
1709 | POST; |
1710 | } |
1711 | |
1712 | |
1713 | int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { |
1714 | INIT(CLOSE); |
1715 | req->file = file; |
1716 | POST; |
1717 | } |
1718 | |
1719 | |
1720 | int uv_fs_fchmod(uv_loop_t* loop, |
1721 | uv_fs_t* req, |
1722 | uv_file file, |
1723 | int mode, |
1724 | uv_fs_cb cb) { |
1725 | INIT(FCHMOD); |
1726 | req->file = file; |
1727 | req->mode = mode; |
1728 | POST; |
1729 | } |
1730 | |
1731 | |
1732 | int uv_fs_fchown(uv_loop_t* loop, |
1733 | uv_fs_t* req, |
1734 | uv_file file, |
1735 | uv_uid_t uid, |
1736 | uv_gid_t gid, |
1737 | uv_fs_cb cb) { |
1738 | INIT(FCHOWN); |
1739 | req->file = file; |
1740 | req->uid = uid; |
1741 | req->gid = gid; |
1742 | POST; |
1743 | } |
1744 | |
1745 | |
1746 | int uv_fs_lchown(uv_loop_t* loop, |
1747 | uv_fs_t* req, |
1748 | const char* path, |
1749 | uv_uid_t uid, |
1750 | uv_gid_t gid, |
1751 | uv_fs_cb cb) { |
1752 | INIT(LCHOWN); |
1753 | PATH; |
1754 | req->uid = uid; |
1755 | req->gid = gid; |
1756 | POST; |
1757 | } |
1758 | |
1759 | |
1760 | int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { |
1761 | INIT(FDATASYNC); |
1762 | req->file = file; |
1763 | POST; |
1764 | } |
1765 | |
1766 | |
1767 | int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { |
1768 | INIT(FSTAT); |
1769 | req->file = file; |
1770 | POST; |
1771 | } |
1772 | |
1773 | |
1774 | int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { |
1775 | INIT(FSYNC); |
1776 | req->file = file; |
1777 | POST; |
1778 | } |
1779 | |
1780 | |
1781 | int uv_fs_ftruncate(uv_loop_t* loop, |
1782 | uv_fs_t* req, |
1783 | uv_file file, |
1784 | int64_t off, |
1785 | uv_fs_cb cb) { |
1786 | INIT(FTRUNCATE); |
1787 | req->file = file; |
1788 | req->off = off; |
1789 | POST; |
1790 | } |
1791 | |
1792 | |
1793 | int uv_fs_futime(uv_loop_t* loop, |
1794 | uv_fs_t* req, |
1795 | uv_file file, |
1796 | double atime, |
1797 | double mtime, |
1798 | uv_fs_cb cb) { |
1799 | INIT(FUTIME); |
1800 | req->file = file; |
1801 | req->atime = atime; |
1802 | req->mtime = mtime; |
1803 | POST; |
1804 | } |
1805 | |
1806 | int uv_fs_lutime(uv_loop_t* loop, |
1807 | uv_fs_t* req, |
1808 | const char* path, |
1809 | double atime, |
1810 | double mtime, |
1811 | uv_fs_cb cb) { |
1812 | INIT(LUTIME); |
1813 | PATH; |
1814 | req->atime = atime; |
1815 | req->mtime = mtime; |
1816 | POST; |
1817 | } |
1818 | |
1819 | |
1820 | int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { |
1821 | INIT(LSTAT); |
1822 | PATH; |
1823 | POST; |
1824 | } |
1825 | |
1826 | |
1827 | int uv_fs_link(uv_loop_t* loop, |
1828 | uv_fs_t* req, |
1829 | const char* path, |
1830 | const char* new_path, |
1831 | uv_fs_cb cb) { |
1832 | INIT(LINK); |
1833 | PATH2; |
1834 | POST; |
1835 | } |
1836 | |
1837 | |
1838 | int uv_fs_mkdir(uv_loop_t* loop, |
1839 | uv_fs_t* req, |
1840 | const char* path, |
1841 | int mode, |
1842 | uv_fs_cb cb) { |
1843 | INIT(MKDIR); |
1844 | PATH; |
1845 | req->mode = mode; |
1846 | POST; |
1847 | } |
1848 | |
1849 | |
1850 | int uv_fs_mkdtemp(uv_loop_t* loop, |
1851 | uv_fs_t* req, |
1852 | const char* tpl, |
1853 | uv_fs_cb cb) { |
1854 | INIT(MKDTEMP); |
1855 | req->path = uv__strdup(tpl); |
1856 | if (req->path == NULL) |
1857 | return UV_ENOMEM; |
1858 | POST; |
1859 | } |
1860 | |
1861 | |
1862 | int uv_fs_mkstemp(uv_loop_t* loop, |
1863 | uv_fs_t* req, |
1864 | const char* tpl, |
1865 | uv_fs_cb cb) { |
1866 | INIT(MKSTEMP); |
1867 | req->path = uv__strdup(tpl); |
1868 | if (req->path == NULL) |
1869 | return UV_ENOMEM; |
1870 | POST; |
1871 | } |
1872 | |
1873 | |
1874 | int uv_fs_open(uv_loop_t* loop, |
1875 | uv_fs_t* req, |
1876 | const char* path, |
1877 | int flags, |
1878 | int mode, |
1879 | uv_fs_cb cb) { |
1880 | INIT(OPEN); |
1881 | PATH; |
1882 | req->flags = flags; |
1883 | req->mode = mode; |
1884 | POST; |
1885 | } |
1886 | |
1887 | |
1888 | int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, |
1889 | uv_file file, |
1890 | const uv_buf_t bufs[], |
1891 | unsigned int nbufs, |
1892 | int64_t off, |
1893 | uv_fs_cb cb) { |
1894 | INIT(READ); |
1895 | |
1896 | if (bufs == NULL || nbufs == 0) |
1897 | return UV_EINVAL; |
1898 | |
1899 | req->file = file; |
1900 | |
1901 | req->nbufs = nbufs; |
1902 | req->bufs = req->bufsml; |
1903 | if (nbufs > ARRAY_SIZE(req->bufsml)) |
1904 | req->bufs = uv__malloc(nbufs * sizeof(*bufs)); |
1905 | |
1906 | if (req->bufs == NULL) |
1907 | return UV_ENOMEM; |
1908 | |
1909 | memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); |
1910 | |
1911 | req->off = off; |
1912 | POST; |
1913 | } |
1914 | |
1915 | |
1916 | int uv_fs_scandir(uv_loop_t* loop, |
1917 | uv_fs_t* req, |
1918 | const char* path, |
1919 | int flags, |
1920 | uv_fs_cb cb) { |
1921 | INIT(SCANDIR); |
1922 | PATH; |
1923 | req->flags = flags; |
1924 | POST; |
1925 | } |
1926 | |
1927 | int uv_fs_opendir(uv_loop_t* loop, |
1928 | uv_fs_t* req, |
1929 | const char* path, |
1930 | uv_fs_cb cb) { |
1931 | INIT(OPENDIR); |
1932 | PATH; |
1933 | POST; |
1934 | } |
1935 | |
1936 | int uv_fs_readdir(uv_loop_t* loop, |
1937 | uv_fs_t* req, |
1938 | uv_dir_t* dir, |
1939 | uv_fs_cb cb) { |
1940 | INIT(READDIR); |
1941 | |
1942 | if (dir == NULL || dir->dir == NULL || dir->dirents == NULL) |
1943 | return UV_EINVAL; |
1944 | |
1945 | req->ptr = dir; |
1946 | POST; |
1947 | } |
1948 | |
1949 | int uv_fs_closedir(uv_loop_t* loop, |
1950 | uv_fs_t* req, |
1951 | uv_dir_t* dir, |
1952 | uv_fs_cb cb) { |
1953 | INIT(CLOSEDIR); |
1954 | |
1955 | if (dir == NULL) |
1956 | return UV_EINVAL; |
1957 | |
1958 | req->ptr = dir; |
1959 | POST; |
1960 | } |
1961 | |
1962 | int uv_fs_readlink(uv_loop_t* loop, |
1963 | uv_fs_t* req, |
1964 | const char* path, |
1965 | uv_fs_cb cb) { |
1966 | INIT(READLINK); |
1967 | PATH; |
1968 | POST; |
1969 | } |
1970 | |
1971 | |
1972 | int uv_fs_realpath(uv_loop_t* loop, |
1973 | uv_fs_t* req, |
1974 | const char * path, |
1975 | uv_fs_cb cb) { |
1976 | INIT(REALPATH); |
1977 | PATH; |
1978 | POST; |
1979 | } |
1980 | |
1981 | |
1982 | int uv_fs_rename(uv_loop_t* loop, |
1983 | uv_fs_t* req, |
1984 | const char* path, |
1985 | const char* new_path, |
1986 | uv_fs_cb cb) { |
1987 | INIT(RENAME); |
1988 | PATH2; |
1989 | POST; |
1990 | } |
1991 | |
1992 | |
1993 | int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { |
1994 | INIT(RMDIR); |
1995 | PATH; |
1996 | POST; |
1997 | } |
1998 | |
1999 | |
2000 | int uv_fs_sendfile(uv_loop_t* loop, |
2001 | uv_fs_t* req, |
2002 | uv_file out_fd, |
2003 | uv_file in_fd, |
2004 | int64_t off, |
2005 | size_t len, |
2006 | uv_fs_cb cb) { |
2007 | INIT(SENDFILE); |
2008 | req->flags = in_fd; /* hack */ |
2009 | req->file = out_fd; |
2010 | req->off = off; |
2011 | req->bufsml[0].len = len; |
2012 | POST; |
2013 | } |
2014 | |
2015 | |
2016 | int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { |
2017 | INIT(STAT); |
2018 | PATH; |
2019 | POST; |
2020 | } |
2021 | |
2022 | |
2023 | int uv_fs_symlink(uv_loop_t* loop, |
2024 | uv_fs_t* req, |
2025 | const char* path, |
2026 | const char* new_path, |
2027 | int flags, |
2028 | uv_fs_cb cb) { |
2029 | INIT(SYMLINK); |
2030 | PATH2; |
2031 | req->flags = flags; |
2032 | POST; |
2033 | } |
2034 | |
2035 | |
2036 | int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { |
2037 | INIT(UNLINK); |
2038 | PATH; |
2039 | POST; |
2040 | } |
2041 | |
2042 | |
2043 | int uv_fs_utime(uv_loop_t* loop, |
2044 | uv_fs_t* req, |
2045 | const char* path, |
2046 | double atime, |
2047 | double mtime, |
2048 | uv_fs_cb cb) { |
2049 | INIT(UTIME); |
2050 | PATH; |
2051 | req->atime = atime; |
2052 | req->mtime = mtime; |
2053 | POST; |
2054 | } |
2055 | |
2056 | |
2057 | int uv_fs_write(uv_loop_t* loop, |
2058 | uv_fs_t* req, |
2059 | uv_file file, |
2060 | const uv_buf_t bufs[], |
2061 | unsigned int nbufs, |
2062 | int64_t off, |
2063 | uv_fs_cb cb) { |
2064 | INIT(WRITE); |
2065 | |
2066 | if (bufs == NULL || nbufs == 0) |
2067 | return UV_EINVAL; |
2068 | |
2069 | req->file = file; |
2070 | |
2071 | req->nbufs = nbufs; |
2072 | req->bufs = req->bufsml; |
2073 | if (nbufs > ARRAY_SIZE(req->bufsml)) |
2074 | req->bufs = uv__malloc(nbufs * sizeof(*bufs)); |
2075 | |
2076 | if (req->bufs == NULL) |
2077 | return UV_ENOMEM; |
2078 | |
2079 | memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); |
2080 | |
2081 | req->off = off; |
2082 | POST; |
2083 | } |
2084 | |
2085 | |
2086 | void uv_fs_req_cleanup(uv_fs_t* req) { |
2087 | if (req == NULL) |
2088 | return; |
2089 | |
  /* Only necessary for asynchronous requests, i.e., requests with a callback.
2091 | * Synchronous ones don't copy their arguments and have req->path and |
2092 | * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and |
2093 | * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory. |
2094 | */ |
2095 | if (req->path != NULL && |
2096 | (req->cb != NULL || |
2097 | req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP)) |
2098 | uv__free((void*) req->path); /* Memory is shared with req->new_path. */ |
2099 | |
2100 | req->path = NULL; |
2101 | req->new_path = NULL; |
2102 | |
2103 | if (req->fs_type == UV_FS_READDIR && req->ptr != NULL) |
2104 | uv__fs_readdir_cleanup(req); |
2105 | |
2106 | if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL) |
2107 | uv__fs_scandir_cleanup(req); |
2108 | |
2109 | if (req->bufs != req->bufsml) |
2110 | uv__free(req->bufs); |
2111 | req->bufs = NULL; |
2112 | |
2113 | if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf) |
2114 | uv__free(req->ptr); |
2115 | req->ptr = NULL; |
2116 | } |
2117 | |
2118 | |
2119 | int uv_fs_copyfile(uv_loop_t* loop, |
2120 | uv_fs_t* req, |
2121 | const char* path, |
2122 | const char* new_path, |
2123 | int flags, |
2124 | uv_fs_cb cb) { |
2125 | INIT(COPYFILE); |
2126 | |
2127 | if (flags & ~(UV_FS_COPYFILE_EXCL | |
2128 | UV_FS_COPYFILE_FICLONE | |
2129 | UV_FS_COPYFILE_FICLONE_FORCE)) { |
2130 | return UV_EINVAL; |
2131 | } |
2132 | |
2133 | PATH2; |
2134 | req->flags = flags; |
2135 | POST; |
2136 | } |
2137 | |
2138 | |
2139 | int uv_fs_statfs(uv_loop_t* loop, |
2140 | uv_fs_t* req, |
2141 | const char* path, |
2142 | uv_fs_cb cb) { |
2143 | INIT(STATFS); |
2144 | PATH; |
2145 | POST; |
2146 | } |
2147 | |
2148 | int uv_fs_get_system_error(const uv_fs_t* req) { |
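  /* req->result holds a negated errno value on failure, so negate it again to
   * recover the positive system error number.
   */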
2149 | return -req->result; |
2150 | } |
2151 | |