1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22/* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29#include "uv.h"
30#include "internal.h"
31
32#include <errno.h>
33#include <stdio.h>
34#include <stdlib.h>
35#include <string.h>
36#include <limits.h> /* PATH_MAX */
37
38#include <sys/types.h>
39#include <sys/socket.h>
40#include <sys/stat.h>
41#include <sys/time.h>
42#include <sys/uio.h>
43#include <pthread.h>
44#include <unistd.h>
45#include <fcntl.h>
46#include <poll.h>
47
48#if defined(__DragonFly__) || \
49 defined(__FreeBSD__) || \
50 defined(__FreeBSD_kernel__) || \
51 defined(__OpenBSD__) || \
52 defined(__NetBSD__)
53# define HAVE_PREADV 1
54#else
55# define HAVE_PREADV 0
56#endif
57
58#if defined(__linux__) || defined(__sun)
59# include <sys/sendfile.h>
60#endif
61
62#if defined(__APPLE__)
63# include <sys/sysctl.h>
64#elif defined(__linux__) && !defined(FICLONE)
65# include <sys/ioctl.h>
66# define FICLONE _IOW(0x94, 9, int)
67#endif
68
69#if defined(_AIX) && !defined(_AIX71)
70# include <utime.h>
71#endif
72
73#if defined(_AIX) && _XOPEN_SOURCE <= 600
74extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
75#endif
76
/* Common initialization for every uv_fs_*() entry point: validate the
 * request pointer, stamp the request type, and reset all result/path/buffer
 * fields before operation-specific setup runs.  Expects `req`, `loop` and
 * `cb` to be in scope at the expansion site.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
92
/* Stash `path` in the request.  Synchronous calls (cb == NULL) may borrow
 * the caller's pointer because the call completes before returning;
 * asynchronous calls must copy it since the caller's buffer may be gone by
 * the time the threadpool runs the request.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
105
/* Like PATH but for two-path operations (rename, link, copyfile, ...).
 * For async calls a single allocation holds both NUL-terminated strings
 * back to back; req->new_path points into the same buffer as req->path.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
125
/* Dispatch the prepared request.  Async (cb set): register the request and
 * hand it to the threadpool, returning 0 immediately; the result is
 * delivered via uv__fs_done().  Sync: run the work inline on the calling
 * thread and return req->result directly.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
143
144
/* Close `fd`.  EINTR and EINPROGRESS are treated as success: the kernel
 * completes the close on its own in those cases, so reporting an error
 * would mislead the caller into retrying on a now-invalid descriptor. */
static int uv__fs_close(int fd) {
  int ret = uv__close_nocancel(fd);

  if (ret == -1 && (errno == EINTR || errno == EINPROGRESS))
    ret = 0; /* The close is in progress, not an error. */

  return ret;
}
155
156
/* Flush req->file to stable storage; fsync()-style return (0 or -1+errno). */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    /* Numeric constant because older SDKs don't define F_BARRIERFSYNC. */
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
179
180
/* Flush req->file's data (not necessarily all metadata) using the closest
 * primitive the platform offers. */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  /* No fdatasync(); fall back to a full fsync(). */
  return fsync(req->file);
#endif
}
191
192
/* Set the access/modification times of the already-open req->file from the
 * fractional-second doubles req->atime and req->mtime. */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
    || defined(_AIX71) \
    || defined(__HAIKU__)
  /* futimens() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* tv_sec truncates the double; tv_nsec recovers the sub-second part by
   * scaling to whole microseconds first, then back up to nanoseconds. */
  ts[0].tv_sec = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
  return futimens(req->file, ts);
#elif defined(__APPLE__) \
    || defined(__DragonFly__) \
    || defined(__FreeBSD__) \
    || defined(__FreeBSD_kernel__) \
    || defined(__NetBSD__) \
    || defined(__OpenBSD__) \
    || defined(__sun)
  struct timeval tv[2];
  tv[0].tv_sec = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
# if defined(__sun)
  /* SunOS has no futimes(); futimesat() with a NULL path acts on the fd. */
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: use the native attribute-change service on the descriptor. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  /* No fd-based utime primitive on this platform. */
  errno = ENOSYS;
  return -1;
#endif
}
236
237
238static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
239 return mkdtemp((char*) req->path) ? 0 : -1;
240}
241
242
/* open() req->path with req->flags/req->mode and make the resulting fd
 * close-on-exec.  Returns the fd, or -1 with errno set. */
static ssize_t uv__fs_open(uv_fs_t* req) {
  static int no_cloexec_support;  /* Sticks once O_CLOEXEC is seen to fail. */
  int r;

  /* Try O_CLOEXEC before entering locks */
  if (no_cloexec_support == 0) {
#ifdef O_CLOEXEC
    r = open(req->path, req->flags | O_CLOEXEC, req->mode);
    if (r >= 0)
      return r;
    if (errno != EINVAL)
      return r;
    /* EINVAL presumably means the kernel predates O_CLOEXEC; remember that
     * and take the locked fallback path from now on.  NOTE(review): the
     * flag is read/written without synchronization — looks like a benign
     * race by design, confirm against project conventions. */
    no_cloexec_support = 1;
#endif  /* O_CLOEXEC */
  }

  /* Async path: hold the loop's cloexec read-lock so a concurrent fork()
   * cannot slip in between open() and uv__cloexec() below. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();  /* Failing to close a freshly opened fd is unrecoverable. */
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
}
279
280
281static ssize_t uv__fs_preadv(uv_file fd,
282 uv_buf_t* bufs,
283 unsigned int nbufs,
284 off_t off) {
285 uv_buf_t* buf;
286 uv_buf_t* end;
287 ssize_t result;
288 ssize_t rc;
289 size_t pos;
290
291 assert(nbufs > 0);
292
293 result = 0;
294 pos = 0;
295 buf = bufs + 0;
296 end = bufs + nbufs;
297
298 for (;;) {
299 do
300 rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
301 while (rc == -1 && errno == EINTR);
302
303 if (rc == 0)
304 break;
305
306 if (rc == -1 && result == 0)
307 return UV__ERR(errno);
308
309 if (rc == -1)
310 break; /* We read some data so return that, ignore the error. */
311
312 pos += rc;
313 result += rc;
314
315 if (pos < buf->len)
316 continue;
317
318 pos = 0;
319 buf += 1;
320
321 if (buf == end)
322 break;
323 }
324
325 return result;
326}
327
328
/* Read into req->bufs at offset req->off, or at the fd's current position
 * when req->off < 0.  Frees the bufs array before returning.  Returns the
 * byte count or -1 with errno set. */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  static int no_preadv;  /* Set once the preadv syscall returns ENOSYS. */
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp to the kernel's per-call iovec limit. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* Tricky: on Linux the bare `if (no_preadv) retry:` pairs with the
     * `else` inside the second __linux__ section below — read both
     * together.  First attempt uses the raw syscall; on ENOSYS we latch
     * no_preadv and jump back to the emulated path. */
# if defined(__linux__)
    if (no_preadv) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        no_preadv = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
397
398
399#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
400#define UV_CONST_DIRENT uv__dirent_t
401#else
402#define UV_CONST_DIRENT const uv__dirent_t
403#endif
404
405
406static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
407 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
408}
409
410
411static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
412 return strcmp((*a)->d_name, (*b)->d_name);
413}
414
415
416static ssize_t uv__fs_scandir(uv_fs_t* req) {
417 uv__dirent_t** dents;
418 int n;
419
420 dents = NULL;
421 n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
422
423 /* NOTE: We will use nbufs as an index field */
424 req->nbufs = 0;
425
426 if (n == 0) {
427 /* OS X still needs to deallocate some memory.
428 * Memory was allocated using the system allocator, so use free() here.
429 */
430 free(dents);
431 dents = NULL;
432 } else if (n == -1) {
433 return n;
434 }
435
436 req->ptr = dents;
437
438 return n;
439}
440
441static int uv__fs_opendir(uv_fs_t* req) {
442 uv_dir_t* dir;
443
444 dir = uv__malloc(sizeof(*dir));
445 if (dir == NULL)
446 goto error;
447
448 dir->dir = opendir(req->path);
449 if (dir->dir == NULL)
450 goto error;
451
452 req->ptr = dir;
453 return 0;
454
455error:
456 uv__free(dir);
457 req->ptr = NULL;
458 return -1;
459}
460
461static int uv__fs_readdir(uv_fs_t* req) {
462 uv_dir_t* dir;
463 uv_dirent_t* dirent;
464 struct dirent* res;
465 unsigned int dirent_idx;
466 unsigned int i;
467
468 dir = req->ptr;
469 dirent_idx = 0;
470
471 while (dirent_idx < dir->nentries) {
472 /* readdir() returns NULL on end of directory, as well as on error. errno
473 is used to differentiate between the two conditions. */
474 errno = 0;
475 res = readdir(dir->dir);
476
477 if (res == NULL) {
478 if (errno != 0)
479 goto error;
480 break;
481 }
482
483 if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
484 continue;
485
486 dirent = &dir->dirents[dirent_idx];
487 dirent->name = uv__strdup(res->d_name);
488
489 if (dirent->name == NULL)
490 goto error;
491
492 dirent->type = uv__fs_get_dirent_type(res);
493 ++dirent_idx;
494 }
495
496 return dirent_idx;
497
498error:
499 for (i = 0; i < dirent_idx; ++i) {
500 uv__free((char*) dir->dirents[i].name);
501 dir->dirents[i].name = NULL;
502 }
503
504 return -1;
505}
506
507static int uv__fs_closedir(uv_fs_t* req) {
508 uv_dir_t* dir;
509
510 dir = req->ptr;
511
512 if (dir->dir != NULL) {
513 closedir(dir->dir);
514 dir->dir = NULL;
515 }
516
517 uv__free(req->ptr);
518 req->ptr = NULL;
519 return 0;
520}
521
522static ssize_t uv__fs_pathmax_size(const char* path) {
523 ssize_t pathmax;
524
525 pathmax = pathconf(path, _PC_PATH_MAX);
526
527 if (pathmax == -1)
528 pathmax = UV__PATH_MAX;
529
530 return pathmax;
531}
532
/* Read the target of the symlink req->path into a freshly-allocated,
 * NUL-terminated buffer stored in req->ptr.  Returns 0 on success or -1
 * with errno set. */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;
  char* newbuf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX. Read size of link. */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys. */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  /* z/OS needs a codepage-aware wrapper around readlink(). */
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    newbuf = uv__realloc(buf, len + 1);

    if (newbuf == NULL) {
      uv__free(buf);
      return -1;
    }

    buf = newbuf;
  }

  /* readlink() does not NUL-terminate; do it ourselves. */
  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
596
/* Canonicalize req->path into a freshly-allocated buffer in req->ptr.
 * Returns 0 on success or -1 with errno set. */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  /* POSIX.1-2008 realpath() allocates the result buffer itself. */
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  /* Pre-2008 realpath() needs a caller-supplied buffer; size it from the
   * filesystem's reported path limit. */
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
625
/* Portable sendfile() emulation: copy up to req->bufsml[0].len bytes from
 * the source fd (smuggled in req->flags by uv__fs_sendfile) to the target
 * fd (req->file) through a stack buffer.  On anything but total failure,
 * req->off is advanced by the number of bytes sent. */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* First failure may just mean the source fd isn't seekable; retry
       * once with plain read() before giving up. */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk we just read into the target fd. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      /* Target fd is non-blocking and full; wait until it's writable. */
      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
738
739
/* Zero-copy file-to-fd transfer via the platform's sendfile(), falling
 * back to uv__fs_sendfile_emul() where the syscall is missing or rejects
 * this fd combination.  Source fd is carried in req->flags, target in
 * req->file; req->off is advanced by the bytes sent. */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;
    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    /* These errors mean "wrong kind of fd", not "transfer failed";
     * retry with the read/write emulation. */
    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           || \
      defined(__DragonFly__)       || \
      defined(__FreeBSD__)         || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
837
838
/* Set the access/modification times of the path req->path from the
 * fractional-second doubles req->atime and req->mtime. */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__) \
    || defined(_AIX71) \
    || defined(__sun) \
    || defined(__HAIKU__)
  /* utimensat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* tv_sec truncates the double; tv_nsec recovers the sub-second part by
   * scaling to whole microseconds first, then back up to nanoseconds. */
  ts[0].tv_sec = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__) \
    || defined(__DragonFly__) \
    || defined(__FreeBSD__) \
    || defined(__FreeBSD_kernel__) \
    || defined(__NetBSD__) \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0].tv_sec = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
  return utimes(req->path, tv);
#elif defined(_AIX) \
    && !defined(_AIX71)
  /* Old AIX only has whole-second utime(). */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  /* z/OS: use the native attribute-change service on the path. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
884
885
/* Write req->bufs at offset req->off, or at the fd's current position when
 * req->off < 0.  Returns the byte count or -1 with errno set.  May write
 * fewer buffers than requested on the no-pwritev fallback path; the caller
 * (uv__fs_write_all) handles partial progress. */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  static int no_pwritev;  /* Set once the pwritev syscall returns ENOSYS. */
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* Tricky: the bare `if (no_pwritev) retry:` pairs with the `else` in the
     * second __linux__ section below — read both together.  The fallback
     * writes only bufs[0]; uv__fs_write_all() resubmits the rest. */
# if defined(__linux__)
    if (no_pwritev) retry:
# endif
    {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        no_pwritev = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
945
/* Copy req->path to req->new_path.  req->flags carries the UV_FS_COPYFILE_*
 * options.  Tries a kernel-side clone (FICLONE) on Linux when requested and
 * falls back to a sendfile() loop.  On any failure the partially-written
 * destination is unlinked.  Returns 0 or -1 with errno set. */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  size_t bytes_to_send;
  int64_t in_offset;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;  /* Already a negated uv error code. */

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT | O_TRUNC;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file, created with the source's mode. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* Get the destination file's mode. */
  if (fstat(dstfd, &dst_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  /* Check if srcfd and dstfd refer to the same file */
  if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
      src_statsbuf.st_ino == dst_statsbuf.st_ino) {
    goto out;  /* Nothing to copy. */
  }

  /* The umask may have stripped bits at open(); re-apply the exact mode. */
  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
    goto out;
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == -1) {
      /* If an error occurred that the sendfile fallback also won't handle, or
         this is a force clone then exit. Otherwise, fall through to try using
         sendfile(). */
      if (errno != ENOTTY && errno != EOPNOTSUPP && errno != EXDEV) {
        err = UV__ERR(errno);
        goto out;
      } else if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
        err = UV_ENOTSUP;
        goto out;
      }
    } else {
      goto out;  /* Clone succeeded; the copy is complete. */
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Byte-copy loop; sendfile() may transfer less than asked per call. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    err = uv_fs_sendfile(NULL,
                         &fs_req,
                         dstfd,
                         srcfd,
                         in_offset,
                         bytes_to_send,
                         NULL);
    uv_fs_req_cleanup(&fs_req);
    if (err < 0)
      break;
    bytes_to_send -= fs_req.result;
    in_offset += fs_req.result;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* The dispatcher reads errno, so hand the error back through it. */
  errno = UV__ERR(result);
  return -1;
}
1087
/* Convert a platform struct stat into libuv's portable uv_stat_t.  The
 * #ifdef ladder picks the richest timestamp/birthtime representation each
 * platform exposes, zeroing fields (st_flags, st_gen, nanoseconds) that
 * have no native equivalent. */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* Darwin spells the fields st_*timespec and has a real birthtime. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes separate st_*timensec fields; no birthtime, so ctime
   * stands in for it. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && ( \
    defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__OpenBSD__) || \
    defined(__NetBSD__) || \
    defined(_GNU_SOURCE) || \
    defined(_BSD_SOURCE) || \
    defined(_SVID_SOURCE) || \
    defined(_XOPEN_SOURCE) || \
    defined(_DEFAULT_SOURCE))
  /* Platforms/feature-test macros that provide struct timespec st_*tim. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__) || \
    defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  /* No birthtime; fall back to ctime. */
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Plain POSIX: whole-second timestamps only. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1163
1164
/* Linux statx(2)-based stat/lstat/fstat.  Fills `buf` and returns 0 on
 * success, -1 with errno on a real error, or UV_ENOSYS when statx is
 * unavailable (the caller then falls back to the classic stat family).
 * Non-Linux builds always return UV_ENOSYS. */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  /* Callers distinguish UV_ENOSYS from syscall failure (-1). */
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static int no_statx;  /* Set once statx is seen to be unsupported. */
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (no_statx)
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  if (rc == -1) {
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
      return -1;

    no_statx = 1;
    return UV_ENOSYS;
  }

  /* Repack the statx result into uv_stat_t. */
  buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
  buf->st_mode = statxbuf.stx_mode;
  buf->st_nlink = statxbuf.stx_nlink;
  buf->st_uid = statxbuf.stx_uid;
  buf->st_gid = statxbuf.stx_gid;
  buf->st_rdev = statxbuf.stx_rdev_major;
  buf->st_ino = statxbuf.stx_ino;
  buf->st_size = statxbuf.stx_size;
  buf->st_blksize = statxbuf.stx_blksize;
  buf->st_blocks = statxbuf.stx_blocks;
  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  buf->st_flags = 0;
  buf->st_gen = 0;

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1233
1234
1235static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1236 struct stat pbuf;
1237 int ret;
1238
1239 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1240 if (ret != UV_ENOSYS)
1241 return ret;
1242
1243 ret = stat(path, &pbuf);
1244 if (ret == 0)
1245 uv__to_stat(&pbuf, buf);
1246
1247 return ret;
1248}
1249
1250
1251static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1252 struct stat pbuf;
1253 int ret;
1254
1255 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1256 if (ret != UV_ENOSYS)
1257 return ret;
1258
1259 ret = lstat(path, &pbuf);
1260 if (ret == 0)
1261 uv__to_stat(&pbuf, buf);
1262
1263 return ret;
1264}
1265
1266
1267static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1268 struct stat pbuf;
1269 int ret;
1270
1271 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1272 if (ret != UV_ENOSYS)
1273 return ret;
1274
1275 ret = fstat(fd, &pbuf);
1276 if (ret == 0)
1277 uv__to_stat(&pbuf, buf);
1278
1279 return ret;
1280}
1281
1282static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1283 size_t offset;
1284 /* Figure out which bufs are done */
1285 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1286 size -= bufs[offset].len;
1287
1288 /* Fix a partial read/write */
1289 if (size > 0) {
1290 bufs[offset].base += size;
1291 bufs[offset].len -= size;
1292 }
1293 return offset;
1294}
1295
/* Write every buffer in req->bufs, looping as needed: each uv__fs_write()
 * call is fed at most uv__getiovmax() buffers, and short writes resume at
 * the first unwritten byte (via uv__fs_buf_offset). Returns the total byte
 * count written, or the failing result only if nothing was written at all.
 * Frees the heap-allocated buffer array and resets req->bufs/req->nbufs.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;  /* Buffers still to be written. */
  bufs = req->bufs;    /* Original array pointer, kept for the final free. */
  total = 0;

  while (nbufs > 0) {
    /* Clamp this batch to the per-call iovec limit. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    /* uv__fs_work() does not retry UV_FS_WRITE on EINTR; do it here. */
    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only when no bytes were written; otherwise
       * return the length of the successfully written prefix.
       */
      if (total == 0)
        total = result;
      break;
    }

    /* Positional write: advance the offset past what was just written. */
    if (req->off >= 0)
      req->off += result;

    /* Drop fully-written buffers; a partial one is trimmed in place. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  /* The array was heap-allocated by uv_fs_write() when it didn't fit in
   * the inline req->bufsml storage.
   */
  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1340
1341
/* Thread-pool entry point for every fs request: dispatch on req->fs_type,
 * run the matching system call or uv__fs_*() wrapper, and store the outcome
 * in req->result (negated errno on failure, the raw return value otherwise;
 * see the caveat comment at the top of this file).
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  /* CLOSE and READ are excluded from the EINTR retry loop — presumably
   * because close() may already have released the fd and a short read is
   * reported as-is; NOTE(review): confirm against libuv's design notes.
   */
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;  /* Reset so a stale EINTR cannot trigger a spurious retry. */

/* Expands to one switch case per fs_type: run `action`, capture its result. */
#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  /* Translate the -1/errno convention into libuv's negated error codes. */
  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* Successful stat family: expose the embedded stat buffer via req->ptr. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1409
1410
/* Event-loop completion callback, run after uv__fs_work() finishes (or the
 * request is canceled): unregister the request and invoke the user callback.
 */
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (status == UV_ECANCELED) {
    /* A canceled request never ran, so its result must still be the
     * initial 0; report the cancellation through req->result instead.
     */
    assert(req->result == 0);
    req->result = UV_ECANCELED;
  }

  req->cb(req);
}
1424
1425
/* Public access(2) wrapper; the worker runs access(req->path, req->flags).
 * INIT/PATH/POST are request-setup macros defined earlier in this file.
 */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;  /* access(2) amode: F_OK or R_OK|W_OK|X_OK bits. */
  POST;
}
1436
1437
/* Public chmod(2) wrapper; the worker runs chmod(req->path, req->mode). */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1448
1449
/* Public chown(2) wrapper; the worker runs chown(path, uid, gid). */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1462
1463
/* Close `file`; the worker calls uv__fs_close() (not retried on EINTR). */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}
1469
1470
/* Public fchmod(2) wrapper; the worker runs fchmod(req->file, req->mode). */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1481
1482
/* Public fchown(2) wrapper; the worker runs fchown(file, uid, gid). */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1495
1496
/* Public lchown(2) wrapper (does not follow symlinks); the worker runs
 * lchown(path, uid, gid).
 */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1509
1510
/* Flush file data for `file`; the worker calls uv__fs_fdatasync(). */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}
1516
1517
/* Stat an open fd; the worker fills req->statbuf via uv__fs_fstat() and,
 * on success, uv__fs_work() points req->ptr at it.
 */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}
1523
1524
/* Flush data and metadata for `file`; the worker calls uv__fs_fsync(). */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}
1530
1531
/* Public ftruncate(2) wrapper; the worker runs ftruncate(req->file, req->off)
 * — the off field doubles as the new length here.
 */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}
1542
1543
/* Set access/modification times on an open fd (times as seconds with a
 * fractional part); the worker calls uv__fs_futime().
 */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1556
1557
/* lstat() a path (symlink not followed); result lands in req->statbuf. */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}
1563
1564
/* Create a hard link; the worker runs link(req->path, req->new_path).
 * PATH2 copies both paths into the request.
 */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}
1574
1575
/* Public mkdir(2) wrapper; the worker runs mkdir(req->path, req->mode). */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
1586
1587
/* Create a unique temporary directory from template `tpl`. The template is
 * always copied (even for sync calls) because mkdtemp(3) rewrites it in
 * place; uv_fs_req_cleanup() frees req->path for MKDTEMP unconditionally.
 */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1598
1599
/* Open a file; the worker calls uv__fs_open() with req->flags / req->mode. */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  POST;
}
1612
1613
1614int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
1615 uv_file file,
1616 const uv_buf_t bufs[],
1617 unsigned int nbufs,
1618 int64_t off,
1619 uv_fs_cb cb) {
1620 INIT(READ);
1621
1622 if (bufs == NULL || nbufs == 0)
1623 return UV_EINVAL;
1624
1625 req->file = file;
1626
1627 req->nbufs = nbufs;
1628 req->bufs = req->bufsml;
1629 if (nbufs > ARRAY_SIZE(req->bufsml))
1630 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
1631
1632 if (req->bufs == NULL)
1633 return UV_ENOMEM;
1634
1635 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
1636
1637 req->off = off;
1638 POST;
1639}
1640
1641
/* Read a whole directory in one shot; the worker calls uv__fs_scandir().
 * Entries are later consumed through uv_fs_scandir_next().
 */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
1652
/* Open a directory for incremental iteration; the worker calls
 * uv__fs_opendir(). The resulting handle is read with uv_fs_readdir()
 * and released with uv_fs_closedir().
 */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
1661
/* Read the next batch of entries from a directory opened with
 * uv_fs_opendir(). The caller must have set dir->dirents; the uv_dir_t is
 * handed to uv__fs_readdir() through req->ptr.
 */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
1674
/* Close a directory handle from uv_fs_opendir(); the worker calls
 * uv__fs_closedir() on the uv_dir_t passed through req->ptr.
 */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
1687
/* Read a symlink's target; the worker calls uv__fs_readlink(). */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
1696
1697
/* Canonicalize a path; the worker calls uv__fs_realpath(). */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
1706
1707
/* Public rename(2) wrapper; the worker runs rename(path, new_path). */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}
1717
1718
/* Public rmdir(2) wrapper; the worker runs rmdir(req->path). */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
1724
1725
/* Copy `len` bytes from in_fd (at `off`) to out_fd; the worker calls
 * uv__fs_sendfile(). Note the field repurposing below: the request has no
 * dedicated slots for a second fd or a length.
 */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack: flags field smuggles the source fd */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len; /* first inline buf slot smuggles the byte count */
  POST;
}
1740
1741
/* stat() a path; the worker fills req->statbuf and points req->ptr at it. */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}
1747
1748
/* Create a symlink; the worker runs symlink(path, new_path). The flags
 * argument is stored but only meaningful on Windows (UV_FS_SYMLINK_* —
 * NOTE(review): not consulted by the POSIX dispatcher above).
 */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}
1760
1761
/* Public unlink(2) wrapper; the worker runs unlink(req->path). */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}
1767
1768
/* Set access/modification times by path (seconds with a fractional part);
 * the worker calls uv__fs_utime().
 */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1781
1782
1783int uv_fs_write(uv_loop_t* loop,
1784 uv_fs_t* req,
1785 uv_file file,
1786 const uv_buf_t bufs[],
1787 unsigned int nbufs,
1788 int64_t off,
1789 uv_fs_cb cb) {
1790 INIT(WRITE);
1791
1792 if (bufs == NULL || nbufs == 0)
1793 return UV_EINVAL;
1794
1795 req->file = file;
1796
1797 req->nbufs = nbufs;
1798 req->bufs = req->bufsml;
1799 if (nbufs > ARRAY_SIZE(req->bufsml))
1800 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
1801
1802 if (req->bufs == NULL)
1803 return UV_ENOMEM;
1804
1805 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
1806
1807 req->off = off;
1808 POST;
1809}
1810
1811
/* Release memory owned by a uv_fs_t after its callback has run (or after a
 * synchronous call). Safe to call repeatedly: every freed pointer is NULLed
 * and each free is guarded.
 */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the
   * exception to the rule, it always allocates memory.
   */
  if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP))
    uv__free((void*) req->path); /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Buffer array copied by uv_fs_read()/uv_fs_write() when it didn't fit
   * in the inline bufsml storage.
   */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* Skip freeing req->ptr when it is the caller's uv_dir_t (OPENDIR —
   * presumably released through uv_fs_closedir) or the stat buffer embedded
   * in the request itself.
   */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
1841
1842
1843int uv_fs_copyfile(uv_loop_t* loop,
1844 uv_fs_t* req,
1845 const char* path,
1846 const char* new_path,
1847 int flags,
1848 uv_fs_cb cb) {
1849 INIT(COPYFILE);
1850
1851 if (flags & ~(UV_FS_COPYFILE_EXCL |
1852 UV_FS_COPYFILE_FICLONE |
1853 UV_FS_COPYFILE_FICLONE_FORCE)) {
1854 return UV_EINVAL;
1855 }
1856
1857 PATH2;
1858 req->flags = flags;
1859 POST;
1860}
1861