1 | /* |
2 | * Vhost User library |
3 | * |
4 | * Copyright IBM, Corp. 2007 |
5 | * Copyright (c) 2016 Red Hat, Inc. |
6 | * |
7 | * Authors: |
8 | * Anthony Liguori <aliguori@us.ibm.com> |
9 | * Marc-André Lureau <mlureau@redhat.com> |
10 | * Victor Kaplansky <victork@redhat.com> |
11 | * |
12 | * This work is licensed under the terms of the GNU GPL, version 2 or |
13 | * later. See the COPYING file in the top-level directory. |
14 | */ |
15 | |
16 | /* this code avoids GLib dependency */ |
17 | #include <stdlib.h> |
18 | #include <stdio.h> |
19 | #include <unistd.h> |
20 | #include <stdarg.h> |
21 | #include <errno.h> |
22 | #include <string.h> |
23 | #include <assert.h> |
24 | #include <inttypes.h> |
25 | #include <sys/types.h> |
26 | #include <sys/socket.h> |
27 | #include <sys/eventfd.h> |
28 | #include <sys/mman.h> |
29 | #include "qemu/compiler.h" |
30 | |
31 | #if defined(__linux__) |
32 | #include <sys/syscall.h> |
33 | #include <fcntl.h> |
34 | #include <sys/ioctl.h> |
35 | #include <linux/vhost.h> |
36 | |
37 | #ifdef __NR_userfaultfd |
38 | #include <linux/userfaultfd.h> |
39 | #endif |
40 | |
41 | #endif |
42 | |
43 | #include "qemu/atomic.h" |
44 | #include "qemu/osdep.h" |
45 | #include "qemu/memfd.h" |
46 | |
47 | #include "libvhost-user.h" |
48 | |
49 | /* usually provided by GLib */ |
50 | #ifndef MIN |
51 | #define MIN(x, y) ({ \ |
52 | typeof(x) _min1 = (x); \ |
53 | typeof(y) _min2 = (y); \ |
54 | (void) (&_min1 == &_min2); \ |
55 | _min1 < _min2 ? _min1 : _min2; }) |
56 | #endif |
57 | |
58 | /* Round number down to multiple */ |
59 | #define ALIGN_DOWN(n, m) ((n) / (m) * (m)) |
60 | |
61 | /* Round number up to multiple */ |
62 | #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m)) |
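/* e.g. ALIGN_DOWN(10, 8) == 8 and ALIGN_UP(10, 8) == 16 */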
63 | |
64 | /* Align each region to cache line size in inflight buffer */ |
65 | #define INFLIGHT_ALIGNMENT 64 |
66 | |
67 | /* The version of inflight buffer */ |
68 | #define INFLIGHT_VERSION 1 |
69 | |
70 | #define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64) |
71 | |
72 | /* The version of the protocol we support */ |
73 | #define VHOST_USER_VERSION 1 |
74 | #define LIBVHOST_USER_DEBUG 0 |
75 | |
76 | #define DPRINT(...) \ |
77 | do { \ |
78 | if (LIBVHOST_USER_DEBUG) { \ |
79 | fprintf(stderr, __VA_ARGS__); \ |
80 | } \ |
81 | } while (0) |
82 | |
83 | static inline |
84 | bool has_feature(uint64_t features, unsigned int fbit) |
85 | { |
86 | assert(fbit < 64); |
87 | return !!(features & (1ULL << fbit)); |
88 | } |
89 | |
90 | static inline |
91 | bool vu_has_feature(VuDev *dev, |
92 | unsigned int fbit) |
93 | { |
94 | return has_feature(dev->features, fbit); |
95 | } |
96 | |
97 | static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) |
98 | { |
99 | return has_feature(dev->protocol_features, fbit); |
100 | } |
101 | |
102 | static const char * |
103 | vu_request_to_string(unsigned int req) |
104 | { |
105 | #define REQ(req) [req] = #req |
106 | static const char *vu_request_str[] = { |
107 | REQ(VHOST_USER_NONE), |
108 | REQ(VHOST_USER_GET_FEATURES), |
109 | REQ(VHOST_USER_SET_FEATURES), |
110 | REQ(VHOST_USER_SET_OWNER), |
111 | REQ(VHOST_USER_RESET_OWNER), |
112 | REQ(VHOST_USER_SET_MEM_TABLE), |
113 | REQ(VHOST_USER_SET_LOG_BASE), |
114 | REQ(VHOST_USER_SET_LOG_FD), |
115 | REQ(VHOST_USER_SET_VRING_NUM), |
116 | REQ(VHOST_USER_SET_VRING_ADDR), |
117 | REQ(VHOST_USER_SET_VRING_BASE), |
118 | REQ(VHOST_USER_GET_VRING_BASE), |
119 | REQ(VHOST_USER_SET_VRING_KICK), |
120 | REQ(VHOST_USER_SET_VRING_CALL), |
121 | REQ(VHOST_USER_SET_VRING_ERR), |
122 | REQ(VHOST_USER_GET_PROTOCOL_FEATURES), |
123 | REQ(VHOST_USER_SET_PROTOCOL_FEATURES), |
124 | REQ(VHOST_USER_GET_QUEUE_NUM), |
125 | REQ(VHOST_USER_SET_VRING_ENABLE), |
126 | REQ(VHOST_USER_SEND_RARP), |
127 | REQ(VHOST_USER_NET_SET_MTU), |
128 | REQ(VHOST_USER_SET_SLAVE_REQ_FD), |
129 | REQ(VHOST_USER_IOTLB_MSG), |
130 | REQ(VHOST_USER_SET_VRING_ENDIAN), |
131 | REQ(VHOST_USER_GET_CONFIG), |
132 | REQ(VHOST_USER_SET_CONFIG), |
133 | REQ(VHOST_USER_POSTCOPY_ADVISE), |
134 | REQ(VHOST_USER_POSTCOPY_LISTEN), |
135 | REQ(VHOST_USER_POSTCOPY_END), |
136 | REQ(VHOST_USER_GET_INFLIGHT_FD), |
137 | REQ(VHOST_USER_SET_INFLIGHT_FD), |
138 | REQ(VHOST_USER_GPU_SET_SOCKET), |
139 | REQ(VHOST_USER_MAX), |
140 | }; |
141 | #undef REQ |
142 | |
143 | if (req < VHOST_USER_MAX) { |
144 | return vu_request_str[req]; |
145 | } else { |
        return "unknown";
147 | } |
148 | } |
149 | |
150 | static void |
151 | vu_panic(VuDev *dev, const char *msg, ...) |
152 | { |
153 | char *buf = NULL; |
154 | va_list ap; |
155 | |
156 | va_start(ap, msg); |
157 | if (vasprintf(&buf, msg, ap) < 0) { |
158 | buf = NULL; |
159 | } |
160 | va_end(ap); |
161 | |
162 | dev->broken = true; |
163 | dev->panic(dev, buf); |
164 | free(buf); |
165 | |
166 | /* FIXME: find a way to call virtio_error? */ |
167 | } |
168 | |
169 | /* Translate guest physical address to our virtual address. */ |
170 | void * |
171 | vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) |
172 | { |
173 | int i; |
174 | |
175 | if (*plen == 0) { |
176 | return NULL; |
177 | } |
178 | |
179 | /* Find matching memory region. */ |
180 | for (i = 0; i < dev->nregions; i++) { |
181 | VuDevRegion *r = &dev->regions[i]; |
182 | |
183 | if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) { |
184 | if ((guest_addr + *plen) > (r->gpa + r->size)) { |
185 | *plen = r->gpa + r->size - guest_addr; |
186 | } |
187 | return (void *)(uintptr_t) |
188 | guest_addr - r->gpa + r->mmap_addr + r->mmap_offset; |
189 | } |
190 | } |
191 | |
192 | return NULL; |
193 | } |
194 | |
195 | /* Translate qemu virtual address to our virtual address. */ |
196 | static void * |
197 | qva_to_va(VuDev *dev, uint64_t qemu_addr) |
198 | { |
199 | int i; |
200 | |
201 | /* Find matching memory region. */ |
202 | for (i = 0; i < dev->nregions; i++) { |
203 | VuDevRegion *r = &dev->regions[i]; |
204 | |
205 | if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { |
206 | return (void *)(uintptr_t) |
207 | qemu_addr - r->qva + r->mmap_addr + r->mmap_offset; |
208 | } |
209 | } |
210 | |
211 | return NULL; |
212 | } |
213 | |
214 | static void |
215 | vmsg_close_fds(VhostUserMsg *vmsg) |
216 | { |
217 | int i; |
218 | |
219 | for (i = 0; i < vmsg->fd_num; i++) { |
220 | close(vmsg->fds[i]); |
221 | } |
222 | } |
223 | |
224 | /* Set reply payload.u64 and clear request flags and fd_num */ |
225 | static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val) |
226 | { |
227 | vmsg->flags = 0; /* defaults will be set by vu_send_reply() */ |
228 | vmsg->size = sizeof(vmsg->payload.u64); |
229 | vmsg->payload.u64 = val; |
230 | vmsg->fd_num = 0; |
231 | } |
232 | |
233 | /* A test to see if we have userfault available */ |
234 | static bool |
235 | have_userfault(void) |
236 | { |
237 | #if defined(__linux__) && defined(__NR_userfaultfd) &&\ |
238 | defined(UFFD_FEATURE_MISSING_SHMEM) &&\ |
239 | defined(UFFD_FEATURE_MISSING_HUGETLBFS) |
240 | /* Now test the kernel we're running on really has the features */ |
241 | int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); |
242 | struct uffdio_api api_struct; |
243 | if (ufd < 0) { |
244 | return false; |
245 | } |
246 | |
247 | api_struct.api = UFFD_API; |
248 | api_struct.features = UFFD_FEATURE_MISSING_SHMEM | |
249 | UFFD_FEATURE_MISSING_HUGETLBFS; |
250 | if (ioctl(ufd, UFFDIO_API, &api_struct)) { |
251 | close(ufd); |
252 | return false; |
253 | } |
254 | close(ufd); |
255 | return true; |
256 | |
257 | #else |
258 | return false; |
259 | #endif |
260 | } |
261 | |
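/*
 * Read a single vhost-user message from conn_fd: the fixed-size header
 * first (together with any file descriptors passed as SCM_RIGHTS
 * ancillary data), then the variable-sized payload.  Marks the device
 * broken and returns false on error.
 */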
262 | static bool |
263 | vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) |
264 | { |
265 | char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { }; |
266 | struct iovec iov = { |
267 | .iov_base = (char *)vmsg, |
268 | .iov_len = VHOST_USER_HDR_SIZE, |
269 | }; |
270 | struct msghdr msg = { |
271 | .msg_iov = &iov, |
272 | .msg_iovlen = 1, |
273 | .msg_control = control, |
274 | .msg_controllen = sizeof(control), |
275 | }; |
276 | size_t fd_size; |
277 | struct cmsghdr *cmsg; |
278 | int rc; |
279 | |
280 | do { |
281 | rc = recvmsg(conn_fd, &msg, 0); |
282 | } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); |
283 | |
284 | if (rc < 0) { |
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
286 | return false; |
287 | } |
288 | |
289 | vmsg->fd_num = 0; |
290 | for (cmsg = CMSG_FIRSTHDR(&msg); |
291 | cmsg != NULL; |
292 | cmsg = CMSG_NXTHDR(&msg, cmsg)) |
293 | { |
294 | if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { |
295 | fd_size = cmsg->cmsg_len - CMSG_LEN(0); |
296 | vmsg->fd_num = fd_size / sizeof(int); |
297 | memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size); |
298 | break; |
299 | } |
300 | } |
301 | |
302 | if (vmsg->size > sizeof(vmsg->payload)) { |
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
307 | goto fail; |
308 | } |
309 | |
310 | if (vmsg->size) { |
311 | do { |
312 | rc = read(conn_fd, &vmsg->payload, vmsg->size); |
313 | } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); |
314 | |
315 | if (rc <= 0) { |
            vu_panic(dev, "Error while reading: %s", strerror(errno));
317 | goto fail; |
318 | } |
319 | |
320 | assert(rc == vmsg->size); |
321 | } |
322 | |
323 | return true; |
324 | |
325 | fail: |
326 | vmsg_close_fds(vmsg); |
327 | |
328 | return false; |
329 | } |
330 | |
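/*
 * Send a vhost-user message on conn_fd: the header goes out via
 * sendmsg() so that any descriptors in vmsg->fds can be attached as
 * SCM_RIGHTS ancillary data; the payload (or vmsg->data, if set) is
 * written afterwards.
 */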
331 | static bool |
332 | vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) |
333 | { |
334 | int rc; |
335 | uint8_t *p = (uint8_t *)vmsg; |
336 | char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { }; |
337 | struct iovec iov = { |
338 | .iov_base = (char *)vmsg, |
339 | .iov_len = VHOST_USER_HDR_SIZE, |
340 | }; |
341 | struct msghdr msg = { |
342 | .msg_iov = &iov, |
343 | .msg_iovlen = 1, |
344 | .msg_control = control, |
345 | }; |
346 | struct cmsghdr *cmsg; |
347 | |
348 | memset(control, 0, sizeof(control)); |
349 | assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS); |
350 | if (vmsg->fd_num > 0) { |
351 | size_t fdsize = vmsg->fd_num * sizeof(int); |
352 | msg.msg_controllen = CMSG_SPACE(fdsize); |
353 | cmsg = CMSG_FIRSTHDR(&msg); |
354 | cmsg->cmsg_len = CMSG_LEN(fdsize); |
355 | cmsg->cmsg_level = SOL_SOCKET; |
356 | cmsg->cmsg_type = SCM_RIGHTS; |
357 | memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize); |
358 | } else { |
359 | msg.msg_controllen = 0; |
360 | } |
361 | |
362 | do { |
363 | rc = sendmsg(conn_fd, &msg, 0); |
364 | } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); |
365 | |
366 | if (vmsg->size) { |
367 | do { |
368 | if (vmsg->data) { |
369 | rc = write(conn_fd, vmsg->data, vmsg->size); |
370 | } else { |
371 | rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size); |
372 | } |
373 | } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); |
374 | } |
375 | |
376 | if (rc <= 0) { |
        vu_panic(dev, "Error while writing: %s", strerror(errno));
378 | return false; |
379 | } |
380 | |
381 | return true; |
382 | } |
383 | |
384 | static bool |
385 | vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) |
386 | { |
387 | /* Set the version in the flags when sending the reply */ |
388 | vmsg->flags &= ~VHOST_USER_VERSION_MASK; |
389 | vmsg->flags |= VHOST_USER_VERSION; |
390 | vmsg->flags |= VHOST_USER_REPLY_MASK; |
391 | |
392 | return vu_message_write(dev, conn_fd, vmsg); |
393 | } |
394 | |
395 | static bool |
396 | vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg) |
397 | { |
398 | VhostUserMsg msg_reply; |
399 | |
400 | if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { |
401 | return true; |
402 | } |
403 | |
404 | if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) { |
405 | return false; |
406 | } |
407 | |
408 | if (msg_reply.request != vmsg->request) { |
409 | DPRINT("Received unexpected msg type. Expected %d received %d" , |
410 | vmsg->request, msg_reply.request); |
411 | return false; |
412 | } |
413 | |
414 | return msg_reply.payload.u64 == 0; |
415 | } |
416 | |
417 | /* Kick the log_call_fd if required. */ |
418 | static void |
419 | vu_log_kick(VuDev *dev) |
420 | { |
421 | if (dev->log_call_fd != -1) { |
422 | DPRINT("Kicking the QEMU's log...\n" ); |
423 | if (eventfd_write(dev->log_call_fd, 1) < 0) { |
424 | vu_panic(dev, "Error writing eventfd: %s" , strerror(errno)); |
425 | } |
426 | } |
427 | } |
428 | |
429 | static void |
430 | vu_log_page(uint8_t *log_table, uint64_t page) |
431 | { |
432 | DPRINT("Logged dirty guest page: %" PRId64"\n" , page); |
433 | atomic_or(&log_table[page / 8], 1 << (page % 8)); |
434 | } |
435 | |
436 | static void |
437 | vu_log_write(VuDev *dev, uint64_t address, uint64_t length) |
438 | { |
439 | uint64_t page; |
440 | |
441 | if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || |
442 | !dev->log_table || !length) { |
443 | return; |
444 | } |
445 | |
446 | assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); |
447 | |
448 | page = address / VHOST_LOG_PAGE; |
449 | while (page * VHOST_LOG_PAGE < address + length) { |
450 | vu_log_page(dev->log_table, page); |
451 | page += 1; |
452 | } |
453 | |
454 | vu_log_kick(dev); |
455 | } |
456 | |
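/*
 * Watch callback for a virtqueue's kick eventfd: drain the eventfd and
 * invoke the handler registered with vu_set_queue_handler().
 */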
457 | static void |
458 | vu_kick_cb(VuDev *dev, int condition, void *data) |
459 | { |
460 | int index = (intptr_t)data; |
461 | VuVirtq *vq = &dev->vq[index]; |
462 | int sock = vq->kick_fd; |
463 | eventfd_t kick_data; |
464 | ssize_t rc; |
465 | |
466 | rc = eventfd_read(sock, &kick_data); |
467 | if (rc == -1) { |
468 | vu_panic(dev, "kick eventfd_read(): %s" , strerror(errno)); |
469 | dev->remove_watch(dev, dev->vq[index].kick_fd); |
470 | } else { |
471 | DPRINT("Got kick_data: %016" PRIx64" handler:%p idx:%d\n" , |
472 | kick_data, vq->handler, index); |
473 | if (vq->handler) { |
474 | vq->handler(dev, index); |
475 | } |
476 | } |
477 | } |
478 | |
479 | static bool |
480 | vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg) |
481 | { |
482 | vmsg->payload.u64 = |
483 | 1ULL << VHOST_F_LOG_ALL | |
484 | 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; |
485 | |
486 | if (dev->iface->get_features) { |
487 | vmsg->payload.u64 |= dev->iface->get_features(dev); |
488 | } |
489 | |
490 | vmsg->size = sizeof(vmsg->payload.u64); |
491 | vmsg->fd_num = 0; |
492 | |
493 | DPRINT("Sending back to guest u64: 0x%016" PRIx64"\n" , vmsg->payload.u64); |
494 | |
495 | return true; |
496 | } |
497 | |
498 | static void |
499 | vu_set_enable_all_rings(VuDev *dev, bool enabled) |
500 | { |
501 | uint16_t i; |
502 | |
503 | for (i = 0; i < dev->max_queues; i++) { |
504 | dev->vq[i].enable = enabled; |
505 | } |
506 | } |
507 | |
508 | static bool |
509 | vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) |
510 | { |
511 | DPRINT("u64: 0x%016" PRIx64"\n" , vmsg->payload.u64); |
512 | |
513 | dev->features = vmsg->payload.u64; |
514 | |
    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
516 | vu_set_enable_all_rings(dev, true); |
517 | } |
518 | |
519 | if (dev->iface->set_features) { |
520 | dev->iface->set_features(dev, dev->features); |
521 | } |
522 | |
523 | return false; |
524 | } |
525 | |
526 | static bool |
527 | vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg) |
528 | { |
529 | return false; |
530 | } |
531 | |
532 | static void |
533 | vu_close_log(VuDev *dev) |
534 | { |
535 | if (dev->log_table) { |
536 | if (munmap(dev->log_table, dev->log_size) != 0) { |
537 | perror("close log munmap() error" ); |
538 | } |
539 | |
540 | dev->log_table = NULL; |
541 | } |
542 | if (dev->log_call_fd != -1) { |
543 | close(dev->log_call_fd); |
544 | dev->log_call_fd = -1; |
545 | } |
546 | } |
547 | |
548 | static bool |
549 | vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) |
550 | { |
551 | vu_set_enable_all_rings(dev, false); |
552 | |
553 | return false; |
554 | } |
555 | |
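/*
 * Postcopy variant of SET_MEM_TABLE: map every region PROT_NONE first,
 * reply to QEMU with the resulting addresses, wait for its
 * acknowledgement, then register each range with userfaultfd and flip
 * the mappings to read/write.
 */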
556 | static bool |
557 | vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg) |
558 | { |
559 | int i; |
560 | VhostUserMemory m = vmsg->payload.memory, *memory = &m; |
561 | dev->nregions = memory->nregions; |
562 | |
563 | DPRINT("Nregions: %d\n" , memory->nregions); |
564 | for (i = 0; i < dev->nregions; i++) { |
565 | void *mmap_addr; |
566 | VhostUserMemoryRegion *msg_region = &memory->regions[i]; |
567 | VuDevRegion *dev_region = &dev->regions[i]; |
568 | |
569 | DPRINT("Region %d\n" , i); |
570 | DPRINT(" guest_phys_addr: 0x%016" PRIx64"\n" , |
571 | msg_region->guest_phys_addr); |
572 | DPRINT(" memory_size: 0x%016" PRIx64"\n" , |
573 | msg_region->memory_size); |
574 | DPRINT(" userspace_addr 0x%016" PRIx64"\n" , |
575 | msg_region->userspace_addr); |
576 | DPRINT(" mmap_offset 0x%016" PRIx64"\n" , |
577 | msg_region->mmap_offset); |
578 | |
579 | dev_region->gpa = msg_region->guest_phys_addr; |
580 | dev_region->size = msg_region->memory_size; |
581 | dev_region->qva = msg_region->userspace_addr; |
582 | dev_region->mmap_offset = msg_region->mmap_offset; |
583 | |
        /* We don't use the offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing the region before we userfault it.
         */
590 | mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, |
591 | PROT_NONE, MAP_SHARED, |
592 | vmsg->fds[i], 0); |
593 | |
594 | if (mmap_addr == MAP_FAILED) { |
595 | vu_panic(dev, "region mmap error: %s" , strerror(errno)); |
596 | } else { |
597 | dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; |
598 | DPRINT(" mmap_addr: 0x%016" PRIx64"\n" , |
599 | dev_region->mmap_addr); |
600 | } |
601 | |
602 | /* Return the address to QEMU so that it can translate the ufd |
603 | * fault addresses back. |
604 | */ |
605 | msg_region->userspace_addr = (uintptr_t)(mmap_addr + |
606 | dev_region->mmap_offset); |
607 | close(vmsg->fds[i]); |
608 | } |
609 | |
610 | /* Send the message back to qemu with the addresses filled in */ |
611 | vmsg->fd_num = 0; |
612 | if (!vu_send_reply(dev, dev->sock, vmsg)) { |
613 | vu_panic(dev, "failed to respond to set-mem-table for postcopy" ); |
614 | return false; |
615 | } |
616 | |
617 | /* Wait for QEMU to confirm that it's registered the handler for the |
618 | * faults. |
619 | */ |
620 | if (!vu_message_read(dev, dev->sock, vmsg) || |
621 | vmsg->size != sizeof(vmsg->payload.u64) || |
622 | vmsg->payload.u64 != 0) { |
623 | vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table" ); |
624 | return false; |
625 | } |
626 | |
627 | /* OK, now we can go and register the memory and generate faults */ |
628 | for (i = 0; i < dev->nregions; i++) { |
629 | VuDevRegion *dev_region = &dev->regions[i]; |
630 | int ret; |
631 | #ifdef UFFDIO_REGISTER |
        /* We should already have an open ufd. Mark each memory
         * range as registered with userfaultfd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to handle huge pages?
         */
639 | ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, |
640 | dev_region->size + dev_region->mmap_offset, |
641 | MADV_DONTNEED); |
642 | if (ret) { |
643 | fprintf(stderr, |
644 | "%s: Failed to madvise(DONTNEED) region %d: %s\n" , |
645 | __func__, i, strerror(errno)); |
646 | } |
        /* Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
651 | ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, |
652 | dev_region->size + dev_region->mmap_offset, |
653 | MADV_NOHUGEPAGE); |
654 | if (ret) { |
655 | /* Note: This can happen legally on kernels that are configured |
656 | * without madvise'able hugepages |
657 | */ |
658 | fprintf(stderr, |
659 | "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n" , |
660 | __func__, i, strerror(errno)); |
661 | } |
662 | struct uffdio_register reg_struct; |
663 | reg_struct.range.start = (uintptr_t)dev_region->mmap_addr; |
664 | reg_struct.range.len = dev_region->size + dev_region->mmap_offset; |
665 | reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; |
666 | |
667 | if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, ®_struct)) { |
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@0x%" PRIx64 " + size:0x%" PRIx64
                          " offset: 0x%" PRIx64 ": (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
674 | return false; |
675 | } |
676 | if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) { |
677 | vu_panic(dev, "%s Region (%d) doesn't support COPY" , |
678 | __func__, i); |
679 | return false; |
680 | } |
681 | DPRINT("%s: region %d: Registered userfault for %" |
682 | PRIx64 " + %" PRIx64 "\n" , __func__, i, |
683 | (uint64_t)reg_struct.range.start, |
684 | (uint64_t)reg_struct.range.len); |
685 | /* Now it's registered we can let the client at it */ |
686 | if (mprotect((void *)(uintptr_t)dev_region->mmap_addr, |
687 | dev_region->size + dev_region->mmap_offset, |
688 | PROT_READ | PROT_WRITE)) { |
689 | vu_panic(dev, "failed to mprotect region %d for postcopy (%s)" , |
690 | i, strerror(errno)); |
691 | return false; |
692 | } |
693 | /* TODO: Stash 'zero' support flags somewhere */ |
694 | #endif |
695 | } |
696 | |
697 | return false; |
698 | } |
699 | |
700 | static bool |
701 | vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) |
702 | { |
703 | int i; |
704 | VhostUserMemory m = vmsg->payload.memory, *memory = &m; |
705 | |
706 | for (i = 0; i < dev->nregions; i++) { |
707 | VuDevRegion *r = &dev->regions[i]; |
708 | void *m = (void *) (uintptr_t) r->mmap_addr; |
709 | |
710 | if (m) { |
711 | munmap(m, r->size + r->mmap_offset); |
712 | } |
713 | } |
714 | dev->nregions = memory->nregions; |
715 | |
716 | if (dev->postcopy_listening) { |
717 | return vu_set_mem_table_exec_postcopy(dev, vmsg); |
718 | } |
719 | |
720 | DPRINT("Nregions: %d\n" , memory->nregions); |
721 | for (i = 0; i < dev->nregions; i++) { |
722 | void *mmap_addr; |
723 | VhostUserMemoryRegion *msg_region = &memory->regions[i]; |
724 | VuDevRegion *dev_region = &dev->regions[i]; |
725 | |
726 | DPRINT("Region %d\n" , i); |
727 | DPRINT(" guest_phys_addr: 0x%016" PRIx64"\n" , |
728 | msg_region->guest_phys_addr); |
729 | DPRINT(" memory_size: 0x%016" PRIx64"\n" , |
730 | msg_region->memory_size); |
731 | DPRINT(" userspace_addr 0x%016" PRIx64"\n" , |
732 | msg_region->userspace_addr); |
733 | DPRINT(" mmap_offset 0x%016" PRIx64"\n" , |
734 | msg_region->mmap_offset); |
735 | |
736 | dev_region->gpa = msg_region->guest_phys_addr; |
737 | dev_region->size = msg_region->memory_size; |
738 | dev_region->qva = msg_region->userspace_addr; |
739 | dev_region->mmap_offset = msg_region->mmap_offset; |
740 | |
741 | /* We don't use offset argument of mmap() since the |
742 | * mapped address has to be page aligned, and we use huge |
743 | * pages. */ |
744 | mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, |
745 | PROT_READ | PROT_WRITE, MAP_SHARED, |
746 | vmsg->fds[i], 0); |
747 | |
748 | if (mmap_addr == MAP_FAILED) { |
749 | vu_panic(dev, "region mmap error: %s" , strerror(errno)); |
750 | } else { |
751 | dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; |
752 | DPRINT(" mmap_addr: 0x%016" PRIx64"\n" , |
753 | dev_region->mmap_addr); |
754 | } |
755 | |
756 | close(vmsg->fds[i]); |
757 | } |
758 | |
759 | return false; |
760 | } |
761 | |
762 | static bool |
763 | vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg) |
764 | { |
765 | int fd; |
766 | uint64_t log_mmap_size, log_mmap_offset; |
767 | void *rc; |
768 | |
769 | if (vmsg->fd_num != 1 || |
770 | vmsg->size != sizeof(vmsg->payload.log)) { |
771 | vu_panic(dev, "Invalid log_base message" ); |
772 | return true; |
773 | } |
774 | |
775 | fd = vmsg->fds[0]; |
776 | log_mmap_offset = vmsg->payload.log.mmap_offset; |
777 | log_mmap_size = vmsg->payload.log.mmap_size; |
778 | DPRINT("Log mmap_offset: %" PRId64"\n" , log_mmap_offset); |
779 | DPRINT("Log mmap_size: %" PRId64"\n" , log_mmap_size); |
780 | |
781 | rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, |
782 | log_mmap_offset); |
783 | close(fd); |
784 | if (rc == MAP_FAILED) { |
785 | perror("log mmap error" ); |
786 | } |
787 | |
788 | if (dev->log_table) { |
789 | munmap(dev->log_table, dev->log_size); |
790 | } |
791 | dev->log_table = rc; |
792 | dev->log_size = log_mmap_size; |
793 | |
794 | vmsg->size = sizeof(vmsg->payload.u64); |
795 | vmsg->fd_num = 0; |
796 | |
797 | return true; |
798 | } |
799 | |
800 | static bool |
801 | vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg) |
802 | { |
803 | if (vmsg->fd_num != 1) { |
804 | vu_panic(dev, "Invalid log_fd message" ); |
805 | return false; |
806 | } |
807 | |
808 | if (dev->log_call_fd != -1) { |
809 | close(dev->log_call_fd); |
810 | } |
811 | dev->log_call_fd = vmsg->fds[0]; |
812 | DPRINT("Got log_call_fd: %d\n" , vmsg->fds[0]); |
813 | |
814 | return false; |
815 | } |
816 | |
817 | static bool |
818 | vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg) |
819 | { |
820 | unsigned int index = vmsg->payload.state.index; |
821 | unsigned int num = vmsg->payload.state.num; |
822 | |
823 | DPRINT("State.index: %d\n" , index); |
824 | DPRINT("State.num: %d\n" , num); |
825 | dev->vq[index].vring.num = num; |
826 | |
827 | return false; |
828 | } |
829 | |
830 | static bool |
831 | vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) |
832 | { |
833 | struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr; |
834 | unsigned int index = vra->index; |
835 | VuVirtq *vq = &dev->vq[index]; |
836 | |
837 | DPRINT("vhost_vring_addr:\n" ); |
838 | DPRINT(" index: %d\n" , vra->index); |
839 | DPRINT(" flags: %d\n" , vra->flags); |
840 | DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n" , vra->desc_user_addr); |
841 | DPRINT(" used_user_addr: 0x%016" PRIx64 "\n" , vra->used_user_addr); |
842 | DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n" , vra->avail_user_addr); |
843 | DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n" , vra->log_guest_addr); |
844 | |
845 | vq->vring.flags = vra->flags; |
846 | vq->vring.desc = qva_to_va(dev, vra->desc_user_addr); |
847 | vq->vring.used = qva_to_va(dev, vra->used_user_addr); |
848 | vq->vring.avail = qva_to_va(dev, vra->avail_user_addr); |
849 | vq->vring.log_guest_addr = vra->log_guest_addr; |
850 | |
851 | DPRINT("Setting virtq addresses:\n" ); |
852 | DPRINT(" vring_desc at %p\n" , vq->vring.desc); |
853 | DPRINT(" vring_used at %p\n" , vq->vring.used); |
854 | DPRINT(" vring_avail at %p\n" , vq->vring.avail); |
855 | |
856 | if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) { |
857 | vu_panic(dev, "Invalid vring_addr message" ); |
858 | return false; |
859 | } |
860 | |
861 | vq->used_idx = vq->vring.used->idx; |
862 | |
863 | if (vq->last_avail_idx != vq->used_idx) { |
864 | bool resume = dev->iface->queue_is_processed_in_order && |
865 | dev->iface->queue_is_processed_in_order(dev, index); |
866 | |
867 | DPRINT("Last avail index != used index: %u != %u%s\n" , |
868 | vq->last_avail_idx, vq->used_idx, |
869 | resume ? ", resuming" : "" ); |
870 | |
871 | if (resume) { |
872 | vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx; |
873 | } |
874 | } |
875 | |
876 | return false; |
877 | } |
878 | |
879 | static bool |
880 | vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) |
881 | { |
882 | unsigned int index = vmsg->payload.state.index; |
883 | unsigned int num = vmsg->payload.state.num; |
884 | |
885 | DPRINT("State.index: %d\n" , index); |
886 | DPRINT("State.num: %d\n" , num); |
887 | dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; |
888 | |
889 | return false; |
890 | } |
891 | |
892 | static bool |
893 | vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) |
894 | { |
895 | unsigned int index = vmsg->payload.state.index; |
896 | |
897 | DPRINT("State.index: %d\n" , index); |
898 | vmsg->payload.state.num = dev->vq[index].last_avail_idx; |
899 | vmsg->size = sizeof(vmsg->payload.state); |
900 | |
901 | dev->vq[index].started = false; |
902 | if (dev->iface->queue_set_started) { |
903 | dev->iface->queue_set_started(dev, index, false); |
904 | } |
905 | |
906 | if (dev->vq[index].call_fd != -1) { |
907 | close(dev->vq[index].call_fd); |
908 | dev->vq[index].call_fd = -1; |
909 | } |
910 | if (dev->vq[index].kick_fd != -1) { |
911 | dev->remove_watch(dev, dev->vq[index].kick_fd); |
912 | close(dev->vq[index].kick_fd); |
913 | dev->vq[index].kick_fd = -1; |
914 | } |
915 | |
916 | return true; |
917 | } |
918 | |
919 | static bool |
920 | vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg) |
921 | { |
922 | int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; |
923 | |
924 | if (index >= dev->max_queues) { |
925 | vmsg_close_fds(vmsg); |
926 | vu_panic(dev, "Invalid queue index: %u" , index); |
927 | return false; |
928 | } |
929 | |
930 | if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK || |
931 | vmsg->fd_num != 1) { |
932 | vmsg_close_fds(vmsg); |
933 | vu_panic(dev, "Invalid fds in request: %d" , vmsg->request); |
934 | return false; |
935 | } |
936 | |
937 | return true; |
938 | } |
939 | |
940 | static int |
941 | inflight_desc_compare(const void *a, const void *b) |
942 | { |
943 | VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a, |
944 | *desc1 = (VuVirtqInflightDesc *)b; |
945 | |
946 | if (desc1->counter > desc0->counter && |
947 | (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { |
948 | return 1; |
949 | } |
950 | |
951 | return -1; |
952 | } |
953 | |
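/*
 * After a reconnect, recover the queue state recorded in the shared
 * inflight buffer: rebuild the list of descriptors that were still in
 * flight, sort it by submission counter, and kick the queue so that
 * processing resumes.
 */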
954 | static int |
955 | vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) |
956 | { |
957 | int i = 0; |
958 | |
959 | if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { |
960 | return 0; |
961 | } |
962 | |
963 | if (unlikely(!vq->inflight)) { |
964 | return -1; |
965 | } |
966 | |
967 | if (unlikely(!vq->inflight->version)) { |
968 | /* initialize the buffer */ |
969 | vq->inflight->version = INFLIGHT_VERSION; |
970 | return 0; |
971 | } |
972 | |
973 | vq->used_idx = vq->vring.used->idx; |
974 | vq->resubmit_num = 0; |
975 | vq->resubmit_list = NULL; |
976 | vq->counter = 0; |
977 | |
978 | if (unlikely(vq->inflight->used_idx != vq->used_idx)) { |
979 | vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0; |
980 | |
981 | barrier(); |
982 | |
983 | vq->inflight->used_idx = vq->used_idx; |
984 | } |
985 | |
986 | for (i = 0; i < vq->inflight->desc_num; i++) { |
987 | if (vq->inflight->desc[i].inflight == 1) { |
988 | vq->inuse++; |
989 | } |
990 | } |
991 | |
992 | vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; |
993 | |
994 | if (vq->inuse) { |
995 | vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse); |
996 | if (!vq->resubmit_list) { |
997 | return -1; |
998 | } |
999 | |
1000 | for (i = 0; i < vq->inflight->desc_num; i++) { |
1001 | if (vq->inflight->desc[i].inflight) { |
1002 | vq->resubmit_list[vq->resubmit_num].index = i; |
1003 | vq->resubmit_list[vq->resubmit_num].counter = |
1004 | vq->inflight->desc[i].counter; |
1005 | vq->resubmit_num++; |
1006 | } |
1007 | } |
1008 | |
1009 | if (vq->resubmit_num > 1) { |
1010 | qsort(vq->resubmit_list, vq->resubmit_num, |
1011 | sizeof(VuVirtqInflightDesc), inflight_desc_compare); |
1012 | } |
1013 | vq->counter = vq->resubmit_list[0].counter + 1; |
1014 | } |
1015 | |
1016 | /* in case of I/O hang after reconnecting */ |
1017 | if (eventfd_write(vq->kick_fd, 1)) { |
1018 | return -1; |
1019 | } |
1020 | |
1021 | return 0; |
1022 | } |
1023 | |
1024 | static bool |
1025 | vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg) |
1026 | { |
1027 | int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; |
1028 | |
1029 | DPRINT("u64: 0x%016" PRIx64"\n" , vmsg->payload.u64); |
1030 | |
1031 | if (!vu_check_queue_msg_file(dev, vmsg)) { |
1032 | return false; |
1033 | } |
1034 | |
1035 | if (dev->vq[index].kick_fd != -1) { |
1036 | dev->remove_watch(dev, dev->vq[index].kick_fd); |
1037 | close(dev->vq[index].kick_fd); |
1038 | dev->vq[index].kick_fd = -1; |
1039 | } |
1040 | |
1041 | dev->vq[index].kick_fd = vmsg->fds[0]; |
1042 | DPRINT("Got kick_fd: %d for vq: %d\n" , vmsg->fds[0], index); |
1043 | |
1044 | dev->vq[index].started = true; |
1045 | if (dev->iface->queue_set_started) { |
1046 | dev->iface->queue_set_started(dev, index, true); |
1047 | } |
1048 | |
1049 | if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { |
1050 | dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, |
1051 | vu_kick_cb, (void *)(long)index); |
1052 | |
1053 | DPRINT("Waiting for kicks on fd: %d for vq: %d\n" , |
1054 | dev->vq[index].kick_fd, index); |
1055 | } |
1056 | |
1057 | if (vu_check_queue_inflights(dev, &dev->vq[index])) { |
1058 | vu_panic(dev, "Failed to check inflights for vq: %d\n" , index); |
1059 | } |
1060 | |
1061 | return false; |
1062 | } |
1063 | |
1064 | void vu_set_queue_handler(VuDev *dev, VuVirtq *vq, |
1065 | vu_queue_handler_cb handler) |
1066 | { |
1067 | int qidx = vq - dev->vq; |
1068 | |
1069 | vq->handler = handler; |
1070 | if (vq->kick_fd >= 0) { |
1071 | if (handler) { |
1072 | dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, |
1073 | vu_kick_cb, (void *)(long)qidx); |
1074 | } else { |
1075 | dev->remove_watch(dev, vq->kick_fd); |
1076 | } |
1077 | } |
1078 | } |
1079 | |
1080 | bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, |
1081 | int size, int offset) |
1082 | { |
1083 | int qidx = vq - dev->vq; |
1084 | int fd_num = 0; |
1085 | VhostUserMsg vmsg = { |
1086 | .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG, |
1087 | .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, |
1088 | .size = sizeof(vmsg.payload.area), |
1089 | .payload.area = { |
1090 | .u64 = qidx & VHOST_USER_VRING_IDX_MASK, |
1091 | .size = size, |
1092 | .offset = offset, |
1093 | }, |
1094 | }; |
1095 | |
1096 | if (fd == -1) { |
1097 | vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; |
1098 | } else { |
1099 | vmsg.fds[fd_num++] = fd; |
1100 | } |
1101 | |
1102 | vmsg.fd_num = fd_num; |
1103 | |
1104 | if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) { |
1105 | return false; |
1106 | } |
1107 | |
1108 | if (!vu_message_write(dev, dev->slave_fd, &vmsg)) { |
1109 | return false; |
1110 | } |
1111 | |
1112 | return vu_process_message_reply(dev, &vmsg); |
1113 | } |
1114 | |
1115 | static bool |
1116 | vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) |
1117 | { |
1118 | int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; |
1119 | |
1120 | DPRINT("u64: 0x%016" PRIx64"\n" , vmsg->payload.u64); |
1121 | |
1122 | if (!vu_check_queue_msg_file(dev, vmsg)) { |
1123 | return false; |
1124 | } |
1125 | |
1126 | if (dev->vq[index].call_fd != -1) { |
1127 | close(dev->vq[index].call_fd); |
1128 | dev->vq[index].call_fd = -1; |
1129 | } |
1130 | |
1131 | dev->vq[index].call_fd = vmsg->fds[0]; |
1132 | |
    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vmsg->fds[0], 1)) {
        vu_panic(dev, "Error writing call eventfd: %s", strerror(errno));
        return false;
    }
1137 | |
1138 | DPRINT("Got call_fd: %d for vq: %d\n" , vmsg->fds[0], index); |
1139 | |
1140 | return false; |
1141 | } |
1142 | |
1143 | static bool |
1144 | vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) |
1145 | { |
1146 | int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; |
1147 | |
1148 | DPRINT("u64: 0x%016" PRIx64"\n" , vmsg->payload.u64); |
1149 | |
1150 | if (!vu_check_queue_msg_file(dev, vmsg)) { |
1151 | return false; |
1152 | } |
1153 | |
1154 | if (dev->vq[index].err_fd != -1) { |
1155 | close(dev->vq[index].err_fd); |
1156 | dev->vq[index].err_fd = -1; |
1157 | } |
1158 | |
1159 | dev->vq[index].err_fd = vmsg->fds[0]; |
1160 | |
1161 | return false; |
1162 | } |
1163 | |
1164 | static bool |
1165 | vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) |
1166 | { |
1167 | uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ | |
1168 | 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | |
1169 | 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | |
1170 | 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | |
1171 | 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD; |
1172 | |
1173 | if (have_userfault()) { |
1174 | features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT; |
1175 | } |
1176 | |
1177 | if (dev->iface->get_config && dev->iface->set_config) { |
1178 | features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG; |
1179 | } |
1180 | |
1181 | if (dev->iface->get_protocol_features) { |
1182 | features |= dev->iface->get_protocol_features(dev); |
1183 | } |
1184 | |
1185 | vmsg_set_reply_u64(vmsg, features); |
1186 | return true; |
1187 | } |
1188 | |
1189 | static bool |
1190 | vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) |
1191 | { |
1192 | uint64_t features = vmsg->payload.u64; |
1193 | |
1194 | DPRINT("u64: 0x%016" PRIx64"\n" , features); |
1195 | |
1196 | dev->protocol_features = vmsg->payload.u64; |
1197 | |
1198 | if (dev->iface->set_protocol_features) { |
1199 | dev->iface->set_protocol_features(dev, features); |
1200 | } |
1201 | |
1202 | return false; |
1203 | } |
1204 | |
1205 | static bool |
1206 | vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) |
1207 | { |
1208 | vmsg_set_reply_u64(vmsg, dev->max_queues); |
1209 | return true; |
1210 | } |
1211 | |
1212 | static bool |
1213 | vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) |
1214 | { |
1215 | unsigned int index = vmsg->payload.state.index; |
1216 | unsigned int enable = vmsg->payload.state.num; |
1217 | |
1218 | DPRINT("State.index: %d\n" , index); |
1219 | DPRINT("State.enable: %d\n" , enable); |
1220 | |
1221 | if (index >= dev->max_queues) { |
1222 | vu_panic(dev, "Invalid vring_enable index: %u" , index); |
1223 | return false; |
1224 | } |
1225 | |
1226 | dev->vq[index].enable = enable; |
1227 | return false; |
1228 | } |
1229 | |
1230 | static bool |
1231 | vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg) |
1232 | { |
1233 | if (vmsg->fd_num != 1) { |
1234 | vu_panic(dev, "Invalid slave_req_fd message (%d fd's)" , vmsg->fd_num); |
1235 | return false; |
1236 | } |
1237 | |
1238 | if (dev->slave_fd != -1) { |
1239 | close(dev->slave_fd); |
1240 | } |
1241 | dev->slave_fd = vmsg->fds[0]; |
1242 | DPRINT("Got slave_fd: %d\n" , vmsg->fds[0]); |
1243 | |
1244 | return false; |
1245 | } |
1246 | |
1247 | static bool |
1248 | vu_get_config(VuDev *dev, VhostUserMsg *vmsg) |
1249 | { |
1250 | int ret = -1; |
1251 | |
1252 | if (dev->iface->get_config) { |
1253 | ret = dev->iface->get_config(dev, vmsg->payload.config.region, |
1254 | vmsg->payload.config.size); |
1255 | } |
1256 | |
1257 | if (ret) { |
1258 | /* resize to zero to indicate an error to master */ |
1259 | vmsg->size = 0; |
1260 | } |
1261 | |
1262 | return true; |
1263 | } |
1264 | |
1265 | static bool |
1266 | vu_set_config(VuDev *dev, VhostUserMsg *vmsg) |
1267 | { |
1268 | int ret = -1; |
1269 | |
1270 | if (dev->iface->set_config) { |
1271 | ret = dev->iface->set_config(dev, vmsg->payload.config.region, |
1272 | vmsg->payload.config.offset, |
1273 | vmsg->payload.config.size, |
1274 | vmsg->payload.config.flags); |
1275 | if (ret) { |
1276 | vu_panic(dev, "Set virtio configuration space failed" ); |
1277 | } |
1278 | } |
1279 | |
1280 | return false; |
1281 | } |
1282 | |
1283 | static bool |
1284 | vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) |
1285 | { |
1286 | dev->postcopy_ufd = -1; |
1287 | #ifdef UFFDIO_API |
1288 | struct uffdio_api api_struct; |
1289 | |
1290 | dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); |
1291 | vmsg->size = 0; |
1292 | #endif |
1293 | |
1294 | if (dev->postcopy_ufd == -1) { |
1295 | vu_panic(dev, "Userfaultfd not available: %s" , strerror(errno)); |
1296 | goto out; |
1297 | } |
1298 | |
1299 | #ifdef UFFDIO_API |
1300 | api_struct.api = UFFD_API; |
1301 | api_struct.features = 0; |
1302 | if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { |
1303 | vu_panic(dev, "Failed UFFDIO_API: %s" , strerror(errno)); |
1304 | close(dev->postcopy_ufd); |
1305 | dev->postcopy_ufd = -1; |
1306 | goto out; |
1307 | } |
1308 | /* TODO: Stash feature flags somewhere */ |
1309 | #endif |
1310 | |
1311 | out: |
1312 | /* Return a ufd to the QEMU */ |
1313 | vmsg->fd_num = 1; |
1314 | vmsg->fds[0] = dev->postcopy_ufd; |
1315 | return true; /* = send a reply */ |
1316 | } |
1317 | |
1318 | static bool |
1319 | vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) |
1320 | { |
1321 | if (dev->nregions) { |
1322 | vu_panic(dev, "Regions already registered at postcopy-listen" ); |
1323 | vmsg_set_reply_u64(vmsg, -1); |
1324 | return true; |
1325 | } |
1326 | dev->postcopy_listening = true; |
1327 | |
1328 | vmsg_set_reply_u64(vmsg, 0); |
1329 | return true; |
1330 | } |
1331 | |
1332 | static bool |
1333 | vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) |
1334 | { |
1335 | DPRINT("%s: Entry\n" , __func__); |
1336 | dev->postcopy_listening = false; |
1337 | if (dev->postcopy_ufd > 0) { |
1338 | close(dev->postcopy_ufd); |
1339 | dev->postcopy_ufd = -1; |
1340 | DPRINT("%s: Done close\n" , __func__); |
1341 | } |
1342 | |
1343 | vmsg_set_reply_u64(vmsg, 0); |
1344 | DPRINT("%s: exit\n" , __func__); |
1345 | return true; |
1346 | } |
1347 | |
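/*
 * Size of one queue's slice of the inflight buffer: one VuDescStateSplit
 * per descriptor plus a trailing uint16_t, rounded up to the cache-line
 * sized INFLIGHT_ALIGNMENT.
 */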
1348 | static inline uint64_t |
1349 | vu_inflight_queue_size(uint16_t queue_size) |
1350 | { |
1351 | return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size + |
1352 | sizeof(uint16_t), INFLIGHT_ALIGNMENT); |
1353 | } |
1354 | |
1355 | static bool |
1356 | vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) |
1357 | { |
1358 | int fd; |
1359 | void *addr; |
1360 | uint64_t mmap_size; |
1361 | uint16_t num_queues, queue_size; |
1362 | |
1363 | if (vmsg->size != sizeof(vmsg->payload.inflight)) { |
1364 | vu_panic(dev, "Invalid get_inflight_fd message:%d" , vmsg->size); |
1365 | vmsg->payload.inflight.mmap_size = 0; |
1366 | return true; |
1367 | } |
1368 | |
1369 | num_queues = vmsg->payload.inflight.num_queues; |
1370 | queue_size = vmsg->payload.inflight.queue_size; |
1371 | |
    DPRINT("get_inflight_fd num_queues: %" PRId16 "\n", num_queues);
    DPRINT("get_inflight_fd queue_size: %" PRId16 "\n", queue_size);
1374 | |
1375 | mmap_size = vu_inflight_queue_size(queue_size) * num_queues; |
1376 | |
1377 | addr = qemu_memfd_alloc("vhost-inflight" , mmap_size, |
1378 | F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, |
1379 | &fd, NULL); |
1380 | |
1381 | if (!addr) { |
1382 | vu_panic(dev, "Failed to alloc vhost inflight area" ); |
1383 | vmsg->payload.inflight.mmap_size = 0; |
1384 | return true; |
1385 | } |
1386 | |
1387 | memset(addr, 0, mmap_size); |
1388 | |
1389 | dev->inflight_info.addr = addr; |
1390 | dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; |
1391 | dev->inflight_info.fd = vmsg->fds[0] = fd; |
1392 | vmsg->fd_num = 1; |
1393 | vmsg->payload.inflight.mmap_offset = 0; |
1394 | |
1395 | DPRINT("send inflight mmap_size: %" PRId64"\n" , |
1396 | vmsg->payload.inflight.mmap_size); |
1397 | DPRINT("send inflight mmap offset: %" PRId64"\n" , |
1398 | vmsg->payload.inflight.mmap_offset); |
1399 | |
1400 | return true; |
1401 | } |
1402 | |
1403 | static bool |
1404 | vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) |
1405 | { |
1406 | int fd, i; |
1407 | uint64_t mmap_size, mmap_offset; |
1408 | uint16_t num_queues, queue_size; |
1409 | void *rc; |
1410 | |
1411 | if (vmsg->fd_num != 1 || |
1412 | vmsg->size != sizeof(vmsg->payload.inflight)) { |
1413 | vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d" , |
1414 | vmsg->size, vmsg->fd_num); |
1415 | return false; |
1416 | } |
1417 | |
1418 | fd = vmsg->fds[0]; |
1419 | mmap_size = vmsg->payload.inflight.mmap_size; |
1420 | mmap_offset = vmsg->payload.inflight.mmap_offset; |
1421 | num_queues = vmsg->payload.inflight.num_queues; |
1422 | queue_size = vmsg->payload.inflight.queue_size; |
1423 | |
1424 | DPRINT("set_inflight_fd mmap_size: %" PRId64"\n" , mmap_size); |
1425 | DPRINT("set_inflight_fd mmap_offset: %" PRId64"\n" , mmap_offset); |
1426 | DPRINT("set_inflight_fd num_queues: %" PRId16"\n" , num_queues); |
1427 | DPRINT("set_inflight_fd queue_size: %" PRId16"\n" , queue_size); |
1428 | |
1429 | rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, |
1430 | fd, mmap_offset); |
1431 | |
1432 | if (rc == MAP_FAILED) { |
1433 | vu_panic(dev, "set_inflight_fd mmap error: %s" , strerror(errno)); |
1434 | return false; |
1435 | } |
1436 | |
1437 | if (dev->inflight_info.fd) { |
1438 | close(dev->inflight_info.fd); |
1439 | } |
1440 | |
1441 | if (dev->inflight_info.addr) { |
1442 | munmap(dev->inflight_info.addr, dev->inflight_info.size); |
1443 | } |
1444 | |
1445 | dev->inflight_info.fd = fd; |
1446 | dev->inflight_info.addr = rc; |
1447 | dev->inflight_info.size = mmap_size; |
1448 | |
1449 | for (i = 0; i < num_queues; i++) { |
1450 | dev->vq[i].inflight = (VuVirtqInflight *)rc; |
1451 | dev->vq[i].inflight->desc_num = queue_size; |
1452 | rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size)); |
1453 | } |
1454 | |
1455 | return false; |
1456 | } |
1457 | |
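/*
 * Dispatch one decoded vhost-user request, either to the device's
 * process_msg hook or to the built-in handler for that request type.
 * The return value tells vu_dispatch() whether a reply must be sent
 * back to the master.
 */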
1458 | static bool |
1459 | vu_process_message(VuDev *dev, VhostUserMsg *vmsg) |
1460 | { |
1461 | int do_reply = 0; |
1462 | |
1463 | /* Print out generic part of the request. */ |
1464 | DPRINT("================ Vhost user message ================\n" ); |
1465 | DPRINT("Request: %s (%d)\n" , vu_request_to_string(vmsg->request), |
1466 | vmsg->request); |
1467 | DPRINT("Flags: 0x%x\n" , vmsg->flags); |
1468 | DPRINT("Size: %d\n" , vmsg->size); |
1469 | |
1470 | if (vmsg->fd_num) { |
1471 | int i; |
1472 | DPRINT("Fds:" ); |
1473 | for (i = 0; i < vmsg->fd_num; i++) { |
1474 | DPRINT(" %d" , vmsg->fds[i]); |
1475 | } |
1476 | DPRINT("\n" ); |
1477 | } |
1478 | |
1479 | if (dev->iface->process_msg && |
1480 | dev->iface->process_msg(dev, vmsg, &do_reply)) { |
1481 | return do_reply; |
1482 | } |
1483 | |
1484 | switch (vmsg->request) { |
1485 | case VHOST_USER_GET_FEATURES: |
1486 | return vu_get_features_exec(dev, vmsg); |
1487 | case VHOST_USER_SET_FEATURES: |
1488 | return vu_set_features_exec(dev, vmsg); |
1489 | case VHOST_USER_GET_PROTOCOL_FEATURES: |
1490 | return vu_get_protocol_features_exec(dev, vmsg); |
1491 | case VHOST_USER_SET_PROTOCOL_FEATURES: |
1492 | return vu_set_protocol_features_exec(dev, vmsg); |
1493 | case VHOST_USER_SET_OWNER: |
1494 | return vu_set_owner_exec(dev, vmsg); |
1495 | case VHOST_USER_RESET_OWNER: |
1496 | return vu_reset_device_exec(dev, vmsg); |
1497 | case VHOST_USER_SET_MEM_TABLE: |
1498 | return vu_set_mem_table_exec(dev, vmsg); |
1499 | case VHOST_USER_SET_LOG_BASE: |
1500 | return vu_set_log_base_exec(dev, vmsg); |
1501 | case VHOST_USER_SET_LOG_FD: |
1502 | return vu_set_log_fd_exec(dev, vmsg); |
1503 | case VHOST_USER_SET_VRING_NUM: |
1504 | return vu_set_vring_num_exec(dev, vmsg); |
1505 | case VHOST_USER_SET_VRING_ADDR: |
1506 | return vu_set_vring_addr_exec(dev, vmsg); |
1507 | case VHOST_USER_SET_VRING_BASE: |
1508 | return vu_set_vring_base_exec(dev, vmsg); |
1509 | case VHOST_USER_GET_VRING_BASE: |
1510 | return vu_get_vring_base_exec(dev, vmsg); |
1511 | case VHOST_USER_SET_VRING_KICK: |
1512 | return vu_set_vring_kick_exec(dev, vmsg); |
1513 | case VHOST_USER_SET_VRING_CALL: |
1514 | return vu_set_vring_call_exec(dev, vmsg); |
1515 | case VHOST_USER_SET_VRING_ERR: |
1516 | return vu_set_vring_err_exec(dev, vmsg); |
1517 | case VHOST_USER_GET_QUEUE_NUM: |
1518 | return vu_get_queue_num_exec(dev, vmsg); |
1519 | case VHOST_USER_SET_VRING_ENABLE: |
1520 | return vu_set_vring_enable_exec(dev, vmsg); |
1521 | case VHOST_USER_SET_SLAVE_REQ_FD: |
1522 | return vu_set_slave_req_fd(dev, vmsg); |
1523 | case VHOST_USER_GET_CONFIG: |
1524 | return vu_get_config(dev, vmsg); |
1525 | case VHOST_USER_SET_CONFIG: |
1526 | return vu_set_config(dev, vmsg); |
1527 | case VHOST_USER_NONE: |
1528 | /* if you need processing before exit, override iface->process_msg */ |
1529 | exit(0); |
1530 | case VHOST_USER_POSTCOPY_ADVISE: |
1531 | return vu_set_postcopy_advise(dev, vmsg); |
1532 | case VHOST_USER_POSTCOPY_LISTEN: |
1533 | return vu_set_postcopy_listen(dev, vmsg); |
1534 | case VHOST_USER_POSTCOPY_END: |
1535 | return vu_set_postcopy_end(dev, vmsg); |
1536 | case VHOST_USER_GET_INFLIGHT_FD: |
1537 | return vu_get_inflight_fd(dev, vmsg); |
1538 | case VHOST_USER_SET_INFLIGHT_FD: |
1539 | return vu_set_inflight_fd(dev, vmsg); |
1540 | default: |
1541 | vmsg_close_fds(vmsg); |
1542 | vu_panic(dev, "Unhandled request: %d" , vmsg->request); |
1543 | } |
1544 | |
1545 | return false; |
1546 | } |
1547 | |
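/*
 * Read and process a single message from the vhost-user socket,
 * sending a reply when the handler requests one.  Returns false if
 * reading or replying failed.
 */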
1548 | bool |
1549 | vu_dispatch(VuDev *dev) |
1550 | { |
1551 | VhostUserMsg vmsg = { 0, }; |
1552 | int reply_requested; |
1553 | bool success = false; |
1554 | |
1555 | if (!vu_message_read(dev, dev->sock, &vmsg)) { |
1556 | goto end; |
1557 | } |
1558 | |
1559 | reply_requested = vu_process_message(dev, &vmsg); |
1560 | if (!reply_requested) { |
1561 | success = true; |
1562 | goto end; |
1563 | } |
1564 | |
1565 | if (!vu_send_reply(dev, dev->sock, &vmsg)) { |
1566 | goto end; |
1567 | } |
1568 | |
1569 | success = true; |
1570 | |
1571 | end: |
1572 | free(vmsg.data); |
1573 | return success; |
1574 | } |
1575 | |
1576 | void |
1577 | vu_deinit(VuDev *dev) |
1578 | { |
1579 | int i; |
1580 | |
1581 | for (i = 0; i < dev->nregions; i++) { |
1582 | VuDevRegion *r = &dev->regions[i]; |
1583 | void *m = (void *) (uintptr_t) r->mmap_addr; |
1584 | if (m != MAP_FAILED) { |
1585 | munmap(m, r->size + r->mmap_offset); |
1586 | } |
1587 | } |
1588 | dev->nregions = 0; |
1589 | |
1590 | for (i = 0; i < dev->max_queues; i++) { |
1591 | VuVirtq *vq = &dev->vq[i]; |
1592 | |
1593 | if (vq->call_fd != -1) { |
1594 | close(vq->call_fd); |
1595 | vq->call_fd = -1; |
1596 | } |
1597 | |
1598 | if (vq->kick_fd != -1) { |
1599 | close(vq->kick_fd); |
1600 | vq->kick_fd = -1; |
1601 | } |
1602 | |
1603 | if (vq->err_fd != -1) { |
1604 | close(vq->err_fd); |
1605 | vq->err_fd = -1; |
1606 | } |
1607 | |
1608 | if (vq->resubmit_list) { |
1609 | free(vq->resubmit_list); |
1610 | vq->resubmit_list = NULL; |
1611 | } |
1612 | |
1613 | vq->inflight = NULL; |
1614 | } |
1615 | |
1616 | if (dev->inflight_info.addr) { |
1617 | munmap(dev->inflight_info.addr, dev->inflight_info.size); |
1618 | dev->inflight_info.addr = NULL; |
1619 | } |
1620 | |
1621 | if (dev->inflight_info.fd > 0) { |
1622 | close(dev->inflight_info.fd); |
1623 | dev->inflight_info.fd = -1; |
1624 | } |
1625 | |
1626 | vu_close_log(dev); |
1627 | if (dev->slave_fd != -1) { |
1628 | close(dev->slave_fd); |
1629 | dev->slave_fd = -1; |
1630 | } |
1631 | |
1632 | if (dev->sock != -1) { |
1633 | close(dev->sock); |
1634 | } |
1635 | |
1636 | free(dev->vq); |
1637 | dev->vq = NULL; |
1638 | } |
1639 | |
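/*
 * Initialise a VuDev for an already-connected vhost-user socket.
 *
 * Typical usage (sketch): accept a connection on a listening UNIX
 * socket, call vu_init() with the connected fd, a panic handler and
 * watch callbacks that hook file descriptors into the application's
 * event loop, call vu_dispatch() whenever the socket becomes readable,
 * and call vu_deinit() once the connection goes away.
 */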
1640 | bool |
1641 | vu_init(VuDev *dev, |
1642 | uint16_t max_queues, |
1643 | int socket, |
1644 | vu_panic_cb panic, |
1645 | vu_set_watch_cb set_watch, |
1646 | vu_remove_watch_cb remove_watch, |
1647 | const VuDevIface *iface) |
1648 | { |
1649 | uint16_t i; |
1650 | |
1651 | assert(max_queues > 0); |
1652 | assert(socket >= 0); |
1653 | assert(set_watch); |
1654 | assert(remove_watch); |
1655 | assert(iface); |
1656 | assert(panic); |
1657 | |
1658 | memset(dev, 0, sizeof(*dev)); |
1659 | |
1660 | dev->sock = socket; |
1661 | dev->panic = panic; |
1662 | dev->set_watch = set_watch; |
1663 | dev->remove_watch = remove_watch; |
1664 | dev->iface = iface; |
1665 | dev->log_call_fd = -1; |
1666 | dev->slave_fd = -1; |
1667 | dev->max_queues = max_queues; |
1668 | |
1669 | dev->vq = malloc(max_queues * sizeof(dev->vq[0])); |
1670 | if (!dev->vq) { |
1671 | DPRINT("%s: failed to malloc virtqueues\n" , __func__); |
1672 | return false; |
1673 | } |
1674 | |
1675 | for (i = 0; i < max_queues; i++) { |
1676 | dev->vq[i] = (VuVirtq) { |
1677 | .call_fd = -1, .kick_fd = -1, .err_fd = -1, |
1678 | .notification = true, |
1679 | }; |
1680 | } |
1681 | |
1682 | return true; |
1683 | } |
1684 | |
1685 | VuVirtq * |
1686 | vu_get_queue(VuDev *dev, int qidx) |
1687 | { |
1688 | assert(qidx < dev->max_queues); |
1689 | return &dev->vq[qidx]; |
1690 | } |
1691 | |
1692 | bool |
1693 | vu_queue_enabled(VuDev *dev, VuVirtq *vq) |
1694 | { |
1695 | return vq->enable; |
1696 | } |
1697 | |
1698 | bool |
1699 | vu_queue_started(const VuDev *dev, const VuVirtq *vq) |
1700 | { |
1701 | return vq->started; |
1702 | } |
1703 | |
1704 | static inline uint16_t |
1705 | vring_avail_flags(VuVirtq *vq) |
1706 | { |
1707 | return vq->vring.avail->flags; |
1708 | } |
1709 | |
1710 | static inline uint16_t |
1711 | vring_avail_idx(VuVirtq *vq) |
1712 | { |
1713 | vq->shadow_avail_idx = vq->vring.avail->idx; |
1714 | |
1715 | return vq->shadow_avail_idx; |
1716 | } |
1717 | |
1718 | static inline uint16_t |
1719 | vring_avail_ring(VuVirtq *vq, int i) |
1720 | { |
1721 | return vq->vring.avail->ring[i]; |
1722 | } |
1723 | |
1724 | static inline uint16_t |
1725 | vring_get_used_event(VuVirtq *vq) |
1726 | { |
1727 | return vring_avail_ring(vq, vq->vring.num); |
1728 | } |
1729 | |
1730 | static int |
1731 | virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx) |
1732 | { |
1733 | uint16_t num_heads = vring_avail_idx(vq) - idx; |
1734 | |
1735 | /* Check it isn't doing very strange things with descriptor numbers. */ |
1736 | if (num_heads > vq->vring.num) { |
1737 | vu_panic(dev, "Guest moved used index from %u to %u" , |
1738 | idx, vq->shadow_avail_idx); |
1739 | return -1; |
1740 | } |
1741 | if (num_heads) { |
1742 | /* On success, callers read a descriptor at vq->last_avail_idx. |
1743 | * Make sure descriptor read does not bypass avail index read. */ |
1744 | smp_rmb(); |
1745 | } |
1746 | |
1747 | return num_heads; |
1748 | } |
1749 | |
1750 | static bool |
1751 | virtqueue_get_head(VuDev *dev, VuVirtq *vq, |
1752 | unsigned int idx, unsigned int *head) |
1753 | { |
1754 | /* Grab the next descriptor number they're advertising, and increment |
1755 | * the index we've seen. */ |
1756 | *head = vring_avail_ring(vq, idx % vq->vring.num); |
1757 | |
1758 | /* If their number is silly, that's a fatal mistake. */ |
1759 | if (*head >= vq->vring.num) { |
        vu_panic(dev, "Guest says index %u is available", *head);
1761 | return false; |
1762 | } |
1763 | |
1764 | return true; |
1765 | } |
1766 | |
1767 | static int |
1768 | virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc, |
1769 | uint64_t addr, size_t len) |
1770 | { |
1771 | struct vring_desc *ori_desc; |
1772 | uint64_t read_len; |
1773 | |
1774 | if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { |
1775 | return -1; |
1776 | } |
1777 | |
1778 | if (len == 0) { |
1779 | return -1; |
1780 | } |
1781 | |
1782 | while (len) { |
1783 | read_len = len; |
1784 | ori_desc = vu_gpa_to_va(dev, &read_len, addr); |
1785 | if (!ori_desc) { |
1786 | return -1; |
1787 | } |
1788 | |
        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        /* advance the destination by the bytes just copied, not by
         * read_len descriptor-sized elements */
        desc = (struct vring_desc *)((char *)desc + read_len);
1793 | } |
1794 | |
1795 | return 0; |
1796 | } |
1797 | |
1798 | enum { |
1799 | VIRTQUEUE_READ_DESC_ERROR = -1, |
1800 | VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ |
1801 | VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ |
1802 | }; |
1803 | |
1804 | static int |
1805 | virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, |
1806 | int i, unsigned int max, unsigned int *next) |
1807 | { |
1808 | /* If this descriptor says it doesn't chain, we're done. */ |
1809 | if (!(desc[i].flags & VRING_DESC_F_NEXT)) { |
1810 | return VIRTQUEUE_READ_DESC_DONE; |
1811 | } |
1812 | |
1813 | /* Check they're not leading us off end of descriptors. */ |
1814 | *next = desc[i].next; |
1815 | /* Make sure compiler knows to grab that: we don't want it changing! */ |
1816 | smp_wmb(); |
1817 | |
1818 | if (*next >= max) { |
        vu_panic(dev, "Desc next is %u", *next);
1820 | return VIRTQUEUE_READ_DESC_ERROR; |
1821 | } |
1822 | |
1823 | return VIRTQUEUE_READ_DESC_MORE; |
1824 | } |
1825 | |
1826 | void |
1827 | vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, |
1828 | unsigned int *out_bytes, |
1829 | unsigned max_in_bytes, unsigned max_out_bytes) |
1830 | { |
1831 | unsigned int idx; |
1832 | unsigned int total_bufs, in_total, out_total; |
1833 | int rc; |
1834 | |
1835 | idx = vq->last_avail_idx; |
1836 | |
1837 | total_bufs = in_total = out_total = 0; |
1838 | if (unlikely(dev->broken) || |
1839 | unlikely(!vq->vring.avail)) { |
1840 | goto done; |
1841 | } |
1842 | |
1843 | while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) { |
1844 | unsigned int max, desc_len, num_bufs, indirect = 0; |
1845 | uint64_t desc_addr, read_len; |
1846 | struct vring_desc *desc; |
1847 | struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; |
1848 | unsigned int i; |
1849 | |
1850 | max = vq->vring.num; |
1851 | num_bufs = total_bufs; |
1852 | if (!virtqueue_get_head(dev, vq, idx++, &i)) { |
1853 | goto err; |
1854 | } |
1855 | desc = vq->vring.desc; |
1856 | |
1857 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { |
1858 | if (desc[i].len % sizeof(struct vring_desc)) { |
1859 | vu_panic(dev, "Invalid size for indirect buffer table" ); |
1860 | goto err; |
1861 | } |
1862 | |
1863 | /* If we've got too many, that implies a descriptor loop. */ |
1864 | if (num_bufs >= max) { |
1865 | vu_panic(dev, "Looped descriptor" ); |
1866 | goto err; |
1867 | } |
1868 | |
1869 | /* loop over the indirect descriptor table */ |
1870 | indirect = 1; |
1871 | desc_addr = desc[i].addr; |
1872 | desc_len = desc[i].len; |
1873 | max = desc_len / sizeof(struct vring_desc); |
1874 | read_len = desc_len; |
1875 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); |
1876 | if (unlikely(desc && read_len != desc_len)) { |
1877 | /* Failed to use zero copy */ |
1878 | desc = NULL; |
1879 | if (!virtqueue_read_indirect_desc(dev, desc_buf, |
1880 | desc_addr, |
1881 | desc_len)) { |
1882 | desc = desc_buf; |
1883 | } |
1884 | } |
1885 | if (!desc) { |
1886 | vu_panic(dev, "Invalid indirect buffer table" ); |
1887 | goto err; |
1888 | } |
1889 | num_bufs = i = 0; |
1890 | } |
1891 | |
1892 | do { |
1893 | /* If we've got too many, that implies a descriptor loop. */ |
1894 | if (++num_bufs > max) { |
1895 | vu_panic(dev, "Looped descriptor" ); |
1896 | goto err; |
1897 | } |
1898 | |
1899 | if (desc[i].flags & VRING_DESC_F_WRITE) { |
1900 | in_total += desc[i].len; |
1901 | } else { |
1902 | out_total += desc[i].len; |
1903 | } |
1904 | if (in_total >= max_in_bytes && out_total >= max_out_bytes) { |
1905 | goto done; |
1906 | } |
1907 | rc = virtqueue_read_next_desc(dev, desc, i, max, &i); |
1908 | } while (rc == VIRTQUEUE_READ_DESC_MORE); |
1909 | |
1910 | if (rc == VIRTQUEUE_READ_DESC_ERROR) { |
1911 | goto err; |
1912 | } |
1913 | |
1914 | if (!indirect) { |
1915 | total_bufs = num_bufs; |
1916 | } else { |
1917 | total_bufs++; |
1918 | } |
1919 | } |
1920 | if (rc < 0) { |
1921 | goto err; |
1922 | } |
1923 | done: |
1924 | if (in_bytes) { |
1925 | *in_bytes = in_total; |
1926 | } |
1927 | if (out_bytes) { |
1928 | *out_bytes = out_total; |
1929 | } |
1930 | return; |
1931 | |
1932 | err: |
1933 | in_total = out_total = 0; |
1934 | goto done; |
1935 | } |
1936 | |
1937 | bool |
1938 | vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, |
1939 | unsigned int out_bytes) |
1940 | { |
1941 | unsigned int in_total, out_total; |
1942 | |
1943 | vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total, |
1944 | in_bytes, out_bytes); |
1945 | |
1946 | return in_bytes <= in_total && out_bytes <= out_total; |
1947 | } |
1948 | |
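/*
 * Illustrative usage sketch (not part of the library): a kick handler can
 * simply drain until the ring is empty, with process_one() standing in for
 * a hypothetical backend-specific helper that pops and completes a single
 * element.
 *
 *     while (!vu_queue_empty(dev, vq)) {
 *         process_one(dev, vq);
 *     }
 */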
1949 | /* Fetch avail_idx from VQ memory only when we really need to know if |
1950 | * guest has added some buffers. */ |
1951 | bool |
1952 | vu_queue_empty(VuDev *dev, VuVirtq *vq) |
1953 | { |
1954 | if (unlikely(dev->broken) || |
1955 | unlikely(!vq->vring.avail)) { |
1956 | return true; |
1957 | } |
1958 | |
1959 | if (vq->shadow_avail_idx != vq->last_avail_idx) { |
1960 | return false; |
1961 | } |
1962 | |
1963 | return vring_avail_idx(vq) == vq->last_avail_idx; |
1964 | } |
1965 | |
1966 | static bool |
1967 | vring_notify(VuDev *dev, VuVirtq *vq) |
1968 | { |
1969 | uint16_t old, new; |
1970 | bool v; |
1971 | |
1972 | /* We need to expose used array entries before checking used event. */ |
1973 | smp_mb(); |
1974 | |
1975 | /* Always notify when queue is empty (when feature acknowledge) */ |
1976 | if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && |
1977 | !vq->inuse && vu_queue_empty(dev, vq)) { |
1978 | return true; |
1979 | } |
1980 | |
1981 | if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { |
1982 | return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); |
1983 | } |
1984 | |
1985 | v = vq->signalled_used_valid; |
1986 | vq->signalled_used_valid = true; |
1987 | old = vq->signalled_used; |
1988 | new = vq->signalled_used = vq->used_idx; |
1989 | return !v || vring_need_event(vring_get_used_event(vq), new, old); |
1990 | } |
1991 | |
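/*
 * Illustrative usage sketch (not part of the library): after completing a
 * batch of elements with vu_queue_push(), signal the guest once for the
 * whole batch; vring_notify() above suppresses the eventfd write when the
 * driver has asked not to be interrupted. 'elem_a'/'elem_b' and their
 * lengths stand in for elements obtained earlier from vu_queue_pop().
 *
 *     vu_queue_push(dev, vq, elem_a, len_a);
 *     vu_queue_push(dev, vq, elem_b, len_b);
 *     vu_queue_notify(dev, vq);
 */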
1992 | void |
1993 | vu_queue_notify(VuDev *dev, VuVirtq *vq) |
1994 | { |
1995 | if (unlikely(dev->broken) || |
1996 | unlikely(!vq->vring.avail)) { |
1997 | return; |
1998 | } |
1999 | |
2000 | if (!vring_notify(dev, vq)) { |
2001 | DPRINT("skipped notify...\n" ); |
2002 | return; |
2003 | } |
2004 | |
2005 | if (eventfd_write(vq->call_fd, 1) < 0) { |
2006 | vu_panic(dev, "Error writing eventfd: %s" , strerror(errno)); |
2007 | } |
2008 | } |
2009 | |
2010 | static inline void |
2011 | vring_used_flags_set_bit(VuVirtq *vq, int mask) |
2012 | { |
2013 | uint16_t *flags; |
2014 | |
2015 | flags = (uint16_t *)((char*)vq->vring.used + |
2016 | offsetof(struct vring_used, flags)); |
2017 | *flags |= mask; |
2018 | } |
2019 | |
2020 | static inline void |
2021 | vring_used_flags_unset_bit(VuVirtq *vq, int mask) |
2022 | { |
2023 | uint16_t *flags; |
2024 | |
2025 | flags = (uint16_t *)((char*)vq->vring.used + |
2026 | offsetof(struct vring_used, flags)); |
2027 | *flags &= ~mask; |
2028 | } |
2029 | |
2030 | static inline void |
2031 | vring_set_avail_event(VuVirtq *vq, uint16_t val) |
2032 | { |
2033 | if (!vq->notification) { |
2034 | return; |
2035 | } |
2036 | |
2037 | *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val; |
2038 | } |
2039 | |
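/*
 * Illustrative usage sketch (not part of the library): a common pattern is
 * to disable guest kicks while busy processing, then re-enable them and
 * re-check the ring before sleeping so that a request arriving in the
 * window is not missed (the smp_mb() issued below on enable makes the
 * re-check safe). drain_queue() is a hypothetical helper.
 *
 *     vu_queue_set_notification(dev, vq, 0);
 *     drain_queue(dev, vq);
 *     vu_queue_set_notification(dev, vq, 1);
 *     if (!vu_queue_empty(dev, vq)) {
 *         drain_queue(dev, vq);    // request slipped in before re-enable
 *     }
 */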
2040 | void |
2041 | vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable) |
2042 | { |
2043 | vq->notification = enable; |
2044 | if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { |
2045 | vring_set_avail_event(vq, vring_avail_idx(vq)); |
2046 | } else if (enable) { |
2047 | vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); |
2048 | } else { |
2049 | vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); |
2050 | } |
2051 | if (enable) { |
2052 | /* Expose avail event/used flags before caller checks the avail idx. */ |
2053 | smp_mb(); |
2054 | } |
2055 | } |
2056 | |
2057 | static void |
2058 | virtqueue_map_desc(VuDev *dev, |
2059 | unsigned int *p_num_sg, struct iovec *iov, |
2060 | unsigned int max_num_sg, bool is_write, |
2061 | uint64_t pa, size_t sz) |
2062 | { |
2063 | unsigned num_sg = *p_num_sg; |
2064 | |
2065 | assert(num_sg <= max_num_sg); |
2066 | |
2067 | if (!sz) { |
2068 | vu_panic(dev, "virtio: zero sized buffers are not allowed" ); |
2069 | return; |
2070 | } |
2071 | |
2072 | while (sz) { |
2073 | uint64_t len = sz; |
2074 | |
2075 | if (num_sg == max_num_sg) { |
2076 | vu_panic(dev, "virtio: too many descriptors in indirect table" ); |
2077 | return; |
2078 | } |
2079 | |
2080 | iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); |
2081 | if (iov[num_sg].iov_base == NULL) { |
2082 | vu_panic(dev, "virtio: invalid address for buffers" ); |
2083 | return; |
2084 | } |
2085 | iov[num_sg].iov_len = len; |
2086 | num_sg++; |
2087 | sz -= len; |
2088 | pa += len; |
2089 | } |
2090 | |
2091 | *p_num_sg = num_sg; |
2092 | } |
2093 | |
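/*
 * The element and both scatter-gather arrays are carved out of a single
 * allocation: the caller-visible structure of 'sz' bytes (at least a
 * VuVirtqElement) comes first, then the 'in' iovec array, then the 'out'
 * iovec array, so one free() releases everything.
 *
 *     [ element (sz, padded) | in_sg[in_num] | out_sg[out_num] ]
 */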
2094 | static void * |
2095 | virtqueue_alloc_element(size_t sz, |
2096 | unsigned out_num, unsigned in_num) |
2097 | { |
2098 | VuVirtqElement *elem; |
2099 | size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); |
2100 | size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); |
2101 | size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); |
2102 | |
2103 | assert(sz >= sizeof(VuVirtqElement)); |
    elem = malloc(out_sg_end);
    if (!elem) {
        return NULL;
    }
    elem->out_num = out_num;
2106 | elem->in_num = in_num; |
2107 | elem->in_sg = (void *)elem + in_sg_ofs; |
2108 | elem->out_sg = (void *)elem + out_sg_ofs; |
2109 | return elem; |
2110 | } |
2111 | |
2112 | static void * |
2113 | vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) |
2114 | { |
2115 | struct vring_desc *desc = vq->vring.desc; |
2116 | uint64_t desc_addr, read_len; |
2117 | unsigned int desc_len; |
2118 | unsigned int max = vq->vring.num; |
2119 | unsigned int i = idx; |
2120 | VuVirtqElement *elem; |
2121 | unsigned int out_num = 0, in_num = 0; |
2122 | struct iovec iov[VIRTQUEUE_MAX_SIZE]; |
2123 | struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; |
2124 | int rc; |
2125 | |
2126 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { |
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }
2130 | |
2131 | /* loop over the indirect descriptor table */ |
2132 | desc_addr = desc[i].addr; |
2133 | desc_len = desc[i].len; |
2134 | max = desc_len / sizeof(struct vring_desc); |
2135 | read_len = desc_len; |
2136 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); |
2137 | if (unlikely(desc && read_len != desc_len)) { |
2138 | /* Failed to use zero copy */ |
2139 | desc = NULL; |
2140 | if (!virtqueue_read_indirect_desc(dev, desc_buf, |
2141 | desc_addr, |
2142 | desc_len)) { |
2143 | desc = desc_buf; |
2144 | } |
2145 | } |
2146 | if (!desc) { |
2147 | vu_panic(dev, "Invalid indirect buffer table" ); |
2148 | return NULL; |
2149 | } |
2150 | i = 0; |
2151 | } |
2152 | |
2153 | /* Collect all the descriptors */ |
2154 | do { |
2155 | if (desc[i].flags & VRING_DESC_F_WRITE) { |
2156 | virtqueue_map_desc(dev, &in_num, iov + out_num, |
2157 | VIRTQUEUE_MAX_SIZE - out_num, true, |
2158 | desc[i].addr, desc[i].len); |
2159 | } else { |
2160 | if (in_num) { |
2161 | vu_panic(dev, "Incorrect order for descriptors" ); |
2162 | return NULL; |
2163 | } |
2164 | virtqueue_map_desc(dev, &out_num, iov, |
2165 | VIRTQUEUE_MAX_SIZE, false, |
2166 | desc[i].addr, desc[i].len); |
2167 | } |
2168 | |
2169 | /* If we've got too many, that implies a descriptor loop. */ |
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
2173 | rc = virtqueue_read_next_desc(dev, desc, i, max, &i); |
2174 | } while (rc == VIRTQUEUE_READ_DESC_MORE); |
2175 | |
2176 | if (rc == VIRTQUEUE_READ_DESC_ERROR) { |
2177 | vu_panic(dev, "read descriptor error" ); |
2178 | return NULL; |
2179 | } |
2180 | |
2181 | /* Now copy what we have collected and mapped */ |
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
2184 | for (i = 0; i < out_num; i++) { |
2185 | elem->out_sg[i] = iov[i]; |
2186 | } |
2187 | for (i = 0; i < in_num; i++) { |
2188 | elem->in_sg[i] = iov[out_num + i]; |
2189 | } |
2190 | |
2191 | return elem; |
2192 | } |
2193 | |
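/*
 * Inflight tracking (only effective once VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
 * has been negotiated): each popped descriptor head is recorded in the shared
 * inflight buffer so that a reconnecting backend can resubmit requests that
 * were still in flight when it went away (see the resubmit_list handling in
 * vu_queue_pop() below). The helpers are invoked in this order:
 *
 *     vu_queue_inflight_get()        when the element is popped
 *     vu_queue_inflight_pre_put()    after the used element is written,
 *                                    before the used index is published
 *     vu_queue_inflight_post_put()   once the used index has been updated
 *
 * vu_queue_pop() and vu_queue_push() already call these, so backends normally
 * never use them directly.
 */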
2194 | static int |
2195 | vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx) |
2196 | { |
2197 | if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { |
2198 | return 0; |
2199 | } |
2200 | |
2201 | if (unlikely(!vq->inflight)) { |
2202 | return -1; |
2203 | } |
2204 | |
2205 | vq->inflight->desc[desc_idx].counter = vq->counter++; |
2206 | vq->inflight->desc[desc_idx].inflight = 1; |
2207 | |
2208 | return 0; |
2209 | } |
2210 | |
2211 | static int |
2212 | vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx) |
2213 | { |
2214 | if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { |
2215 | return 0; |
2216 | } |
2217 | |
2218 | if (unlikely(!vq->inflight)) { |
2219 | return -1; |
2220 | } |
2221 | |
2222 | vq->inflight->last_batch_head = desc_idx; |
2223 | |
2224 | return 0; |
2225 | } |
2226 | |
2227 | static int |
2228 | vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx) |
2229 | { |
2230 | if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { |
2231 | return 0; |
2232 | } |
2233 | |
2234 | if (unlikely(!vq->inflight)) { |
2235 | return -1; |
2236 | } |
2237 | |
2238 | barrier(); |
2239 | |
2240 | vq->inflight->desc[desc_idx].inflight = 0; |
2241 | |
2242 | barrier(); |
2243 | |
2244 | vq->inflight->used_idx = vq->used_idx; |
2245 | |
2246 | return 0; |
2247 | } |
2248 | |
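/*
 * Illustrative usage sketch (not part of the library): the canonical consume
 * loop. handle_request() is a hypothetical backend function that processes
 * one element and returns how many bytes it wrote into the element's in_sg
 * buffers; the element itself is malloc()ed by the library and must be freed
 * by the caller.
 *
 *     VuVirtqElement *elem;
 *
 *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *         unsigned int written = handle_request(dev, elem);
 *         vu_queue_push(dev, vq, elem, written);
 *         free(elem);
 *     }
 *     vu_queue_notify(dev, vq);
 */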
2249 | void * |
2250 | vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) |
2251 | { |
2252 | int i; |
2253 | unsigned int head; |
2254 | VuVirtqElement *elem; |
2255 | |
2256 | if (unlikely(dev->broken) || |
2257 | unlikely(!vq->vring.avail)) { |
2258 | return NULL; |
2259 | } |
2260 | |
2261 | if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { |
2262 | i = (--vq->resubmit_num); |
2263 | elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); |
2264 | |
2265 | if (!vq->resubmit_num) { |
2266 | free(vq->resubmit_list); |
2267 | vq->resubmit_list = NULL; |
2268 | } |
2269 | |
2270 | return elem; |
2271 | } |
2272 | |
2273 | if (vu_queue_empty(dev, vq)) { |
2274 | return NULL; |
2275 | } |
2276 | /* |
     * Needed after vu_queue_empty(), see comment in
2278 | * virtqueue_num_heads(). |
2279 | */ |
2280 | smp_rmb(); |
2281 | |
2282 | if (vq->inuse >= vq->vring.num) { |
2283 | vu_panic(dev, "Virtqueue size exceeded" ); |
2284 | return NULL; |
2285 | } |
2286 | |
2287 | if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { |
2288 | return NULL; |
2289 | } |
2290 | |
2291 | if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { |
2292 | vring_set_avail_event(vq, vq->last_avail_idx); |
2293 | } |
2294 | |
2295 | elem = vu_queue_map_desc(dev, vq, head, sz); |
2296 | |
2297 | if (!elem) { |
2298 | return NULL; |
2299 | } |
2300 | |
2301 | vq->inuse++; |
2302 | |
2303 | vu_queue_inflight_get(dev, vq, head); |
2304 | |
2305 | return elem; |
2306 | } |
2307 | |
2308 | static void |
2309 | vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, |
2310 | size_t len) |
2311 | { |
2312 | vq->inuse--; |
2313 | /* unmap, when DMA support is added */ |
2314 | } |
2315 | |
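/*
 * Illustrative usage sketch (not part of the library): if a popped element
 * cannot be processed right now, vu_queue_unpop() hands it back to the
 * available ring so it will be popped again later; the caller still owns
 * (and frees) the element copy. have_resources() is a hypothetical
 * predicate.
 *
 *     elem = vu_queue_pop(dev, vq, sizeof(*elem));
 *     if (elem && !have_resources()) {
 *         vu_queue_unpop(dev, vq, elem, 0);
 *         free(elem);
 *         return;
 *     }
 */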
2316 | void |
2317 | vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, |
2318 | size_t len) |
2319 | { |
2320 | vq->last_avail_idx--; |
2321 | vu_queue_detach_element(dev, vq, elem, len); |
2322 | } |
2323 | |
2324 | bool |
2325 | vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num) |
2326 | { |
2327 | if (num > vq->inuse) { |
2328 | return false; |
2329 | } |
2330 | vq->last_avail_idx -= num; |
2331 | vq->inuse -= num; |
2332 | return true; |
2333 | } |
2334 | |
2335 | static inline |
2336 | void vring_used_write(VuDev *dev, VuVirtq *vq, |
2337 | struct vring_used_elem *uelem, int i) |
2338 | { |
2339 | struct vring_used *used = vq->vring.used; |
2340 | |
2341 | used->ring[i] = *uelem; |
2342 | vu_log_write(dev, vq->vring.log_guest_addr + |
2343 | offsetof(struct vring_used, ring[i]), |
2344 | sizeof(used->ring[i])); |
2345 | } |
2346 | |
2347 | |
2348 | static void |
2349 | vu_log_queue_fill(VuDev *dev, VuVirtq *vq, |
2350 | const VuVirtqElement *elem, |
2351 | unsigned int len) |
2352 | { |
2353 | struct vring_desc *desc = vq->vring.desc; |
2354 | unsigned int i, max, min, desc_len; |
2355 | uint64_t desc_addr, read_len; |
2356 | struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; |
2357 | unsigned num_bufs = 0; |
2358 | |
2359 | max = vq->vring.num; |
2360 | i = elem->index; |
2361 | |
2362 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { |
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }
2366 | |
2367 | /* loop over the indirect descriptor table */ |
2368 | desc_addr = desc[i].addr; |
2369 | desc_len = desc[i].len; |
2370 | max = desc_len / sizeof(struct vring_desc); |
2371 | read_len = desc_len; |
2372 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); |
2373 | if (unlikely(desc && read_len != desc_len)) { |
2374 | /* Failed to use zero copy */ |
2375 | desc = NULL; |
2376 | if (!virtqueue_read_indirect_desc(dev, desc_buf, |
2377 | desc_addr, |
2378 | desc_len)) { |
2379 | desc = desc_buf; |
2380 | } |
2381 | } |
2382 | if (!desc) { |
2383 | vu_panic(dev, "Invalid indirect buffer table" ); |
2384 | return; |
2385 | } |
2386 | i = 0; |
2387 | } |
2388 | |
2389 | do { |
2390 | if (++num_bufs > max) { |
2391 | vu_panic(dev, "Looped descriptor" ); |
2392 | return; |
2393 | } |
2394 | |
2395 | if (desc[i].flags & VRING_DESC_F_WRITE) { |
2396 | min = MIN(desc[i].len, len); |
2397 | vu_log_write(dev, desc[i].addr, min); |
2398 | len -= min; |
2399 | } |
2400 | |
2401 | } while (len > 0 && |
2402 | (virtqueue_read_next_desc(dev, desc, i, max, &i) |
2403 | == VIRTQUEUE_READ_DESC_MORE)); |
2404 | } |
2405 | |
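/*
 * Illustrative usage sketch (not part of the library): vu_queue_fill() plus
 * vu_queue_flush() publish several completions with a single used-index
 * update (and therefore at most one notification). 'elems', 'lens' and 'n'
 * are hypothetical arrays/counters of previously popped elements and their
 * completion lengths.
 *
 *     unsigned int i;
 *
 *     for (i = 0; i < n; i++) {
 *         vu_queue_fill(dev, vq, elems[i], lens[i], i);
 *     }
 *     vu_queue_flush(dev, vq, n);
 *     vu_queue_notify(dev, vq);
 */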
2406 | void |
2407 | vu_queue_fill(VuDev *dev, VuVirtq *vq, |
2408 | const VuVirtqElement *elem, |
2409 | unsigned int len, unsigned int idx) |
2410 | { |
2411 | struct vring_used_elem uelem; |
2412 | |
2413 | if (unlikely(dev->broken) || |
2414 | unlikely(!vq->vring.avail)) { |
2415 | return; |
2416 | } |
2417 | |
2418 | vu_log_queue_fill(dev, vq, elem, len); |
2419 | |
2420 | idx = (idx + vq->used_idx) % vq->vring.num; |
2421 | |
2422 | uelem.id = elem->index; |
2423 | uelem.len = len; |
2424 | vring_used_write(dev, vq, &uelem, idx); |
2425 | } |
2426 | |
2427 | static inline |
2428 | void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) |
2429 | { |
2430 | vq->vring.used->idx = val; |
2431 | vu_log_write(dev, |
2432 | vq->vring.log_guest_addr + offsetof(struct vring_used, idx), |
2433 | sizeof(vq->vring.used->idx)); |
2434 | |
2435 | vq->used_idx = val; |
2436 | } |
2437 | |
2438 | void |
2439 | vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) |
2440 | { |
2441 | uint16_t old, new; |
2442 | |
2443 | if (unlikely(dev->broken) || |
2444 | unlikely(!vq->vring.avail)) { |
2445 | return; |
2446 | } |
2447 | |
2448 | /* Make sure buffer is written before we update index. */ |
2449 | smp_wmb(); |
2450 | |
2451 | old = vq->used_idx; |
2452 | new = old + count; |
2453 | vring_used_idx_set(dev, vq, new); |
2454 | vq->inuse -= count; |
2455 | if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { |
2456 | vq->signalled_used_valid = false; |
2457 | } |
2458 | } |
2459 | |
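/*
 * vu_queue_push() is the single-element completion path: it writes the used
 * element, records the inflight state, and publishes the new used index.
 * Illustrative usage sketch (not part of the library), with 'written' being
 * the number of bytes produced for the guest:
 *
 *     vu_queue_push(dev, vq, elem, written);
 *     vu_queue_notify(dev, vq);
 *     free(elem);
 */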
2460 | void |
2461 | vu_queue_push(VuDev *dev, VuVirtq *vq, |
2462 | const VuVirtqElement *elem, unsigned int len) |
2463 | { |
2464 | vu_queue_fill(dev, vq, elem, len, 0); |
2465 | vu_queue_inflight_pre_put(dev, vq, elem->index); |
2466 | vu_queue_flush(dev, vq, 1); |
2467 | vu_queue_inflight_post_put(dev, vq, elem->index); |
2468 | } |
2469 | |