1 | /* |
2 | * generic functions used by VFIO devices |
3 | * |
4 | * Copyright Red Hat, Inc. 2012 |
5 | * |
6 | * Authors: |
7 | * Alex Williamson <alex.williamson@redhat.com> |
8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
10 | * the COPYING file in the top-level directory. |
11 | * |
12 | * Based on qemu-kvm device-assignment: |
13 | * Adapted for KVM by Qumranet. |
14 | * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) |
15 | * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) |
16 | * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) |
17 | * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) |
18 | * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) |
19 | */ |
20 | |
21 | #include "qemu/osdep.h" |
22 | #include <sys/ioctl.h> |
23 | #ifdef CONFIG_KVM |
24 | #include <linux/kvm.h> |
25 | #endif |
26 | #include <linux/vfio.h> |
27 | |
28 | #include "hw/vfio/vfio-common.h" |
29 | #include "hw/vfio/vfio.h" |
30 | #include "exec/address-spaces.h" |
31 | #include "exec/memory.h" |
32 | #include "hw/hw.h" |
33 | #include "qemu/error-report.h" |
34 | #include "qemu/main-loop.h" |
35 | #include "qemu/range.h" |
36 | #include "sysemu/balloon.h" |
37 | #include "sysemu/kvm.h" |
38 | #include "sysemu/reset.h" |
39 | #include "trace.h" |
40 | #include "qapi/error.h" |
41 | |
42 | VFIOGroupList vfio_group_list = |
43 | QLIST_HEAD_INITIALIZER(vfio_group_list); |
44 | static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = |
45 | QLIST_HEAD_INITIALIZER(vfio_address_spaces); |
46 | |
47 | #ifdef CONFIG_KVM |
48 | /* |
49 | * We have a single VFIO pseudo device per KVM VM. Once created it lives |
50 | * for the life of the VM. Closing the file descriptor only drops our |
51 | * reference to it and the device's reference to kvm. Therefore once |
52 | * initialized, this file descriptor is only released on QEMU exit and |
53 | * we'll re-use it should another vfio device be attached before then. |
54 | */ |
55 | static int vfio_kvm_device_fd = -1; |
56 | #endif |
57 | |
58 | /* |
59 | * Common VFIO interrupt disable |
60 | */ |
61 | void vfio_disable_irqindex(VFIODevice *vbasedev, int index) |
62 | { |
63 | struct vfio_irq_set irq_set = { |
64 | .argsz = sizeof(irq_set), |
65 | .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, |
66 | .index = index, |
67 | .start = 0, |
68 | .count = 0, |
69 | }; |
70 | |
71 | ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); |
72 | } |
73 | |
74 | void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index) |
75 | { |
76 | struct vfio_irq_set irq_set = { |
77 | .argsz = sizeof(irq_set), |
78 | .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, |
79 | .index = index, |
80 | .start = 0, |
81 | .count = 1, |
82 | }; |
83 | |
84 | ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); |
85 | } |
86 | |
87 | void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index) |
88 | { |
89 | struct vfio_irq_set irq_set = { |
90 | .argsz = sizeof(irq_set), |
91 | .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, |
92 | .index = index, |
93 | .start = 0, |
94 | .count = 1, |
95 | }; |
96 | |
97 | ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); |
98 | } |
99 | |
100 | static inline const char *action_to_str(int action) |
101 | { |
102 | switch (action) { |
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
111 | } |
112 | } |
113 | |
114 | static const char *index_to_str(VFIODevice *vbasedev, int index) |
115 | { |
116 | if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { |
117 | return NULL; |
118 | } |
119 | |
120 | switch (index) { |
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
131 | default: |
132 | return NULL; |
133 | } |
134 | } |
135 | |
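/*
 * Configure eventfd signaling for a single interrupt sub-index via
 * VFIO_DEVICE_SET_IRQS.  Passing fd == -1 tears the signaling down.
 * A typical (hypothetical) caller wiring up one MSI-X vector might look
 * like:
 *
 *     vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
 *                            VFIO_IRQ_SET_ACTION_TRIGGER,
 *                            event_notifier_get_fd(&notifier), &err);
 *
 * Returns 0 on success or -errno, with a decorated error in @errp.
 */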
136 | int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, |
137 | int action, int fd, Error **errp) |
138 | { |
139 | struct vfio_irq_set *irq_set; |
140 | int argsz, ret = 0; |
141 | const char *name; |
142 | int32_t *pfd; |
143 | |
144 | argsz = sizeof(*irq_set) + sizeof(*pfd); |
145 | |
146 | irq_set = g_malloc0(argsz); |
147 | irq_set->argsz = argsz; |
148 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; |
149 | irq_set->index = index; |
150 | irq_set->start = subindex; |
151 | irq_set->count = 1; |
152 | pfd = (int32_t *)&irq_set->data; |
153 | *pfd = fd; |
154 | |
155 | if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { |
156 | ret = -errno; |
157 | } |
158 | g_free(irq_set); |
159 | |
160 | if (!ret) { |
161 | return 0; |
162 | } |
163 | |
    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
175 | return ret; |
176 | } |
177 | |
178 | /* |
179 | * IO Port/MMIO - Beware of the endians, VFIO is always little endian |
180 | */ |
181 | void vfio_region_write(void *opaque, hwaddr addr, |
182 | uint64_t data, unsigned size) |
183 | { |
184 | VFIORegion *region = opaque; |
185 | VFIODevice *vbasedev = region->vbasedev; |
186 | union { |
187 | uint8_t byte; |
188 | uint16_t word; |
189 | uint32_t dword; |
190 | uint64_t qword; |
191 | } buf; |
192 | |
193 | switch (size) { |
194 | case 1: |
195 | buf.byte = data; |
196 | break; |
197 | case 2: |
198 | buf.word = cpu_to_le16(data); |
199 | break; |
200 | case 4: |
201 | buf.dword = cpu_to_le32(data); |
202 | break; |
203 | case 8: |
204 | buf.qword = cpu_to_le64(data); |
205 | break; |
206 | default: |
        hw_error("vfio: unsupported write size, %d bytes", size);
208 | break; |
209 | } |
210 | |
211 | if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { |
        error_report("%s(%s:region%d+0x%" HWADDR_PRIx ", 0x%" PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
216 | } |
217 | |
218 | trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); |
219 | |
220 | /* |
221 | * A read or write to a BAR always signals an INTx EOI. This will |
222 | * do nothing if not pending (including not in INTx mode). We assume |
223 | * that a BAR access is in response to an interrupt and that BAR |
224 | * accesses will service the interrupt. Unfortunately, we don't know |
225 | * which access will service the interrupt, so we're potentially |
226 | * getting quite a few host interrupts per guest interrupt. |
227 | */ |
228 | vbasedev->ops->vfio_eoi(vbasedev); |
229 | } |
230 | |
231 | uint64_t vfio_region_read(void *opaque, |
232 | hwaddr addr, unsigned size) |
233 | { |
234 | VFIORegion *region = opaque; |
235 | VFIODevice *vbasedev = region->vbasedev; |
236 | union { |
237 | uint8_t byte; |
238 | uint16_t word; |
239 | uint32_t dword; |
240 | uint64_t qword; |
241 | } buf; |
242 | uint64_t data = 0; |
243 | |
244 | if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { |
        error_report("%s(%s:region%d+0x%" HWADDR_PRIx ", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
248 | return (uint64_t)-1; |
249 | } |
250 | switch (size) { |
251 | case 1: |
252 | data = buf.byte; |
253 | break; |
254 | case 2: |
255 | data = le16_to_cpu(buf.word); |
256 | break; |
257 | case 4: |
258 | data = le32_to_cpu(buf.dword); |
259 | break; |
260 | case 8: |
261 | data = le64_to_cpu(buf.qword); |
262 | break; |
263 | default: |
        hw_error("vfio: unsupported read size, %d bytes", size);
265 | break; |
266 | } |
267 | |
268 | trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); |
269 | |
270 | /* Same as write above */ |
271 | vbasedev->ops->vfio_eoi(vbasedev); |
272 | |
273 | return data; |
274 | } |
275 | |
276 | const MemoryRegionOps vfio_region_ops = { |
277 | .read = vfio_region_read, |
278 | .write = vfio_region_write, |
279 | .endianness = DEVICE_LITTLE_ENDIAN, |
280 | .valid = { |
281 | .min_access_size = 1, |
282 | .max_access_size = 8, |
283 | }, |
284 | .impl = { |
285 | .min_access_size = 1, |
286 | .max_access_size = 8, |
287 | }, |
288 | }; |
289 | |
290 | /* |
291 | * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 |
292 | */ |
293 | static int vfio_dma_unmap(VFIOContainer *container, |
294 | hwaddr iova, ram_addr_t size) |
295 | { |
296 | struct vfio_iommu_type1_dma_unmap unmap = { |
297 | .argsz = sizeof(unmap), |
298 | .flags = 0, |
299 | .iova = iova, |
300 | .size = size, |
301 | }; |
302 | |
303 | while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { |
304 | /* |
305 | * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c |
306 | * v4.15) where an overflow in its wrap-around check prevents us from |
307 | * unmapping the last page of the address space. Test for the error |
308 | * condition and re-try the unmap excluding the last page. The |
309 | * expectation is that we've never mapped the last page anyway and this |
310 | * unmap request comes via vIOMMU support which also makes it unlikely |
311 | * that this page is used. This bug was introduced well after type1 v2 |
312 | * support was introduced, so we shouldn't need to test for v1. A fix |
313 | * is queued for kernel v5.0 so this workaround can be removed once |
314 | * affected kernels are sufficiently deprecated. |
315 | */ |
316 | if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && |
317 | container->iommu_type == VFIO_TYPE1v2_IOMMU) { |
318 | trace_vfio_dma_unmap_overflow_workaround(); |
319 | unmap.size -= 1ULL << ctz64(container->pgsizes); |
320 | continue; |
321 | } |
        error_report("VFIO_UNMAP_DMA: %d", -errno);
323 | return -errno; |
324 | } |
325 | |
326 | return 0; |
327 | } |
328 | |
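/*
 * Map a host virtual address range into the container's IO address space.
 * The kernel pins the backing pages, so the mapping remains valid even if
 * the QEMU-side memory object is later torn down.
 */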
329 | static int vfio_dma_map(VFIOContainer *container, hwaddr iova, |
330 | ram_addr_t size, void *vaddr, bool readonly) |
331 | { |
332 | struct vfio_iommu_type1_dma_map map = { |
333 | .argsz = sizeof(map), |
334 | .flags = VFIO_DMA_MAP_FLAG_READ, |
335 | .vaddr = (__u64)(uintptr_t)vaddr, |
336 | .iova = iova, |
337 | .size = size, |
338 | }; |
339 | |
340 | if (!readonly) { |
341 | map.flags |= VFIO_DMA_MAP_FLAG_WRITE; |
342 | } |
343 | |
344 | /* |
345 | * Try the mapping, if it fails with EBUSY, unmap the region and try |
346 | * again. This shouldn't be necessary, but we sometimes see it in |
347 | * the VGA ROM space. |
348 | */ |
349 | if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || |
350 | (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 && |
351 | ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { |
352 | return 0; |
353 | } |
354 | |
    error_report("VFIO_MAP_DMA: %d", -errno);
356 | return -errno; |
357 | } |
358 | |
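/*
 * Record a host DMA window (a usable IOVA range and its supported page
 * sizes) for this container.  Overlapping windows are rejected.
 */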
359 | static void vfio_host_win_add(VFIOContainer *container, |
360 | hwaddr min_iova, hwaddr max_iova, |
361 | uint64_t iova_pgsizes) |
362 | { |
363 | VFIOHostDMAWindow *hostwin; |
364 | |
365 | QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { |
366 | if (ranges_overlap(hostwin->min_iova, |
367 | hostwin->max_iova - hostwin->min_iova + 1, |
368 | min_iova, |
369 | max_iova - min_iova + 1)) { |
            hw_error("%s: overlapping host IOMMU windows are not supported",
                     __func__);
371 | } |
372 | } |
373 | |
374 | hostwin = g_malloc0(sizeof(*hostwin)); |
375 | |
376 | hostwin->min_iova = min_iova; |
377 | hostwin->max_iova = max_iova; |
378 | hostwin->iova_pgsizes = iova_pgsizes; |
379 | QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); |
380 | } |
381 | |
382 | static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova, |
383 | hwaddr max_iova) |
384 | { |
385 | VFIOHostDMAWindow *hostwin; |
386 | |
387 | QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { |
388 | if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { |
389 | QLIST_REMOVE(hostwin, hostwin_next); |
390 | return 0; |
391 | } |
392 | } |
393 | |
394 | return -1; |
395 | } |
396 | |
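/*
 * Filter out sections the listener should ignore: anything that is neither
 * RAM nor an IOMMU region cannot be mapped for DMA.
 */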
397 | static bool vfio_listener_skipped_section(MemoryRegionSection *section) |
398 | { |
399 | return (!memory_region_is_ram(section->mr) && |
400 | !memory_region_is_iommu(section->mr)) || |
401 | /* |
402 | * Sizing an enabled 64-bit BAR can cause spurious mappings to |
403 | * addresses in the upper part of the 64-bit address space. These |
404 | * are never accessed by the CPU and beyond the address width of |
405 | * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. |
406 | */ |
407 | section->offset_within_address_space & (1ULL << 63); |
408 | } |
409 | |
410 | /* Called with rcu_read_lock held. */ |
411 | static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr, |
412 | bool *read_only) |
413 | { |
414 | MemoryRegion *mr; |
415 | hwaddr xlat; |
416 | hwaddr len = iotlb->addr_mask + 1; |
417 | bool writable = iotlb->perm & IOMMU_WO; |
418 | |
419 | /* |
420 | * The IOMMU TLB entry we have just covers translation through |
421 | * this IOMMU to its immediate target. We need to translate |
422 | * it the rest of the way through to memory. |
423 | */ |
424 | mr = address_space_translate(&address_space_memory, |
425 | iotlb->translated_addr, |
426 | &xlat, &len, writable, |
427 | MEMTXATTRS_UNSPECIFIED); |
428 | if (!memory_region_is_ram(mr)) { |
        error_report("iommu map to non memory area %" HWADDR_PRIx "",
                     xlat);
431 | return false; |
432 | } |
433 | |
434 | /* |
435 | * Translation truncates length to the IOMMU page size, |
436 | * check that it did not truncate too much. |
437 | */ |
438 | if (len & iotlb->addr_mask) { |
        error_report("iommu has granularity incompatible with target AS");
440 | return false; |
441 | } |
442 | |
443 | *vaddr = memory_region_get_ram_ptr(mr) + xlat; |
444 | *read_only = !writable || mr->readonly; |
445 | |
446 | return true; |
447 | } |
448 | |
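/*
 * vIOMMU notifier: propagate a guest IOMMU TLB change to the host by
 * mapping or unmapping the corresponding IOVA range in the container.
 */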
449 | static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) |
450 | { |
451 | VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); |
452 | VFIOContainer *container = giommu->container; |
453 | hwaddr iova = iotlb->iova + giommu->iommu_offset; |
454 | bool read_only; |
455 | void *vaddr; |
456 | int ret; |
457 | |
    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);
460 | |
461 | if (iotlb->target_as != &address_space_memory) { |
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
464 | return; |
465 | } |
466 | |
467 | rcu_read_lock(); |
468 | |
469 | if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { |
470 | if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) { |
471 | goto out; |
472 | } |
473 | /* |
474 | * vaddr is only valid until rcu_read_unlock(). But after |
475 | * vfio_dma_map has set up the mapping the pages will be |
476 | * pinned by the kernel. This makes sure that the RAM backend |
477 | * of vaddr will always be there, even if the memory object is |
478 | * destroyed and its backing memory munmap-ed. |
479 | */ |
480 | ret = vfio_dma_map(container, iova, |
481 | iotlb->addr_mask + 1, vaddr, |
482 | read_only); |
483 | if (ret) { |
            error_report("vfio_dma_map(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
488 | } |
489 | } else { |
490 | ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1); |
491 | if (ret) { |
            error_report("vfio_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
496 | } |
497 | } |
498 | out: |
499 | rcu_read_unlock(); |
500 | } |
501 | |
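/*
 * MemoryListener callback: a new section appeared in the address space.
 * For IOMMU regions we register a notifier and replay existing mappings;
 * for plain RAM we map the whole section into the container.
 */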
502 | static void vfio_listener_region_add(MemoryListener *listener, |
503 | MemoryRegionSection *section) |
504 | { |
505 | VFIOContainer *container = container_of(listener, VFIOContainer, listener); |
506 | hwaddr iova, end; |
507 | Int128 llend, llsize; |
508 | void *vaddr; |
509 | int ret; |
510 | VFIOHostDMAWindow *hostwin; |
511 | bool hostwin_found; |
512 | |
513 | if (vfio_listener_skipped_section(section)) { |
514 | trace_vfio_listener_region_add_skip( |
515 | section->offset_within_address_space, |
516 | section->offset_within_address_space + |
517 | int128_get64(int128_sub(section->size, int128_one()))); |
518 | return; |
519 | } |
520 | |
521 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != |
522 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { |
        error_report("%s received unaligned region", __func__);
524 | return; |
525 | } |
526 | |
527 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); |
528 | llend = int128_make64(section->offset_within_address_space); |
529 | llend = int128_add(llend, section->size); |
530 | llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); |
531 | |
532 | if (int128_ge(int128_make64(iova), llend)) { |
533 | return; |
534 | } |
535 | end = int128_get64(int128_sub(llend, int128_one())); |
536 | |
537 | if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { |
538 | hwaddr pgsize = 0; |
539 | |
540 | /* For now intersections are not allowed, we may relax this later */ |
541 | QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { |
542 | if (ranges_overlap(hostwin->min_iova, |
543 | hostwin->max_iova - hostwin->min_iova + 1, |
544 | section->offset_within_address_space, |
545 | int128_get64(section->size))) { |
546 | ret = -1; |
547 | goto fail; |
548 | } |
549 | } |
550 | |
551 | ret = vfio_spapr_create_window(container, section, &pgsize); |
552 | if (ret) { |
553 | goto fail; |
554 | } |
555 | |
556 | vfio_host_win_add(container, section->offset_within_address_space, |
557 | section->offset_within_address_space + |
558 | int128_get64(section->size) - 1, pgsize); |
559 | #ifdef CONFIG_KVM |
560 | if (kvm_enabled()) { |
561 | VFIOGroup *group; |
562 | IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); |
563 | struct kvm_vfio_spapr_tce param; |
564 | struct kvm_device_attr attr = { |
565 | .group = KVM_DEV_VFIO_GROUP, |
566 | .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE, |
567 | .addr = (uint64_t)(unsigned long)¶m, |
568 | }; |
569 | |
570 | if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD, |
571 | ¶m.tablefd)) { |
572 | QLIST_FOREACH(group, &container->group_list, container_next) { |
573 | param.groupfd = group->fd; |
574 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { |
                    error_report("vfio: failed to setup fd %d "
                                 "for a group with fd %d: %s",
                                 param.tablefd, param.groupfd,
                                 strerror(errno));
579 | return; |
580 | } |
581 | trace_vfio_spapr_group_attach(param.groupfd, param.tablefd); |
582 | } |
583 | } |
584 | } |
585 | #endif |
586 | } |
587 | |
588 | hostwin_found = false; |
589 | QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { |
590 | if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { |
591 | hostwin_found = true; |
592 | break; |
593 | } |
594 | } |
595 | |
596 | if (!hostwin_found) { |
597 | error_report("vfio: IOMMU container %p can't map guest IOVA region" |
598 | " 0x%" HWADDR_PRIx"..0x%" HWADDR_PRIx, |
599 | container, iova, end); |
600 | ret = -EFAULT; |
601 | goto fail; |
602 | } |
603 | |
604 | memory_region_ref(section->mr); |
605 | |
606 | if (memory_region_is_iommu(section->mr)) { |
607 | VFIOGuestIOMMU *giommu; |
608 | IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); |
609 | int iommu_idx; |
610 | |
611 | trace_vfio_listener_region_add_iommu(iova, end); |
612 | /* |
613 | * FIXME: For VFIO iommu types which have KVM acceleration to |
614 | * avoid bouncing all map/unmaps through qemu this way, this |
615 | * would be the right place to wire that up (tell the KVM |
616 | * device emulation the VFIO iommu handles to use). |
617 | */ |
618 | giommu = g_malloc0(sizeof(*giommu)); |
619 | giommu->iommu = iommu_mr; |
620 | giommu->iommu_offset = section->offset_within_address_space - |
621 | section->offset_within_region; |
622 | giommu->container = container; |
623 | llend = int128_add(int128_make64(section->offset_within_region), |
624 | section->size); |
625 | llend = int128_sub(llend, int128_one()); |
626 | iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, |
627 | MEMTXATTRS_UNSPECIFIED); |
628 | iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, |
629 | IOMMU_NOTIFIER_ALL, |
630 | section->offset_within_region, |
631 | int128_get64(llend), |
632 | iommu_idx); |
633 | QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); |
634 | |
635 | memory_region_register_iommu_notifier(section->mr, &giommu->n); |
636 | memory_region_iommu_replay(giommu->iommu, &giommu->n); |
637 | |
638 | return; |
639 | } |
640 | |
641 | /* Here we assume that memory_region_is_ram(section->mr)==true */ |
642 | |
643 | vaddr = memory_region_get_ram_ptr(section->mr) + |
644 | section->offset_within_region + |
645 | (iova - section->offset_within_address_space); |
646 | |
647 | trace_vfio_listener_region_add_ram(iova, end, vaddr); |
648 | |
649 | llsize = int128_sub(llend, int128_make64(iova)); |
650 | |
651 | if (memory_region_is_ram_device(section->mr)) { |
652 | hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; |
653 | |
654 | if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { |
655 | trace_vfio_listener_region_add_no_dma_map( |
656 | memory_region_name(section->mr), |
657 | section->offset_within_address_space, |
658 | int128_getlo(section->size), |
659 | pgmask + 1); |
660 | return; |
661 | } |
662 | } |
663 | |
664 | ret = vfio_dma_map(container, iova, int128_get64(llsize), |
665 | vaddr, section->readonly); |
666 | if (ret) { |
        error_report("vfio_dma_map(%p, 0x%" HWADDR_PRIx ", "
                     "0x%" HWADDR_PRIx ", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
670 | if (memory_region_is_ram_device(section->mr)) { |
671 | /* Allow unexpected mappings not to be fatal for RAM devices */ |
672 | return; |
673 | } |
674 | goto fail; |
675 | } |
676 | |
677 | return; |
678 | |
679 | fail: |
680 | if (memory_region_is_ram_device(section->mr)) { |
        error_report("failed to vfio_dma_map. pci p2p may not work");
682 | return; |
683 | } |
684 | /* |
685 | * On the initfn path, store the first error in the container so we |
686 | * can gracefully fail. Runtime, there's not much we can do other |
687 | * than throw a hardware error. |
688 | */ |
689 | if (!container->initialized) { |
690 | if (!container->error) { |
691 | container->error = ret; |
692 | } |
693 | } else { |
        hw_error("vfio: DMA mapping failed, unable to continue");
695 | } |
696 | } |
697 | |
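/*
 * MemoryListener callback: a section disappeared from the address space.
 * Tear down the notifier (for IOMMU regions) and unmap the IOVA range.
 */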
698 | static void vfio_listener_region_del(MemoryListener *listener, |
699 | MemoryRegionSection *section) |
700 | { |
701 | VFIOContainer *container = container_of(listener, VFIOContainer, listener); |
702 | hwaddr iova, end; |
703 | Int128 llend, llsize; |
704 | int ret; |
705 | bool try_unmap = true; |
706 | |
707 | if (vfio_listener_skipped_section(section)) { |
708 | trace_vfio_listener_region_del_skip( |
709 | section->offset_within_address_space, |
710 | section->offset_within_address_space + |
711 | int128_get64(int128_sub(section->size, int128_one()))); |
712 | return; |
713 | } |
714 | |
715 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != |
716 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { |
        error_report("%s received unaligned region", __func__);
718 | return; |
719 | } |
720 | |
721 | if (memory_region_is_iommu(section->mr)) { |
722 | VFIOGuestIOMMU *giommu; |
723 | |
724 | QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { |
725 | if (MEMORY_REGION(giommu->iommu) == section->mr && |
726 | giommu->n.start == section->offset_within_region) { |
727 | memory_region_unregister_iommu_notifier(section->mr, |
728 | &giommu->n); |
729 | QLIST_REMOVE(giommu, giommu_next); |
730 | g_free(giommu); |
731 | break; |
732 | } |
733 | } |
734 | |
735 | /* |
736 | * FIXME: We assume the one big unmap below is adequate to |
737 | * remove any individual page mappings in the IOMMU which |
738 | * might have been copied into VFIO. This works for a page table |
739 | * based IOMMU where a big unmap flattens a large range of IO-PTEs. |
740 | * That may not be true for all IOMMU types. |
741 | */ |
742 | } |
743 | |
744 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); |
745 | llend = int128_make64(section->offset_within_address_space); |
746 | llend = int128_add(llend, section->size); |
747 | llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); |
748 | |
749 | if (int128_ge(int128_make64(iova), llend)) { |
750 | return; |
751 | } |
752 | end = int128_get64(int128_sub(llend, int128_one())); |
753 | |
754 | llsize = int128_sub(llend, int128_make64(iova)); |
755 | |
756 | trace_vfio_listener_region_del(iova, end); |
757 | |
758 | if (memory_region_is_ram_device(section->mr)) { |
759 | hwaddr pgmask; |
760 | VFIOHostDMAWindow *hostwin; |
761 | bool hostwin_found = false; |
762 | |
763 | QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { |
764 | if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { |
765 | hostwin_found = true; |
766 | break; |
767 | } |
768 | } |
769 | assert(hostwin_found); /* or region_add() would have failed */ |
770 | |
771 | pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; |
772 | try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); |
773 | } |
774 | |
775 | if (try_unmap) { |
776 | ret = vfio_dma_unmap(container, iova, int128_get64(llsize)); |
777 | if (ret) { |
            error_report("vfio_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
781 | } |
782 | } |
783 | |
784 | memory_region_unref(section->mr); |
785 | |
786 | if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { |
787 | vfio_spapr_remove_window(container, |
788 | section->offset_within_address_space); |
789 | if (vfio_host_win_del(container, |
790 | section->offset_within_address_space, |
791 | section->offset_within_address_space + |
792 | int128_get64(section->size) - 1) < 0) { |
793 | hw_error("%s: Cannot delete missing window at %" HWADDR_PRIx, |
794 | __func__, section->offset_within_address_space); |
795 | } |
796 | } |
797 | } |
798 | |
799 | static const MemoryListener vfio_memory_listener = { |
800 | .region_add = vfio_listener_region_add, |
801 | .region_del = vfio_listener_region_del, |
802 | }; |
803 | |
804 | static void vfio_listener_release(VFIOContainer *container) |
805 | { |
806 | memory_listener_unregister(&container->listener); |
807 | if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { |
808 | memory_listener_unregister(&container->prereg_listener); |
809 | } |
810 | } |
811 | |
812 | struct vfio_info_cap_header * |
813 | vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) |
814 | { |
815 | struct vfio_info_cap_header *hdr; |
816 | void *ptr = info; |
817 | |
818 | if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { |
819 | return NULL; |
820 | } |
821 | |
822 | for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { |
823 | if (hdr->id == id) { |
824 | return hdr; |
825 | } |
826 | } |
827 | |
828 | return NULL; |
829 | } |
830 | |
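/*
 * Parse the sparse mmap capability, if present, and record only the
 * mmap-able sub-ranges of the region.
 */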
831 | static int vfio_setup_region_sparse_mmaps(VFIORegion *region, |
832 | struct vfio_region_info *info) |
833 | { |
834 | struct vfio_info_cap_header *hdr; |
835 | struct vfio_region_info_cap_sparse_mmap *sparse; |
836 | int i, j; |
837 | |
838 | hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); |
839 | if (!hdr) { |
840 | return -ENODEV; |
841 | } |
842 | |
843 | sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); |
844 | |
845 | trace_vfio_region_sparse_mmap_header(region->vbasedev->name, |
846 | region->nr, sparse->nr_areas); |
847 | |
848 | region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); |
849 | |
850 | for (i = 0, j = 0; i < sparse->nr_areas; i++) { |
851 | trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, |
852 | sparse->areas[i].offset + |
853 | sparse->areas[i].size); |
854 | |
855 | if (sparse->areas[i].size) { |
856 | region->mmaps[j].offset = sparse->areas[i].offset; |
857 | region->mmaps[j].size = sparse->areas[i].size; |
858 | j++; |
859 | } |
860 | } |
861 | |
862 | region->nr_mmaps = j; |
863 | region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); |
864 | |
865 | return 0; |
866 | } |
867 | |
868 | int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, |
869 | int index, const char *name) |
870 | { |
871 | struct vfio_region_info *info; |
872 | int ret; |
873 | |
874 | ret = vfio_get_region_info(vbasedev, index, &info); |
875 | if (ret) { |
876 | return ret; |
877 | } |
878 | |
879 | region->vbasedev = vbasedev; |
880 | region->flags = info->flags; |
881 | region->size = info->size; |
882 | region->fd_offset = info->offset; |
883 | region->nr = index; |
884 | |
885 | if (region->size) { |
886 | region->mem = g_new0(MemoryRegion, 1); |
887 | memory_region_init_io(region->mem, obj, &vfio_region_ops, |
888 | region, name, region->size); |
889 | |
890 | if (!vbasedev->no_mmap && |
891 | region->flags & VFIO_REGION_INFO_FLAG_MMAP) { |
892 | |
893 | ret = vfio_setup_region_sparse_mmaps(region, info); |
894 | |
895 | if (ret) { |
896 | region->nr_mmaps = 1; |
897 | region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); |
898 | region->mmaps[0].offset = 0; |
899 | region->mmaps[0].size = region->size; |
900 | } |
901 | } |
902 | } |
903 | |
904 | g_free(info); |
905 | |
906 | trace_vfio_region_setup(vbasedev->name, index, name, |
907 | region->flags, region->fd_offset, region->size); |
908 | return 0; |
909 | } |
910 | |
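/*
 * mmap the previously discovered sub-ranges of a region and expose each of
 * them as a RAM device memory subregion for direct guest access.
 */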
911 | int vfio_region_mmap(VFIORegion *region) |
912 | { |
913 | int i, prot = 0; |
914 | char *name; |
915 | |
916 | if (!region->mem) { |
917 | return 0; |
918 | } |
919 | |
920 | prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; |
921 | prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0; |
922 | |
923 | for (i = 0; i < region->nr_mmaps; i++) { |
924 | region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot, |
925 | MAP_SHARED, region->vbasedev->fd, |
926 | region->fd_offset + |
927 | region->mmaps[i].offset); |
928 | if (region->mmaps[i].mmap == MAP_FAILED) { |
929 | int ret = -errno; |
930 | |
931 | trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, |
932 | region->fd_offset + |
933 | region->mmaps[i].offset, |
934 | region->fd_offset + |
935 | region->mmaps[i].offset + |
936 | region->mmaps[i].size - 1, ret); |
937 | |
938 | region->mmaps[i].mmap = NULL; |
939 | |
940 | for (i--; i >= 0; i--) { |
941 | memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); |
942 | munmap(region->mmaps[i].mmap, region->mmaps[i].size); |
943 | object_unparent(OBJECT(®ion->mmaps[i].mem)); |
944 | region->mmaps[i].mmap = NULL; |
945 | } |
946 | |
947 | return ret; |
948 | } |
949 | |
        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
952 | memory_region_init_ram_device_ptr(®ion->mmaps[i].mem, |
953 | memory_region_owner(region->mem), |
954 | name, region->mmaps[i].size, |
955 | region->mmaps[i].mmap); |
956 | g_free(name); |
957 | memory_region_add_subregion(region->mem, region->mmaps[i].offset, |
958 | ®ion->mmaps[i].mem); |
959 | |
960 | trace_vfio_region_mmap(memory_region_name(®ion->mmaps[i].mem), |
961 | region->mmaps[i].offset, |
962 | region->mmaps[i].offset + |
963 | region->mmaps[i].size - 1); |
964 | } |
965 | |
966 | return 0; |
967 | } |
968 | |
969 | void vfio_region_exit(VFIORegion *region) |
970 | { |
971 | int i; |
972 | |
973 | if (!region->mem) { |
974 | return; |
975 | } |
976 | |
977 | for (i = 0; i < region->nr_mmaps; i++) { |
978 | if (region->mmaps[i].mmap) { |
979 | memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); |
980 | } |
981 | } |
982 | |
983 | trace_vfio_region_exit(region->vbasedev->name, region->nr); |
984 | } |
985 | |
986 | void vfio_region_finalize(VFIORegion *region) |
987 | { |
988 | int i; |
989 | |
990 | if (!region->mem) { |
991 | return; |
992 | } |
993 | |
994 | for (i = 0; i < region->nr_mmaps; i++) { |
995 | if (region->mmaps[i].mmap) { |
996 | munmap(region->mmaps[i].mmap, region->mmaps[i].size); |
997 | object_unparent(OBJECT(®ion->mmaps[i].mem)); |
998 | } |
999 | } |
1000 | |
1001 | object_unparent(OBJECT(region->mem)); |
1002 | |
1003 | g_free(region->mem); |
1004 | g_free(region->mmaps); |
1005 | |
1006 | trace_vfio_region_finalize(region->vbasedev->name, region->nr); |
1007 | |
1008 | region->mem = NULL; |
1009 | region->mmaps = NULL; |
1010 | region->nr_mmaps = 0; |
1011 | region->size = 0; |
1012 | region->flags = 0; |
1013 | region->nr = 0; |
1014 | } |
1015 | |
1016 | void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) |
1017 | { |
1018 | int i; |
1019 | |
1020 | if (!region->mem) { |
1021 | return; |
1022 | } |
1023 | |
1024 | for (i = 0; i < region->nr_mmaps; i++) { |
1025 | if (region->mmaps[i].mmap) { |
1026 | memory_region_set_enabled(®ion->mmaps[i].mem, enabled); |
1027 | } |
1028 | } |
1029 | |
1030 | trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), |
1031 | enabled); |
1032 | } |
1033 | |
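/*
 * System reset handler: let each realized device compute whether it needs
 * a reset, then perform the (possibly multi-device) hot resets.
 */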
1034 | void vfio_reset_handler(void *opaque) |
1035 | { |
1036 | VFIOGroup *group; |
1037 | VFIODevice *vbasedev; |
1038 | |
1039 | QLIST_FOREACH(group, &vfio_group_list, next) { |
1040 | QLIST_FOREACH(vbasedev, &group->device_list, next) { |
1041 | if (vbasedev->dev->realized) { |
1042 | vbasedev->ops->vfio_compute_needs_reset(vbasedev); |
1043 | } |
1044 | } |
1045 | } |
1046 | |
1047 | QLIST_FOREACH(group, &vfio_group_list, next) { |
1048 | QLIST_FOREACH(vbasedev, &group->device_list, next) { |
1049 | if (vbasedev->dev->realized && vbasedev->needs_reset) { |
1050 | vbasedev->ops->vfio_hot_reset_multi(vbasedev); |
1051 | } |
1052 | } |
1053 | } |
1054 | } |
1055 | |
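/*
 * Register the group with the KVM VFIO pseudo device (created on first
 * use) so KVM can account for assigned-device side effects such as
 * non-coherent DMA.
 */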
1056 | static void vfio_kvm_device_add_group(VFIOGroup *group) |
1057 | { |
1058 | #ifdef CONFIG_KVM |
1059 | struct kvm_device_attr attr = { |
1060 | .group = KVM_DEV_VFIO_GROUP, |
1061 | .attr = KVM_DEV_VFIO_GROUP_ADD, |
1062 | .addr = (uint64_t)(unsigned long)&group->fd, |
1063 | }; |
1064 | |
1065 | if (!kvm_enabled()) { |
1066 | return; |
1067 | } |
1068 | |
1069 | if (vfio_kvm_device_fd < 0) { |
1070 | struct kvm_create_device cd = { |
1071 | .type = KVM_DEV_TYPE_VFIO, |
1072 | }; |
1073 | |
1074 | if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { |
            error_report("Failed to create KVM VFIO device: %m");
1076 | return; |
1077 | } |
1078 | |
1079 | vfio_kvm_device_fd = cd.fd; |
1080 | } |
1081 | |
1082 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { |
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
1085 | } |
1086 | #endif |
1087 | } |
1088 | |
1089 | static void vfio_kvm_device_del_group(VFIOGroup *group) |
1090 | { |
1091 | #ifdef CONFIG_KVM |
1092 | struct kvm_device_attr attr = { |
1093 | .group = KVM_DEV_VFIO_GROUP, |
1094 | .attr = KVM_DEV_VFIO_GROUP_DEL, |
1095 | .addr = (uint64_t)(unsigned long)&group->fd, |
1096 | }; |
1097 | |
1098 | if (vfio_kvm_device_fd < 0) { |
1099 | return; |
1100 | } |
1101 | |
1102 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { |
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
1105 | } |
1106 | #endif |
1107 | } |
1108 | |
1109 | static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) |
1110 | { |
1111 | VFIOAddressSpace *space; |
1112 | |
1113 | QLIST_FOREACH(space, &vfio_address_spaces, list) { |
1114 | if (space->as == as) { |
1115 | return space; |
1116 | } |
1117 | } |
1118 | |
1119 | /* No suitable VFIOAddressSpace, create a new one */ |
1120 | space = g_malloc0(sizeof(*space)); |
1121 | space->as = as; |
1122 | QLIST_INIT(&space->containers); |
1123 | |
1124 | QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); |
1125 | |
1126 | return space; |
1127 | } |
1128 | |
1129 | static void vfio_put_address_space(VFIOAddressSpace *space) |
1130 | { |
1131 | if (QLIST_EMPTY(&space->containers)) { |
1132 | QLIST_REMOVE(space, list); |
1133 | g_free(space); |
1134 | } |
1135 | } |
1136 | |
1137 | /* |
1138 | * vfio_get_iommu_type - selects the richest iommu_type (v2 first) |
1139 | */ |
1140 | static int vfio_get_iommu_type(VFIOContainer *container, |
1141 | Error **errp) |
1142 | { |
1143 | int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, |
1144 | VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; |
1145 | int i; |
1146 | |
1147 | for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { |
1148 | if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { |
1149 | return iommu_types[i]; |
1150 | } |
1151 | } |
    error_setg(errp, "No available IOMMU models");
1153 | return -EINVAL; |
1154 | } |
1155 | |
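/*
 * Attach the group to the container fd and select the IOMMU model,
 * preferring the v2 interfaces probed by vfio_get_iommu_type().
 */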
1156 | static int vfio_init_container(VFIOContainer *container, int group_fd, |
1157 | Error **errp) |
1158 | { |
1159 | int iommu_type, ret; |
1160 | |
1161 | iommu_type = vfio_get_iommu_type(container, errp); |
1162 | if (iommu_type < 0) { |
1163 | return iommu_type; |
1164 | } |
1165 | |
1166 | ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); |
1167 | if (ret) { |
        error_setg_errno(errp, errno, "Failed to set group container");
1169 | return -errno; |
1170 | } |
1171 | |
1172 | while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { |
1173 | if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { |
            /*
             * On sPAPR, even though the IOMMU subdriver always advertises
             * both v1 and v2, the running platform may not actually support
             * v2, and there is no way to tell until an IOMMU group has been
             * added to the container.  So if setting v2 fails, fall back to v1.
             */
1180 | iommu_type = VFIO_SPAPR_TCE_IOMMU; |
1181 | continue; |
1182 | } |
        error_setg_errno(errp, errno, "Failed to set iommu for container");
1184 | return -errno; |
1185 | } |
1186 | |
1187 | container->iommu_type = iommu_type; |
1188 | return 0; |
1189 | } |
1190 | |
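/*
 * Find or create a VFIOContainer for @as and attach @group to it.  A new
 * container gets its IOMMU type configured, its host DMA windows recorded
 * and the memory listener registered so existing RAM is mapped.
 */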
1191 | static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, |
1192 | Error **errp) |
1193 | { |
1194 | VFIOContainer *container; |
1195 | int ret, fd; |
1196 | VFIOAddressSpace *space; |
1197 | |
1198 | space = vfio_get_address_space(as); |
1199 | |
1200 | /* |
1201 | * VFIO is currently incompatible with memory ballooning insofar as the |
1202 | * madvise to purge (zap) the page from QEMU's address space does not |
1203 | * interact with the memory API and therefore leaves stale virtual to |
1204 | * physical mappings in the IOMMU if the page was previously pinned. We |
1205 | * therefore add a balloon inhibit for each group added to a container, |
1206 | * whether the container is used individually or shared. This provides |
1207 | * us with options to allow devices within a group to opt-in and allow |
1208 | * ballooning, so long as it is done consistently for a group (for instance |
1209 | * if the device is an mdev device where it is known that the host vendor |
1210 | * driver will never pin pages outside of the working set of the guest |
1211 | * driver, which would thus not be ballooning candidates). |
1212 | * |
1213 | * The first opportunity to induce pinning occurs here where we attempt to |
1214 | * attach the group to existing containers within the AddressSpace. If any |
1215 | * pages are already zapped from the virtual address space, such as from a |
1216 | * previous ballooning opt-in, new pinning will cause valid mappings to be |
1217 | * re-established. Likewise, when the overall MemoryListener for a new |
1218 | * container is registered, a replay of mappings within the AddressSpace |
1219 | * will occur, re-establishing any previously zapped pages as well. |
1220 | * |
1221 | * NB. Balloon inhibiting does not currently block operation of the |
1222 | * balloon driver or revoke previously pinned pages, it only prevents |
1223 | * calling madvise to modify the virtual mapping of ballooned pages. |
1224 | */ |
1225 | qemu_balloon_inhibit(true); |
1226 | |
1227 | QLIST_FOREACH(container, &space->containers, next) { |
1228 | if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { |
1229 | group->container = container; |
1230 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); |
1231 | vfio_kvm_device_add_group(group); |
1232 | return 0; |
1233 | } |
1234 | } |
1235 | |
    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
1237 | if (fd < 0) { |
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
1239 | ret = -errno; |
1240 | goto put_space_exit; |
1241 | } |
1242 | |
1243 | ret = ioctl(fd, VFIO_GET_API_VERSION); |
1244 | if (ret != VFIO_API_VERSION) { |
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
1247 | ret = -EINVAL; |
1248 | goto close_fd_exit; |
1249 | } |
1250 | |
1251 | container = g_malloc0(sizeof(*container)); |
1252 | container->space = space; |
1253 | container->fd = fd; |
1254 | QLIST_INIT(&container->giommu_list); |
1255 | QLIST_INIT(&container->hostwin_list); |
1256 | |
1257 | ret = vfio_init_container(container, group->fd, errp); |
1258 | if (ret) { |
1259 | goto free_container_exit; |
1260 | } |
1261 | |
1262 | switch (container->iommu_type) { |
1263 | case VFIO_TYPE1v2_IOMMU: |
1264 | case VFIO_TYPE1_IOMMU: |
1265 | { |
1266 | struct vfio_iommu_type1_info info; |
1267 | |
1268 | /* |
1269 | * FIXME: This assumes that a Type1 IOMMU can map any 64-bit |
1270 | * IOVA whatsoever. That's not actually true, but the current |
1271 | * kernel interface doesn't tell us what it can map, and the |
1272 | * existing Type1 IOMMUs generally support any IOVA we're |
1273 | * going to actually try in practice. |
1274 | */ |
1275 | info.argsz = sizeof(info); |
1276 | ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info); |
1277 | /* Ignore errors */ |
1278 | if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) { |
1279 | /* Assume 4k IOVA page size */ |
1280 | info.iova_pgsizes = 4096; |
1281 | } |
1282 | vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes); |
1283 | container->pgsizes = info.iova_pgsizes; |
1284 | break; |
1285 | } |
1286 | case VFIO_SPAPR_TCE_v2_IOMMU: |
1287 | case VFIO_SPAPR_TCE_IOMMU: |
1288 | { |
1289 | struct vfio_iommu_spapr_tce_info info; |
1290 | bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; |
1291 | |
1292 | /* |
1293 | * The host kernel code implementing VFIO_IOMMU_DISABLE is called |
1294 | * when container fd is closed so we do not call it explicitly |
1295 | * in this file. |
1296 | */ |
1297 | if (!v2) { |
1298 | ret = ioctl(fd, VFIO_IOMMU_ENABLE); |
1299 | if (ret) { |
                error_setg_errno(errp, errno, "failed to enable container");
1301 | ret = -errno; |
1302 | goto free_container_exit; |
1303 | } |
1304 | } else { |
1305 | container->prereg_listener = vfio_prereg_listener; |
1306 | |
1307 | memory_listener_register(&container->prereg_listener, |
1308 | &address_space_memory); |
1309 | if (container->error) { |
1310 | memory_listener_unregister(&container->prereg_listener); |
1311 | ret = container->error; |
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
1314 | goto free_container_exit; |
1315 | } |
1316 | } |
1317 | |
1318 | info.argsz = sizeof(info); |
1319 | ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); |
1320 | if (ret) { |
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
1323 | ret = -errno; |
1324 | if (v2) { |
1325 | memory_listener_unregister(&container->prereg_listener); |
1326 | } |
1327 | goto free_container_exit; |
1328 | } |
1329 | |
1330 | if (v2) { |
1331 | container->pgsizes = info.ddw.pgsizes; |
            /*
             * A freshly created container comes with a default DMA window.
             * To keep region_add/del simple, remove that window now and let
             * the IOMMU listener callbacks create/remove windows as needed.
             */
1338 | ret = vfio_spapr_remove_window(container, info.dma32_window_start); |
1339 | if (ret) { |
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
1342 | goto free_container_exit; |
1343 | } |
1344 | } else { |
1345 | /* The default table uses 4K pages */ |
1346 | container->pgsizes = 0x1000; |
1347 | vfio_host_win_add(container, info.dma32_window_start, |
1348 | info.dma32_window_start + |
1349 | info.dma32_window_size - 1, |
1350 | 0x1000); |
1351 | } |
1352 | } |
1353 | } |
1354 | |
1355 | vfio_kvm_device_add_group(group); |
1356 | |
1357 | QLIST_INIT(&container->group_list); |
1358 | QLIST_INSERT_HEAD(&space->containers, container, next); |
1359 | |
1360 | group->container = container; |
1361 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); |
1362 | |
1363 | container->listener = vfio_memory_listener; |
1364 | |
1365 | memory_listener_register(&container->listener, container->space->as); |
1366 | |
1367 | if (container->error) { |
1368 | ret = container->error; |
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
1371 | goto listener_release_exit; |
1372 | } |
1373 | |
1374 | container->initialized = true; |
1375 | |
1376 | return 0; |
1377 | listener_release_exit: |
1378 | QLIST_REMOVE(group, container_next); |
1379 | QLIST_REMOVE(container, next); |
1380 | vfio_kvm_device_del_group(group); |
1381 | vfio_listener_release(container); |
1382 | |
1383 | free_container_exit: |
1384 | g_free(container); |
1385 | |
1386 | close_fd_exit: |
1387 | close(fd); |
1388 | |
1389 | put_space_exit: |
1390 | qemu_balloon_inhibit(false); |
1391 | vfio_put_address_space(space); |
1392 | |
1393 | return ret; |
1394 | } |
1395 | |
1396 | static void vfio_disconnect_container(VFIOGroup *group) |
1397 | { |
1398 | VFIOContainer *container = group->container; |
1399 | |
1400 | QLIST_REMOVE(group, container_next); |
1401 | group->container = NULL; |
1402 | |
1403 | /* |
1404 | * Explicitly release the listener first before unset container, |
1405 | * since unset may destroy the backend container if it's the last |
1406 | * group. |
1407 | */ |
1408 | if (QLIST_EMPTY(&container->group_list)) { |
1409 | vfio_listener_release(container); |
1410 | } |
1411 | |
1412 | if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { |
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
1415 | } |
1416 | |
1417 | if (QLIST_EMPTY(&container->group_list)) { |
1418 | VFIOAddressSpace *space = container->space; |
1419 | VFIOGuestIOMMU *giommu, *tmp; |
1420 | |
1421 | QLIST_REMOVE(container, next); |
1422 | |
1423 | QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { |
1424 | memory_region_unregister_iommu_notifier( |
1425 | MEMORY_REGION(giommu->iommu), &giommu->n); |
1426 | QLIST_REMOVE(giommu, giommu_next); |
1427 | g_free(giommu); |
1428 | } |
1429 | |
1430 | trace_vfio_disconnect_container(container->fd); |
1431 | close(container->fd); |
1432 | g_free(container); |
1433 | |
1434 | vfio_put_address_space(space); |
1435 | } |
1436 | } |
1437 | |
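/*
 * Look up or open /dev/vfio/$GROUP, check that the group is viable and
 * connect it to a container in the given address space.
 */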
1438 | VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) |
1439 | { |
1440 | VFIOGroup *group; |
1441 | char path[32]; |
1442 | struct vfio_group_status status = { .argsz = sizeof(status) }; |
1443 | |
1444 | QLIST_FOREACH(group, &vfio_group_list, next) { |
1445 | if (group->groupid == groupid) { |
1446 | /* Found it. Now is it already in the right context? */ |
1447 | if (group->container->space->as == as) { |
1448 | return group; |
1449 | } else { |
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
1452 | return NULL; |
1453 | } |
1454 | } |
1455 | } |
1456 | |
1457 | group = g_malloc0(sizeof(*group)); |
1458 | |
    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
1460 | group->fd = qemu_open(path, O_RDWR); |
1461 | if (group->fd < 0) { |
        error_setg_errno(errp, errno, "failed to open %s", path);
1463 | goto free_group_exit; |
1464 | } |
1465 | |
1466 | if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { |
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
1468 | goto close_fd_exit; |
1469 | } |
1470 | |
1471 | if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { |
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
1476 | goto close_fd_exit; |
1477 | } |
1478 | |
1479 | group->groupid = groupid; |
1480 | QLIST_INIT(&group->device_list); |
1481 | |
1482 | if (vfio_connect_container(group, as, errp)) { |
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
1485 | goto close_fd_exit; |
1486 | } |
1487 | |
1488 | if (QLIST_EMPTY(&vfio_group_list)) { |
1489 | qemu_register_reset(vfio_reset_handler, NULL); |
1490 | } |
1491 | |
1492 | QLIST_INSERT_HEAD(&vfio_group_list, group, next); |
1493 | |
1494 | return group; |
1495 | |
1496 | close_fd_exit: |
1497 | close(group->fd); |
1498 | |
1499 | free_group_exit: |
1500 | g_free(group); |
1501 | |
1502 | return NULL; |
1503 | } |
1504 | |
1505 | void vfio_put_group(VFIOGroup *group) |
1506 | { |
1507 | if (!group || !QLIST_EMPTY(&group->device_list)) { |
1508 | return; |
1509 | } |
1510 | |
1511 | if (!group->balloon_allowed) { |
1512 | qemu_balloon_inhibit(false); |
1513 | } |
1514 | vfio_kvm_device_del_group(group); |
1515 | vfio_disconnect_container(group); |
1516 | QLIST_REMOVE(group, next); |
1517 | trace_vfio_put_group(group->fd); |
1518 | close(group->fd); |
1519 | g_free(group); |
1520 | |
1521 | if (QLIST_EMPTY(&vfio_group_list)) { |
1522 | qemu_unregister_reset(vfio_reset_handler, NULL); |
1523 | } |
1524 | } |
1525 | |
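/*
 * Obtain a device fd from the group and fill in the common VFIODevice
 * fields (region/irq counts, flags, reset capability).
 */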
1526 | int vfio_get_device(VFIOGroup *group, const char *name, |
1527 | VFIODevice *vbasedev, Error **errp) |
1528 | { |
1529 | struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; |
1530 | int ret, fd; |
1531 | |
1532 | fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); |
1533 | if (fd < 0) { |
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to vfio-<bus> "
                          "or pci-stub and not already in use\n", group->groupid);
1539 | return fd; |
1540 | } |
1541 | |
1542 | ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info); |
1543 | if (ret) { |
        error_setg_errno(errp, errno, "error getting device info");
1545 | close(fd); |
1546 | return ret; |
1547 | } |
1548 | |
1549 | /* |
1550 | * Clear the balloon inhibitor for this group if the driver knows the |
1551 | * device operates compatibly with ballooning. Setting must be consistent |
1552 | * per group, but since compatibility is really only possible with mdev |
1553 | * currently, we expect singleton groups. |
1554 | */ |
1555 | if (vbasedev->balloon_allowed != group->balloon_allowed) { |
1556 | if (!QLIST_EMPTY(&group->device_list)) { |
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
1559 | close(fd); |
1560 | return -1; |
1561 | } |
1562 | |
1563 | if (!group->balloon_allowed) { |
1564 | group->balloon_allowed = true; |
1565 | qemu_balloon_inhibit(false); |
1566 | } |
1567 | } |
1568 | |
1569 | vbasedev->fd = fd; |
1570 | vbasedev->group = group; |
1571 | QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); |
1572 | |
1573 | vbasedev->num_irqs = dev_info.num_irqs; |
1574 | vbasedev->num_regions = dev_info.num_regions; |
1575 | vbasedev->flags = dev_info.flags; |
1576 | |
1577 | trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions, |
1578 | dev_info.num_irqs); |
1579 | |
1580 | vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); |
1581 | return 0; |
1582 | } |
1583 | |
1584 | void vfio_put_base_device(VFIODevice *vbasedev) |
1585 | { |
1586 | if (!vbasedev->group) { |
1587 | return; |
1588 | } |
1589 | QLIST_REMOVE(vbasedev, next); |
1590 | vbasedev->group = NULL; |
1591 | trace_vfio_put_base_device(vbasedev->fd); |
1592 | close(vbasedev->fd); |
1593 | } |
1594 | |
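/*
 * Query VFIO_DEVICE_GET_REGION_INFO, growing the buffer until the kernel's
 * reported argsz fits (capability chains can exceed the base struct).
 */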
1595 | int vfio_get_region_info(VFIODevice *vbasedev, int index, |
1596 | struct vfio_region_info **info) |
1597 | { |
1598 | size_t argsz = sizeof(struct vfio_region_info); |
1599 | |
1600 | *info = g_malloc0(argsz); |
1601 | |
1602 | (*info)->index = index; |
1603 | retry: |
1604 | (*info)->argsz = argsz; |
1605 | |
1606 | if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { |
1607 | g_free(*info); |
1608 | *info = NULL; |
1609 | return -errno; |
1610 | } |
1611 | |
1612 | if ((*info)->argsz > argsz) { |
1613 | argsz = (*info)->argsz; |
1614 | *info = g_realloc(*info, argsz); |
1615 | |
1616 | goto retry; |
1617 | } |
1618 | |
1619 | return 0; |
1620 | } |
1621 | |
1622 | int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, |
1623 | uint32_t subtype, struct vfio_region_info **info) |
1624 | { |
1625 | int i; |
1626 | |
1627 | for (i = 0; i < vbasedev->num_regions; i++) { |
1628 | struct vfio_info_cap_header *hdr; |
1629 | struct vfio_region_info_cap_type *cap_type; |
1630 | |
1631 | if (vfio_get_region_info(vbasedev, i, info)) { |
1632 | continue; |
1633 | } |
1634 | |
1635 | hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); |
1636 | if (!hdr) { |
1637 | g_free(*info); |
1638 | continue; |
1639 | } |
1640 | |
1641 | cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); |
1642 | |
1643 | trace_vfio_get_dev_region(vbasedev->name, i, |
1644 | cap_type->type, cap_type->subtype); |
1645 | |
1646 | if (cap_type->type == type && cap_type->subtype == subtype) { |
1647 | return 0; |
1648 | } |
1649 | |
1650 | g_free(*info); |
1651 | } |
1652 | |
1653 | *info = NULL; |
1654 | return -ENODEV; |
1655 | } |
1656 | |
1657 | bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) |
1658 | { |
1659 | struct vfio_region_info *info = NULL; |
1660 | bool ret = false; |
1661 | |
1662 | if (!vfio_get_region_info(vbasedev, region, &info)) { |
1663 | if (vfio_get_region_info_cap(info, cap_type)) { |
1664 | ret = true; |
1665 | } |
1666 | g_free(info); |
1667 | } |
1668 | |
1669 | return ret; |
1670 | } |
1671 | |
1672 | /* |
1673 | * Interfaces for IBM EEH (Enhanced Error Handling) |
1674 | */ |
1675 | static bool vfio_eeh_container_ok(VFIOContainer *container) |
1676 | { |
1677 | /* |
1678 | * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO |
1679 | * implementation is broken if there are multiple groups in a |
1680 | * container. The hardware works in units of Partitionable |
1681 | * Endpoints (== IOMMU groups) and the EEH operations naively |
1682 | * iterate across all groups in the container, without any logic |
1683 | * to make sure the groups have their state synchronized. For |
1684 | * certain operations (ENABLE) that might be ok, until an error |
1685 | * occurs, but for others (GET_STATE) it's clearly broken. |
1686 | */ |
1687 | |
1688 | /* |
1689 | * XXX Once fixed kernels exist, test for them here |
1690 | */ |
1691 | |
1692 | if (QLIST_EMPTY(&container->group_list)) { |
1693 | return false; |
1694 | } |
1695 | |
1696 | if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) { |
1697 | return false; |
1698 | } |
1699 | |
1700 | return true; |
1701 | } |
1702 | |
1703 | static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) |
1704 | { |
1705 | struct vfio_eeh_pe_op pe_op = { |
1706 | .argsz = sizeof(pe_op), |
1707 | .op = op, |
1708 | }; |
1709 | int ret; |
1710 | |
1711 | if (!vfio_eeh_container_ok(container)) { |
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
1714 | return -EPERM; |
1715 | } |
1716 | |
1717 | ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op); |
1718 | if (ret < 0) { |
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
1720 | return -errno; |
1721 | } |
1722 | |
1723 | return ret; |
1724 | } |
1725 | |
1726 | static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) |
1727 | { |
1728 | VFIOAddressSpace *space = vfio_get_address_space(as); |
1729 | VFIOContainer *container = NULL; |
1730 | |
1731 | if (QLIST_EMPTY(&space->containers)) { |
1732 | /* No containers to act on */ |
1733 | goto out; |
1734 | } |
1735 | |
1736 | container = QLIST_FIRST(&space->containers); |
1737 | |
1738 | if (QLIST_NEXT(container, next)) { |
1739 | /* We don't yet have logic to synchronize EEH state across |
1740 | * multiple containers */ |
1741 | container = NULL; |
1742 | goto out; |
1743 | } |
1744 | |
1745 | out: |
1746 | vfio_put_address_space(space); |
1747 | return container; |
1748 | } |
1749 | |
1750 | bool vfio_eeh_as_ok(AddressSpace *as) |
1751 | { |
1752 | VFIOContainer *container = vfio_eeh_as_container(as); |
1753 | |
1754 | return (container != NULL) && vfio_eeh_container_ok(container); |
1755 | } |
1756 | |
1757 | int vfio_eeh_as_op(AddressSpace *as, uint32_t op) |
1758 | { |
1759 | VFIOContainer *container = vfio_eeh_as_container(as); |
1760 | |
1761 | if (!container) { |
1762 | return -ENODEV; |
1763 | } |
1764 | return vfio_eeh_container_op(container, op); |
1765 | } |
1766 | |