/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;
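
/*
 * Note: when VIRTIO_RING_F_EVENT_IDX is negotiated, the guest-visible
 * layout additionally carries a 16-bit used_event after the avail ring
 * and a 16-bit avail_event after the used ring; see
 * vring_get_used_event() and vring_set_avail_event() below, which
 * access them as ring[vring.num].
 */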

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;
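
/*
 * The caches are protected by RCU: readers pick them up with
 * atomic_rcu_read() under rcu_read_lock(), while writers publish a new
 * set with atomic_rcu_set() and reclaim the old one via call_rcu(); see
 * virtio_init_region_cache() below.
 */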

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

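    /* Last used index written to the used ring (shadow of used->idx). */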
    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether that signalled_used value is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    if (!caches) {
        return;
    }

    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int event_size;
    int64_t len;

    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
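
/*
 * A worked example of the layout computed above (a sketch, assuming a
 * hypothetical queue size): with num = 256 and the default 4096-byte
 * alignment, the descriptor table occupies 256 * 16 = 4096 bytes, the
 * avail ring starts right after it and takes 4 + 256 * 2 = 516 bytes
 * (plus 2 bytes for used_event with VIRTIO_RING_F_EVENT_IDX), and the
 * used ring starts at the next 4096-byte boundary, i.e. desc + 8192.
 */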

/* Called within rcu_read_lock(). */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    assert(caches != NULL);
    return caches;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    vq->used_idx = val;
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    rcu_read_lock();
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
    rcu_read_unlock();
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

int virtio_queue_empty(VirtQueue *vq)
{
    bool empty;

    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    rcu_read_lock();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    rcu_read_unlock();
    return empty;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

/* Called within rcu_read_lock(). */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
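    /*
     * If the new used index has moved past the point we last signalled
     * on, drop signalled_used_valid so that virtio_should_notify() errs
     * on the side of notifying the guest.
     */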
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    rcu_read_lock();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
    rcu_read_unlock();
}

/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;
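
    /*
     * Both indices are free-running 16-bit counters, so this unsigned
     * subtraction yields the number of new heads even after the avail
     * index wraps around.
     */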

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    MemoryRegionCache *desc_cache,
                                    unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    if (unlikely(!vq->vring.desc)) {
        if (in_bytes) {
            *in_bytes = 0;
        }
        if (out_bytes) {
            *out_bytes = 0;
        }
        return;
    }

    rcu_read_lock();
    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    rcu_read_unlock();
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
}

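/*
 * The VirtQueueElement and its four arrays are carved out of a single
 * allocation; the layout, in order, is:
 *
 *     [VirtQueueElement][in_addr[in_num]][out_addr[out_num]]
 *     [in_sg[in_num]][out_sg[out_num]]
 *
 * with each array aligned to its element type, as computed below.
 */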
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    rcu_read_lock();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);
    rcu_read_unlock();

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
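
/*
 * A minimal sketch of how a device model typically drives the functions
 * above from its queue handler; the handler name and the per-element
 * processing are hypothetical, not part of this file:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         for (;;) {
 *             elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *             if (!elem) {
 *                 break;
 *             }
 *             ... read the request from elem->out_sg, write the reply
 *             ... into elem->in_sg, tracking the bytes written as "len"
 *             virtqueue_push(vq, elem, len);
 *             g_free(elem);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */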

/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers cannot be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    if (unlikely(vdev->broken)) {
        return 0;
    }

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake. We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading. Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above. */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}

static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}
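
/*
 * A legacy (virtio 0.9) transport sizes the queue and then hands over a
 * single base address from which the three rings are derived, roughly
 * (a sketch; the shift constant is the legacy virtio-pci one and is an
 * assumption here, not something used by this file):
 *
 *     virtio_queue_set_num(vdev, n, num);
 *     virtio_queue_set_addr(vdev, n, pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT);
 *
 * A virtio 1.0 transport instead passes the three ring addresses
 * separately via virtio_queue_set_rings() below.
 */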

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}

static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    bool ret = false;

    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        ret = vq->handle_aio_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }

    return ret;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->handle_aio_output) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}
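
/*
 * Devices create their queues at realize time, e.g. (a sketch with a
 * hypothetical device state "s" and handler "my_handle_output"):
 *
 *     s->vq = virtio_add_queue(vdev, 256, my_handle_output);
 */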

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
    vdev->vq[n].handle_output = NULL;
    vdev->vq[n].handle_aio_output = NULL;
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}

/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
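    /*
     * With VIRTIO_RING_F_EVENT_IDX, notify only if the used index has
     * crossed the guest's used_event since the last signal; always
     * notify if the previously signalled value is unreliable.
     */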
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster. Hence,
     * this manifested as a more serious hang during shutdown.
     *
     * The next driver release, from 2016, fixed this problem, so working
     * around it is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
1743 | |
1744 | static bool virtio_device_endian_needed(void *opaque) |
1745 | { |
1746 | VirtIODevice *vdev = opaque; |
1747 | |
1748 | assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN); |
1749 | if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
1750 | return vdev->device_endian != virtio_default_endian(); |
1751 | } |
1752 | /* Devices conforming to VIRTIO 1.0 or later are always LE. */ |
1753 | return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE; |
1754 | } |
1755 | |
1756 | static bool virtio_64bit_features_needed(void *opaque) |
1757 | { |
1758 | VirtIODevice *vdev = opaque; |
1759 | |
1760 | return (vdev->host_features >> 32) != 0; |
1761 | } |
1762 | |
1763 | static bool virtio_virtqueue_needed(void *opaque) |
1764 | { |
1765 | VirtIODevice *vdev = opaque; |
1766 | |
1767 | return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1); |
1768 | } |
1769 | |
1770 | static bool virtio_ringsize_needed(void *opaque) |
1771 | { |
1772 | VirtIODevice *vdev = opaque; |
1773 | int i; |
1774 | |
1775 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
1776 | if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { |
1777 | return true; |
1778 | } |
1779 | } |
1780 | return false; |
1781 | } |
1782 | |
1783 | static bool (void *opaque) |
1784 | { |
1785 | VirtIODevice *vdev = opaque; |
1786 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
1787 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
1788 | |
1789 | return k->has_extra_state && |
1790 | k->has_extra_state(qbus->parent); |
1791 | } |
1792 | |
1793 | static bool virtio_broken_needed(void *opaque) |
1794 | { |
1795 | VirtIODevice *vdev = opaque; |
1796 | |
1797 | return vdev->broken; |
1798 | } |
1799 | |
1800 | static bool virtio_started_needed(void *opaque) |
1801 | { |
1802 | VirtIODevice *vdev = opaque; |
1803 | |
1804 | return vdev->started; |
1805 | } |
1806 | |
1807 | static const VMStateDescription vmstate_virtqueue = { |
1808 | .name = "virtqueue_state" , |
1809 | .version_id = 1, |
1810 | .minimum_version_id = 1, |
1811 | .fields = (VMStateField[]) { |
1812 | VMSTATE_UINT64(vring.avail, struct VirtQueue), |
1813 | VMSTATE_UINT64(vring.used, struct VirtQueue), |
1814 | VMSTATE_END_OF_LIST() |
1815 | } |
1816 | }; |
1817 | |
1818 | static const VMStateDescription vmstate_virtio_virtqueues = { |
1819 | .name = "virtio/virtqueues" , |
1820 | .version_id = 1, |
1821 | .minimum_version_id = 1, |
1822 | .needed = &virtio_virtqueue_needed, |
1823 | .fields = (VMStateField[]) { |
1824 | VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, |
1825 | VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), |
1826 | VMSTATE_END_OF_LIST() |
1827 | } |
1828 | }; |
1829 | |
1830 | static const VMStateDescription vmstate_ringsize = { |
1831 | .name = "ringsize_state" , |
1832 | .version_id = 1, |
1833 | .minimum_version_id = 1, |
1834 | .fields = (VMStateField[]) { |
1835 | VMSTATE_UINT32(vring.num_default, struct VirtQueue), |
1836 | VMSTATE_END_OF_LIST() |
1837 | } |
1838 | }; |
1839 | |
1840 | static const VMStateDescription vmstate_virtio_ringsize = { |
1841 | .name = "virtio/ringsize" , |
1842 | .version_id = 1, |
1843 | .minimum_version_id = 1, |
1844 | .needed = &virtio_ringsize_needed, |
1845 | .fields = (VMStateField[]) { |
1846 | VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, |
1847 | VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), |
1848 | VMSTATE_END_OF_LIST() |
1849 | } |
1850 | }; |
1851 | |
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
1854 | { |
1855 | VirtIODevice *vdev = pv; |
1856 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
1857 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
1858 | |
1859 | if (!k->load_extra_state) { |
1860 | return -1; |
1861 | } else { |
1862 | return k->load_extra_state(qbus->parent, f); |
1863 | } |
1864 | } |
1865 | |
static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
1868 | { |
1869 | VirtIODevice *vdev = pv; |
1870 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
1871 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
1872 | |
1873 | k->save_extra_state(qbus->parent, f); |
1874 | return 0; |
1875 | } |
1876 | |
static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};
1882 | |
static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
1885 | .version_id = 1, |
1886 | .minimum_version_id = 1, |
1887 | .needed = &virtio_extra_state_needed, |
1888 | .fields = (VMStateField[]) { |
1889 | { |
1890 | .name = "extra_state" , |
1891 | .version_id = 0, |
1892 | .field_exists = NULL, |
1893 | .size = 0, |
1894 | .info = &vmstate_info_extra_state, |
1895 | .flags = VMS_SINGLE, |
1896 | .offset = 0, |
1897 | }, |
1898 | VMSTATE_END_OF_LIST() |
1899 | } |
1900 | }; |
1901 | |
1902 | static const VMStateDescription vmstate_virtio_device_endian = { |
1903 | .name = "virtio/device_endian" , |
1904 | .version_id = 1, |
1905 | .minimum_version_id = 1, |
1906 | .needed = &virtio_device_endian_needed, |
1907 | .fields = (VMStateField[]) { |
1908 | VMSTATE_UINT8(device_endian, VirtIODevice), |
1909 | VMSTATE_END_OF_LIST() |
1910 | } |
1911 | }; |
1912 | |
1913 | static const VMStateDescription vmstate_virtio_64bit_features = { |
1914 | .name = "virtio/64bit_features" , |
1915 | .version_id = 1, |
1916 | .minimum_version_id = 1, |
1917 | .needed = &virtio_64bit_features_needed, |
1918 | .fields = (VMStateField[]) { |
1919 | VMSTATE_UINT64(guest_features, VirtIODevice), |
1920 | VMSTATE_END_OF_LIST() |
1921 | } |
1922 | }; |
1923 | |
1924 | static const VMStateDescription vmstate_virtio_broken = { |
1925 | .name = "virtio/broken" , |
1926 | .version_id = 1, |
1927 | .minimum_version_id = 1, |
1928 | .needed = &virtio_broken_needed, |
1929 | .fields = (VMStateField[]) { |
1930 | VMSTATE_BOOL(broken, VirtIODevice), |
1931 | VMSTATE_END_OF_LIST() |
1932 | } |
1933 | }; |
1934 | |
1935 | static const VMStateDescription vmstate_virtio_started = { |
1936 | .name = "virtio/started" , |
1937 | .version_id = 1, |
1938 | .minimum_version_id = 1, |
1939 | .needed = &virtio_started_needed, |
1940 | .fields = (VMStateField[]) { |
1941 | VMSTATE_BOOL(started, VirtIODevice), |
1942 | VMSTATE_END_OF_LIST() |
1943 | } |
1944 | }; |
1945 | |
1946 | static const VMStateDescription vmstate_virtio = { |
1947 | .name = "virtio" , |
1948 | .version_id = 1, |
1949 | .minimum_version_id = 1, |
1950 | .minimum_version_id_old = 1, |
1951 | .fields = (VMStateField[]) { |
1952 | VMSTATE_END_OF_LIST() |
1953 | }, |
1954 | .subsections = (const VMStateDescription*[]) { |
1955 | &vmstate_virtio_device_endian, |
1956 | &vmstate_virtio_64bit_features, |
1957 | &vmstate_virtio_virtqueues, |
1958 | &vmstate_virtio_ringsize, |
1959 | &vmstate_virtio_broken, |
1960 | &vmstate_virtio_extra_state, |
1961 | &vmstate_virtio_started, |
1962 | NULL |
1963 | } |
1964 | }; |
1965 | |
1966 | int virtio_save(VirtIODevice *vdev, QEMUFile *f) |
1967 | { |
1968 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
1969 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
1970 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); |
1971 | uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); |
1972 | int i; |
1973 | |
1974 | if (k->save_config) { |
1975 | k->save_config(qbus->parent, f); |
1976 | } |
1977 | |
1978 | qemu_put_8s(f, &vdev->status); |
1979 | qemu_put_8s(f, &vdev->isr); |
1980 | qemu_put_be16s(f, &vdev->queue_sel); |
1981 | qemu_put_be32s(f, &guest_features_lo); |
1982 | qemu_put_be32(f, vdev->config_len); |
1983 | qemu_put_buffer(f, vdev->config, vdev->config_len); |
1984 | |
1985 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1988 | } |
1989 | |
1990 | qemu_put_be32(f, i); |
1991 | |
1992 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1995 | |
1996 | qemu_put_be32(f, vdev->vq[i].vring.num); |
1997 | if (k->has_variable_vring_alignment) { |
1998 | qemu_put_be32(f, vdev->vq[i].vring.align); |
1999 | } |
2000 | /* |
2001 | * Save desc now, the rest of the ring addresses are saved in |
2002 | * subsections for VIRTIO-1 devices. |
2003 | */ |
2004 | qemu_put_be64(f, vdev->vq[i].vring.desc); |
2005 | qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); |
2006 | if (k->save_queue) { |
2007 | k->save_queue(qbus->parent, i, f); |
2008 | } |
2009 | } |
2010 | |
2011 | if (vdc->save != NULL) { |
2012 | vdc->save(vdev, f); |
2013 | } |
2014 | |
2015 | if (vdc->vmsd) { |
2016 | int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); |
2017 | if (ret) { |
2018 | return ret; |
2019 | } |
2020 | } |
2021 | |
2022 | /* Subsections */ |
2023 | return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); |
2024 | } |
2025 | |
2026 | /* A wrapper for use as a VMState .put function */ |
2027 | static int virtio_device_put(QEMUFile *f, void *opaque, size_t size, |
2028 | const VMStateField *field, QJSON *vmdesc) |
2029 | { |
2030 | return virtio_save(VIRTIO_DEVICE(opaque), f); |
2031 | } |
2032 | |
2033 | /* A wrapper for use as a VMState .get function */ |
2034 | static int virtio_device_get(QEMUFile *f, void *opaque, size_t size, |
2035 | const VMStateField *field) |
2036 | { |
2037 | VirtIODevice *vdev = VIRTIO_DEVICE(opaque); |
2038 | DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev)); |
2039 | |
2040 | return virtio_load(vdev, f, dc->vmsd->version_id); |
2041 | } |
2042 | |
2043 | const VMStateInfo virtio_vmstate_info = { |
2044 | .name = "virtio" , |
2045 | .get = virtio_device_get, |
2046 | .put = virtio_device_put, |
2047 | }; |
2048 | |
2049 | static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) |
2050 | { |
2051 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
2052 | bool bad = (val & ~(vdev->host_features)) != 0; |
2053 | |
2054 | val &= vdev->host_features; |
2055 | if (k->set_features) { |
2056 | k->set_features(vdev, val); |
2057 | } |
2058 | vdev->guest_features = val; |
2059 | return bad ? -1 : 0; |
2060 | } |
2061 | |
2062 | int virtio_set_features(VirtIODevice *vdev, uint64_t val) |
2063 | { |
2064 | int ret; |
2065 | /* |
2066 | * The driver must not attempt to set features after feature negotiation |
2067 | * has finished. |
2068 | */ |
2069 | if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { |
2070 | return -EINVAL; |
2071 | } |
2072 | ret = virtio_set_features_nocheck(vdev, val); |
2073 | if (!ret) { |
2074 | if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { |
2075 | /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */ |
2076 | int i; |
2077 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
2078 | if (vdev->vq[i].vring.num != 0) { |
2079 | virtio_init_region_cache(vdev, i); |
2080 | } |
2081 | } |
2082 | } |
2083 | |
2084 | if (!virtio_device_started(vdev, vdev->status) && |
2085 | !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
2086 | vdev->start_on_kick = true; |
2087 | } |
2088 | } |
2089 | return ret; |
2090 | } |
2091 | |
2092 | size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes, |
2093 | uint64_t host_features) |
2094 | { |
2095 | size_t config_size = 0; |
2096 | int i; |
2097 | |
2098 | for (i = 0; feature_sizes[i].flags != 0; i++) { |
2099 | if (host_features & feature_sizes[i].flags) { |
2100 | config_size = MAX(feature_sizes[i].end, config_size); |
2101 | } |
2102 | } |
2103 | |
2104 | return config_size; |
2105 | } |
2106 | |
2107 | int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) |
2108 | { |
2109 | int i, ret; |
2110 | int32_t config_len; |
2111 | uint32_t num; |
2112 | uint32_t features; |
2113 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2114 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
2115 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); |
2116 | |
2117 | /* |
2118 | * We poison the endianness to ensure it does not get used before |
2119 | * subsections have been loaded. |
2120 | */ |
2121 | vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN; |
2122 | |
2123 | if (k->load_config) { |
2124 | ret = k->load_config(qbus->parent, f); |
        if (ret) {
            return ret;
        }
2127 | } |
2128 | |
2129 | qemu_get_8s(f, &vdev->status); |
2130 | qemu_get_8s(f, &vdev->isr); |
2131 | qemu_get_be16s(f, &vdev->queue_sel); |
2132 | if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) { |
2133 | return -1; |
2134 | } |
2135 | qemu_get_be32s(f, &features); |
2136 | |
2137 | /* |
2138 | * Temporarily set guest_features low bits - needed by |
2139 | * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS |
2140 | * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ. |
2141 | * |
2142 | * Note: devices should always test host features in future - don't create |
2143 | * new dependencies like this. |
2144 | */ |
2145 | vdev->guest_features = features; |
2146 | |
2147 | config_len = qemu_get_be32(f); |
2148 | |
2149 | /* |
2150 | * There are cases where the incoming config can be bigger or smaller |
2151 | * than what we have; so load what we have space for, and skip |
2152 | * any excess that's in the stream. |
2153 | */ |
2154 | qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len)); |
2155 | |
2156 | while (config_len > vdev->config_len) { |
2157 | qemu_get_byte(f); |
2158 | config_len--; |
2159 | } |
2160 | |
2161 | num = qemu_get_be32(f); |
2162 | |
2163 | if (num > VIRTIO_QUEUE_MAX) { |
2164 | error_report("Invalid number of virtqueues: 0x%x" , num); |
2165 | return -1; |
2166 | } |
2167 | |
2168 | for (i = 0; i < num; i++) { |
2169 | vdev->vq[i].vring.num = qemu_get_be32(f); |
2170 | if (k->has_variable_vring_alignment) { |
2171 | vdev->vq[i].vring.align = qemu_get_be32(f); |
2172 | } |
2173 | vdev->vq[i].vring.desc = qemu_get_be64(f); |
2174 | qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); |
2175 | vdev->vq[i].signalled_used_valid = false; |
2176 | vdev->vq[i].notification = true; |
2177 | |
2178 | if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { |
2179 | error_report("VQ %d address 0x0 " |
2180 | "inconsistent with Host index 0x%x" , |
2181 | i, vdev->vq[i].last_avail_idx); |
2182 | return -1; |
2183 | } |
2184 | if (k->load_queue) { |
2185 | ret = k->load_queue(qbus->parent, i, f); |
            if (ret) {
                return ret;
            }
2188 | } |
2189 | } |
2190 | |
2191 | virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); |
2192 | |
2193 | if (vdc->load != NULL) { |
2194 | ret = vdc->load(vdev, f, version_id); |
2195 | if (ret) { |
2196 | return ret; |
2197 | } |
2198 | } |
2199 | |
2200 | if (vdc->vmsd) { |
2201 | ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); |
2202 | if (ret) { |
2203 | return ret; |
2204 | } |
2205 | } |
2206 | |
2207 | /* Subsections */ |
2208 | ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); |
2209 | if (ret) { |
2210 | return ret; |
2211 | } |
2212 | |
2213 | if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) { |
2214 | vdev->device_endian = virtio_default_endian(); |
2215 | } |
2216 | |
2217 | if (virtio_64bit_features_needed(vdev)) { |
2218 | /* |
2219 | * Subsection load filled vdev->guest_features. Run them |
2220 | * through virtio_set_features to sanity-check them against |
2221 | * host_features. |
2222 | */ |
2223 | uint64_t features64 = vdev->guest_features; |
2224 | if (virtio_set_features_nocheck(vdev, features64) < 0) { |
2225 | error_report("Features 0x%" PRIx64 " unsupported. " |
2226 | "Allowed features: 0x%" PRIx64, |
2227 | features64, vdev->host_features); |
2228 | return -1; |
2229 | } |
2230 | } else { |
2231 | if (virtio_set_features_nocheck(vdev, features) < 0) { |
2232 | error_report("Features 0x%x unsupported. " |
2233 | "Allowed features: 0x%" PRIx64, |
2234 | features, vdev->host_features); |
2235 | return -1; |
2236 | } |
2237 | } |
2238 | |
2239 | if (!virtio_device_started(vdev, vdev->status) && |
2240 | !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
2241 | vdev->start_on_kick = true; |
2242 | } |
2243 | |
2244 | rcu_read_lock(); |
2245 | for (i = 0; i < num; i++) { |
2246 | if (vdev->vq[i].vring.desc) { |
2247 | uint16_t nheads; |
2248 | |
2249 | /* |
2250 | * VIRTIO-1 devices migrate desc, used, and avail ring addresses so |
2251 | * only the region cache needs to be set up. Legacy devices need |
2252 | * to calculate used and avail ring addresses based on the desc |
2253 | * address. |
2254 | */ |
2255 | if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
2256 | virtio_init_region_cache(vdev, i); |
2257 | } else { |
2258 | virtio_queue_update_rings(vdev, i); |
2259 | } |
2260 | |
2261 | nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; |
2262 | /* Check it isn't doing strange things with descriptor numbers. */ |
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                rcu_read_unlock();
                return -1;
            }
2271 | vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); |
2272 | vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); |
2273 | |
2274 | /* |
2275 | * Some devices migrate VirtQueueElements that have been popped |
2276 | * from the avail ring but not yet returned to the used ring. |
2277 | * Since max ring size < UINT16_MAX it's safe to use modulo |
2278 | * UINT16_MAX + 1 subtraction. |
2279 | */ |
2280 | vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - |
2281 | vdev->vq[i].used_idx); |
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                rcu_read_unlock();
                return -1;
            }
2290 | } |
2291 | } |
2292 | rcu_read_unlock(); |
2293 | |
2294 | return 0; |
2295 | } |
2296 | |
2297 | void virtio_cleanup(VirtIODevice *vdev) |
2298 | { |
2299 | qemu_del_vm_change_state_handler(vdev->vmstate); |
2300 | } |
2301 | |
2302 | static void virtio_vmstate_change(void *opaque, int running, RunState state) |
2303 | { |
2304 | VirtIODevice *vdev = opaque; |
2305 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2306 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
2307 | bool backend_run = running && virtio_device_started(vdev, vdev->status); |
2308 | vdev->vm_running = running; |
2309 | |
2310 | if (backend_run) { |
2311 | virtio_set_status(vdev, vdev->status); |
2312 | } |
2313 | |
2314 | if (k->vmstate_change) { |
2315 | k->vmstate_change(qbus->parent, backend_run); |
2316 | } |
2317 | |
2318 | if (!backend_run) { |
2319 | virtio_set_status(vdev, vdev->status); |
2320 | } |
2321 | } |
2322 | |
2323 | void virtio_instance_init_common(Object *proxy_obj, void *data, |
2324 | size_t vdev_size, const char *vdev_name) |
2325 | { |
2326 | DeviceState *vdev = data; |
2327 | |
    object_initialize_child(proxy_obj, "virtio-backend", vdev, vdev_size,
2329 | vdev_name, &error_abort, NULL); |
2330 | qdev_alias_all_properties(vdev, proxy_obj); |
2331 | } |
2332 | |
2333 | void virtio_init(VirtIODevice *vdev, const char *name, |
2334 | uint16_t device_id, size_t config_size) |
2335 | { |
2336 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2337 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
2338 | int i; |
2339 | int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0; |
2340 | |
2341 | if (nvectors) { |
2342 | vdev->vector_queues = |
2343 | g_malloc0(sizeof(*vdev->vector_queues) * nvectors); |
2344 | } |
2345 | |
2346 | vdev->start_on_kick = false; |
2347 | vdev->started = false; |
2348 | vdev->device_id = device_id; |
2349 | vdev->status = 0; |
2350 | atomic_set(&vdev->isr, 0); |
2351 | vdev->queue_sel = 0; |
2352 | vdev->config_vector = VIRTIO_NO_VECTOR; |
2353 | vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); |
2354 | vdev->vm_running = runstate_is_running(); |
2355 | vdev->broken = false; |
2356 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
2357 | vdev->vq[i].vector = VIRTIO_NO_VECTOR; |
2358 | vdev->vq[i].vdev = vdev; |
2359 | vdev->vq[i].queue_index = i; |
2360 | } |
2361 | |
2362 | vdev->name = name; |
2363 | vdev->config_len = config_size; |
2364 | if (vdev->config_len) { |
2365 | vdev->config = g_malloc0(config_size); |
2366 | } else { |
2367 | vdev->config = NULL; |
2368 | } |
2369 | vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev), |
2370 | virtio_vmstate_change, vdev); |
2371 | vdev->device_endian = virtio_default_endian(); |
2372 | vdev->use_guest_notifier_mask = true; |
2373 | } |
2374 | |
2375 | hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) |
2376 | { |
2377 | return vdev->vq[n].vring.desc; |
2378 | } |
2379 | |
2380 | bool virtio_queue_enabled(VirtIODevice *vdev, int n) |
2381 | { |
2382 | return virtio_queue_get_desc_addr(vdev, n) != 0; |
2383 | } |
2384 | |
2385 | hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) |
2386 | { |
2387 | return vdev->vq[n].vring.avail; |
2388 | } |
2389 | |
2390 | hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) |
2391 | { |
2392 | return vdev->vq[n].vring.used; |
2393 | } |
2394 | |
2395 | hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) |
2396 | { |
2397 | return sizeof(VRingDesc) * vdev->vq[n].vring.num; |
2398 | } |
2399 | |
2400 | hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) |
2401 | { |
2402 | return offsetof(VRingAvail, ring) + |
2403 | sizeof(uint16_t) * vdev->vq[n].vring.num; |
2404 | } |
2405 | |
2406 | hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) |
2407 | { |
2408 | return offsetof(VRingUsed, ring) + |
2409 | sizeof(VRingUsedElem) * vdev->vq[n].vring.num; |
2410 | } |
2411 | |
2412 | uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) |
2413 | { |
2414 | return vdev->vq[n].last_avail_idx; |
2415 | } |
2416 | |
2417 | void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx) |
2418 | { |
2419 | vdev->vq[n].last_avail_idx = idx; |
2420 | vdev->vq[n].shadow_avail_idx = idx; |
2421 | } |
2422 | |
2423 | void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) |
2424 | { |
2425 | rcu_read_lock(); |
2426 | if (vdev->vq[n].vring.desc) { |
2427 | vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); |
2428 | vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; |
2429 | } |
2430 | rcu_read_unlock(); |
2431 | } |
2432 | |
2433 | void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) |
2434 | { |
2435 | rcu_read_lock(); |
2436 | if (vdev->vq[n].vring.desc) { |
2437 | vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); |
2438 | } |
2439 | rcu_read_unlock(); |
2440 | } |
2441 | |
2442 | void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) |
2443 | { |
2444 | vdev->vq[n].signalled_used_valid = false; |
2445 | } |
2446 | |
2447 | VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) |
2448 | { |
2449 | return vdev->vq + n; |
2450 | } |
2451 | |
2452 | uint16_t virtio_get_queue_index(VirtQueue *vq) |
2453 | { |
2454 | return vq->queue_index; |
2455 | } |
2456 | |
2457 | static void virtio_queue_guest_notifier_read(EventNotifier *n) |
2458 | { |
2459 | VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); |
2460 | if (event_notifier_test_and_clear(n)) { |
2461 | virtio_irq(vq); |
2462 | } |
2463 | } |
2464 | |
2465 | void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, |
2466 | bool with_irqfd) |
2467 | { |
2468 | if (assign && !with_irqfd) { |
2469 | event_notifier_set_handler(&vq->guest_notifier, |
2470 | virtio_queue_guest_notifier_read); |
2471 | } else { |
2472 | event_notifier_set_handler(&vq->guest_notifier, NULL); |
2473 | } |
2474 | if (!assign) { |
2475 | /* Test and clear notifier before closing it, |
2476 | * in case poll callback didn't have time to run. */ |
2477 | virtio_queue_guest_notifier_read(&vq->guest_notifier); |
2478 | } |
2479 | } |
2480 | |
2481 | EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) |
2482 | { |
2483 | return &vq->guest_notifier; |
2484 | } |
2485 | |
2486 | static void virtio_queue_host_notifier_aio_read(EventNotifier *n) |
2487 | { |
2488 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); |
2489 | if (event_notifier_test_and_clear(n)) { |
2490 | virtio_queue_notify_aio_vq(vq); |
2491 | } |
2492 | } |
2493 | |
2494 | static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) |
2495 | { |
2496 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); |
2497 | |
2498 | virtio_queue_set_notification(vq, 0); |
2499 | } |
2500 | |
2501 | static bool virtio_queue_host_notifier_aio_poll(void *opaque) |
2502 | { |
2503 | EventNotifier *n = opaque; |
2504 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); |
2505 | bool progress; |
2506 | |
2507 | if (!vq->vring.desc || virtio_queue_empty(vq)) { |
2508 | return false; |
2509 | } |
2510 | |
2511 | progress = virtio_queue_notify_aio_vq(vq); |
2512 | |
2513 | /* In case the handler function re-enabled notifications */ |
2514 | virtio_queue_set_notification(vq, 0); |
2515 | return progress; |
2516 | } |
2517 | |
2518 | static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) |
2519 | { |
2520 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); |
2521 | |
2522 | /* Caller polls once more after this to catch requests that race with us */ |
2523 | virtio_queue_set_notification(vq, 1); |
2524 | } |
2525 | |
2526 | void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, |
2527 | VirtIOHandleAIOOutput handle_output) |
2528 | { |
2529 | if (handle_output) { |
2530 | vq->handle_aio_output = handle_output; |
2531 | aio_set_event_notifier(ctx, &vq->host_notifier, true, |
2532 | virtio_queue_host_notifier_aio_read, |
2533 | virtio_queue_host_notifier_aio_poll); |
2534 | aio_set_event_notifier_poll(ctx, &vq->host_notifier, |
2535 | virtio_queue_host_notifier_aio_poll_begin, |
2536 | virtio_queue_host_notifier_aio_poll_end); |
2537 | } else { |
2538 | aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL); |
        /* Test and clear notifier after disabling the event,
         * in case the poll callback didn't have time to run. */
2541 | virtio_queue_host_notifier_aio_read(&vq->host_notifier); |
2542 | vq->handle_aio_output = NULL; |
2543 | } |
2544 | } |
2545 | |
2546 | void virtio_queue_host_notifier_read(EventNotifier *n) |
2547 | { |
2548 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); |
2549 | if (event_notifier_test_and_clear(n)) { |
2550 | virtio_queue_notify_vq(vq); |
2551 | } |
2552 | } |
2553 | |
2554 | EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) |
2555 | { |
2556 | return &vq->host_notifier; |
2557 | } |
2558 | |
2559 | int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n, |
2560 | MemoryRegion *mr, bool assign) |
2561 | { |
2562 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2563 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); |
2564 | |
2565 | if (k->set_host_notifier_mr) { |
2566 | return k->set_host_notifier_mr(qbus->parent, n, mr, assign); |
2567 | } |
2568 | |
2569 | return -1; |
2570 | } |
2571 | |
2572 | void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) |
2573 | { |
2574 | g_free(vdev->bus_name); |
2575 | vdev->bus_name = g_strdup(bus_name); |
2576 | } |
2577 | |
2578 | void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) |
2579 | { |
2580 | va_list ap; |
2581 | |
2582 | va_start(ap, fmt); |
2583 | error_vreport(fmt, ap); |
2584 | va_end(ap); |
2585 | |
2586 | if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
2587 | vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET; |
2588 | virtio_notify_config(vdev); |
2589 | } |
2590 | |
2591 | vdev->broken = true; |
2592 | } |
2593 | |
2594 | static void virtio_memory_listener_commit(MemoryListener *listener) |
2595 | { |
2596 | VirtIODevice *vdev = container_of(listener, VirtIODevice, listener); |
2597 | int i; |
2598 | |
2599 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
2600 | if (vdev->vq[i].vring.num == 0) { |
2601 | break; |
2602 | } |
2603 | virtio_init_region_cache(vdev, i); |
2604 | } |
2605 | } |
2606 | |
2607 | static void virtio_device_realize(DeviceState *dev, Error **errp) |
2608 | { |
2609 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); |
2610 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); |
2611 | Error *err = NULL; |
2612 | |
2613 | /* Devices should either use vmsd or the load/save methods */ |
2614 | assert(!vdc->vmsd || !vdc->load); |
2615 | |
2616 | if (vdc->realize != NULL) { |
2617 | vdc->realize(dev, &err); |
2618 | if (err != NULL) { |
2619 | error_propagate(errp, err); |
2620 | return; |
2621 | } |
2622 | } |
2623 | |
2624 | virtio_bus_device_plugged(vdev, &err); |
2625 | if (err != NULL) { |
2626 | error_propagate(errp, err); |
2627 | vdc->unrealize(dev, NULL); |
2628 | return; |
2629 | } |
2630 | |
2631 | vdev->listener.commit = virtio_memory_listener_commit; |
2632 | memory_listener_register(&vdev->listener, vdev->dma_as); |
2633 | } |
2634 | |
2635 | static void virtio_device_unrealize(DeviceState *dev, Error **errp) |
2636 | { |
2637 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); |
2638 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); |
2639 | Error *err = NULL; |
2640 | |
2641 | virtio_bus_device_unplugged(vdev); |
2642 | |
2643 | if (vdc->unrealize != NULL) { |
2644 | vdc->unrealize(dev, &err); |
2645 | if (err != NULL) { |
2646 | error_propagate(errp, err); |
2647 | return; |
2648 | } |
2649 | } |
2650 | |
2651 | g_free(vdev->bus_name); |
2652 | vdev->bus_name = NULL; |
2653 | } |
2654 | |
2655 | static void virtio_device_free_virtqueues(VirtIODevice *vdev) |
2656 | { |
2657 | int i; |
2658 | if (!vdev->vq) { |
2659 | return; |
2660 | } |
2661 | |
2662 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { |
2663 | if (vdev->vq[i].vring.num == 0) { |
2664 | break; |
2665 | } |
2666 | virtio_virtqueue_reset_region_cache(&vdev->vq[i]); |
2667 | } |
2668 | g_free(vdev->vq); |
2669 | } |
2670 | |
2671 | static void virtio_device_instance_finalize(Object *obj) |
2672 | { |
2673 | VirtIODevice *vdev = VIRTIO_DEVICE(obj); |
2674 | |
2675 | memory_listener_unregister(&vdev->listener); |
2676 | virtio_device_free_virtqueues(vdev); |
2677 | |
2678 | g_free(vdev->config); |
2679 | g_free(vdev->vector_queues); |
2680 | } |
2681 | |
2682 | static Property virtio_properties[] = { |
2683 | DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), |
2684 | DEFINE_PROP_BOOL("use-started" , VirtIODevice, use_started, true), |
2685 | DEFINE_PROP_END_OF_LIST(), |
2686 | }; |
2687 | |
2688 | static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev) |
2689 | { |
2690 | VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); |
2691 | int i, n, r, err; |
2692 | |
2693 | memory_region_transaction_begin(); |
2694 | for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { |
2695 | VirtQueue *vq = &vdev->vq[n]; |
2696 | if (!virtio_queue_get_num(vdev, n)) { |
2697 | continue; |
2698 | } |
2699 | r = virtio_bus_set_host_notifier(qbus, n, true); |
2700 | if (r < 0) { |
2701 | err = r; |
2702 | goto assign_error; |
2703 | } |
2704 | event_notifier_set_handler(&vq->host_notifier, |
2705 | virtio_queue_host_notifier_read); |
2706 | } |
2707 | |
2708 | for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { |
2709 | /* Kick right away to begin processing requests already in vring */ |
2710 | VirtQueue *vq = &vdev->vq[n]; |
2711 | if (!vq->vring.num) { |
2712 | continue; |
2713 | } |
2714 | event_notifier_set(&vq->host_notifier); |
2715 | } |
2716 | memory_region_transaction_commit(); |
2717 | return 0; |
2718 | |
2719 | assign_error: |
2720 | i = n; /* save n for a second iteration after transaction is committed. */ |
2721 | while (--n >= 0) { |
2722 | VirtQueue *vq = &vdev->vq[n]; |
2723 | if (!virtio_queue_get_num(vdev, n)) { |
2724 | continue; |
2725 | } |
2726 | |
2727 | event_notifier_set_handler(&vq->host_notifier, NULL); |
2728 | r = virtio_bus_set_host_notifier(qbus, n, false); |
2729 | assert(r >= 0); |
2730 | } |
2731 | memory_region_transaction_commit(); |
2732 | |
2733 | while (--i >= 0) { |
2734 | if (!virtio_queue_get_num(vdev, i)) { |
2735 | continue; |
2736 | } |
2737 | virtio_bus_cleanup_host_notifier(qbus, i); |
2738 | } |
2739 | return err; |
2740 | } |
2741 | |
2742 | int virtio_device_start_ioeventfd(VirtIODevice *vdev) |
2743 | { |
2744 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2745 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
2746 | |
2747 | return virtio_bus_start_ioeventfd(vbus); |
2748 | } |
2749 | |
2750 | static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev) |
2751 | { |
2752 | VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); |
2753 | int n, r; |
2754 | |
2755 | memory_region_transaction_begin(); |
2756 | for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { |
2757 | VirtQueue *vq = &vdev->vq[n]; |
2758 | |
2759 | if (!virtio_queue_get_num(vdev, n)) { |
2760 | continue; |
2761 | } |
2762 | event_notifier_set_handler(&vq->host_notifier, NULL); |
2763 | r = virtio_bus_set_host_notifier(qbus, n, false); |
2764 | assert(r >= 0); |
2765 | } |
2766 | memory_region_transaction_commit(); |
2767 | |
2768 | for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { |
2769 | if (!virtio_queue_get_num(vdev, n)) { |
2770 | continue; |
2771 | } |
2772 | virtio_bus_cleanup_host_notifier(qbus, n); |
2773 | } |
2774 | } |
2775 | |
2776 | void virtio_device_stop_ioeventfd(VirtIODevice *vdev) |
2777 | { |
2778 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2779 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
2780 | |
2781 | virtio_bus_stop_ioeventfd(vbus); |
2782 | } |
2783 | |
2784 | int virtio_device_grab_ioeventfd(VirtIODevice *vdev) |
2785 | { |
2786 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2787 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
2788 | |
2789 | return virtio_bus_grab_ioeventfd(vbus); |
2790 | } |
2791 | |
2792 | void virtio_device_release_ioeventfd(VirtIODevice *vdev) |
2793 | { |
2794 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2795 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
2796 | |
2797 | virtio_bus_release_ioeventfd(vbus); |
2798 | } |
2799 | |
2800 | static void virtio_device_class_init(ObjectClass *klass, void *data) |
2801 | { |
2802 | /* Set the default value here. */ |
2803 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); |
2804 | DeviceClass *dc = DEVICE_CLASS(klass); |
2805 | |
2806 | dc->realize = virtio_device_realize; |
2807 | dc->unrealize = virtio_device_unrealize; |
2808 | dc->bus_type = TYPE_VIRTIO_BUS; |
2809 | dc->props = virtio_properties; |
2810 | vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl; |
2811 | vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; |
2812 | |
2813 | vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; |
2814 | } |
2815 | |
2816 | bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) |
2817 | { |
2818 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
2819 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
2820 | |
2821 | return virtio_bus_ioeventfd_enabled(vbus); |
2822 | } |
2823 | |
2824 | static const TypeInfo virtio_device_info = { |
2825 | .name = TYPE_VIRTIO_DEVICE, |
2826 | .parent = TYPE_DEVICE, |
2827 | .instance_size = sizeof(VirtIODevice), |
2828 | .class_init = virtio_device_class_init, |
2829 | .instance_finalize = virtio_device_instance_finalize, |
2830 | .abstract = true, |
2831 | .class_size = sizeof(VirtioDeviceClass), |
2832 | }; |
2833 | |
2834 | static void virtio_register_types(void) |
2835 | { |
2836 | type_register_static(&virtio_device_info); |
2837 | } |
2838 | |
2839 | type_init(virtio_register_types) |
2840 | |