1/*
2 * Virtio MMIO bindings
3 *
4 * Copyright (c) 2011 Linaro Limited
5 *
6 * Author:
7 * Peter Maydell <peter.maydell@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "qemu/osdep.h"
23#include "standard-headers/linux/virtio_mmio.h"
24#include "hw/irq.h"
25#include "hw/qdev-properties.h"
26#include "hw/sysbus.h"
27#include "hw/virtio/virtio.h"
28#include "migration/qemu-file-types.h"
29#include "qemu/host-utils.h"
30#include "qemu/module.h"
31#include "sysemu/kvm.h"
32#include "hw/virtio/virtio-bus.h"
33#include "qemu/error-report.h"
34#include "qemu/log.h"
35#include "trace.h"
36
/* QOM macros */
/* virtio-mmio-bus: the virtio bus hanging off each virtio-mmio proxy */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio: the sysbus transport device itself */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Values the guest reads back from the ID registers (virtio-mmio spec) */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1        /* legacy ("version 1") register layout */
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
55
typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;     /* 0x200-byte register window (see realize) */
    qemu_irq irq;           /* single interrupt output line */
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;  /* DEVICE_FEATURES_SEL; only bank 0 exists */
    uint32_t guest_features_sel; /* DRIVER_FEATURES_SEL; only bank 0 exists */
    uint32_t guest_page_shift;   /* log2 of guest-written GUEST_PAGE_SIZE */
    /* virtio-bus */
    VirtioBusState bus;
    /* If true, include the MMIO base address in the device path */
    bool format_transport_address;
} VirtIOMMIOProxy;
69
/*
 * ioeventfd acceleration is only usable when KVM can back queue-notify
 * writes with eventfds; the device argument is unused.
 */
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}
74
75static int virtio_mmio_ioeventfd_assign(DeviceState *d,
76 EventNotifier *notifier,
77 int n, bool assign)
78{
79 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
80
81 if (assign) {
82 memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
83 true, n, notifier);
84 } else {
85 memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
86 true, n, notifier);
87 }
88 return 0;
89}
90
/* Start ioeventfd handling for all queues on this transport's bus. */
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}
95
/* Stop ioeventfd handling; notifies revert to the MMIO write path. */
static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
100
/*
 * Guest read of the legacy (version 1) virtio-mmio register block.
 * Device-specific config space (offset >= VIRTIO_MMIO_CONFIG) permits
 * 1/2/4-byte accesses; every other register must be read as 32 bits.
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        /* Device-specific config space: delegate to the backend. */
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    /* All remaining registers are 32 bits wide. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        /* Legacy devices expose only 32 feature bits, so every
         * selector bank other than 0 reads as zero.
         */
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        /* A max size of 0 tells the guest this queue is absent. */
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Report the ring address in units of the guest page size
         * previously programmed via VIRTIO_MMIO_GUEST_PAGE_SIZE.
         */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        /* isr is updated atomically from device and guest context. */
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register\n",
                      __func__);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
        return 0;
    }
    return 0;
}
193
/*
 * Guest write to the legacy (version 1) virtio-mmio register block.
 * Device-specific config space permits 1/2/4-byte accesses; every
 * other register must be written as 32 bits.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        /* Device-specific config space: delegate to the backend. */
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    /* All remaining registers are 32 bits wide. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        /* Legacy devices have only 32 feature bits: writes to any
         * selector bank other than 0 are ignored.
         */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        /* The spec requires a power of two; store its log2. ctz32(0)
         * is 32, so a zero page size falls back to a shift of 0.
         */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        /* Out-of-range selectors are silently ignored. */
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Writing PFN 0 resets the device (legacy spec behaviour);
         * otherwise the ring is placed at value << guest_page_shift.
         */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        /* Clear the acknowledged ISR bits, then recompute the line. */
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd before DRIVER_OK is dropped, and start it
         * only after the new status has reached the backend, so
         * notifications are never handled while the device is not
         * ready for them.
         */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Writing status 0 resets the device. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to readonly register\n",
                      __func__);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
    }
}
315
/* MemoryRegionOps backing the 0x200-byte register window. */
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
321
322static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
323{
324 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
325 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
326 int level;
327
328 if (!vdev) {
329 return;
330 }
331 level = (atomic_read(&vdev->isr) != 0);
332 trace_virtio_mmio_setting_irq(level);
333 qemu_set_irq(proxy->irq, level);
334}
335
/*
 * Restore transport state from a migration stream. The field order
 * must exactly mirror virtio_mmio_save_config().
 */
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}
345
/*
 * Save transport state to a migration stream. The field order must
 * exactly mirror virtio_mmio_load_config().
 */
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
354
/*
 * Device reset: stop ioeventfd handling first so no notify can race
 * the reset, then reset the bus (and backend) and the guest-visible
 * transport registers.
 */
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}
365
/*
 * Wire up (assign) or tear down the guest notifier for virtqueue @n.
 * Returns 0 on success, or a negative errno if the event notifier
 * cannot be initialised.
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        /* Create the notifier before installing its fd handler. */
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        /* Remove the fd handler before destroying the notifier. */
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Let the backend mask/unmask its own notifications if it can. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
392
/*
 * Assign or deassign guest notifiers for the first @nvqs queues,
 * stopping early at the first queue with size 0 (i.e. not in use).
 * On assignment failure, already-assigned notifiers are rolled back.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
425
426/* virtio-mmio device */
427
428static Property virtio_mmio_properties[] = {
429 DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
430 format_transport_address, true),
431 DEFINE_PROP_END_OF_LIST(),
432};
433
/*
 * Realize: create the child virtio bus, the interrupt output and the
 * 0x200-byte MMIO register window.
 */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}
446
/* Class init for the virtio-mmio proxy device. */
static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}
456
/* QOM type registration record for the virtio-mmio proxy device. */
static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};
463
464/* virtio-mmio-bus. */
465
466static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
467{
468 BusState *virtio_mmio_bus;
469 VirtIOMMIOProxy *virtio_mmio_proxy;
470 char *proxy_path;
471 SysBusDevice *proxy_sbd;
472 char *path;
473
474 virtio_mmio_bus = qdev_get_parent_bus(dev);
475 virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
476 proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));
477
478 /*
479 * If @format_transport_address is false, then we just perform the same as
480 * virtio_bus_get_dev_path(): we delegate the address formatting for the
481 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
482 * (i.e., the device that implements the virtio-mmio bus) resides on. In
483 * this case the base address of the virtio-mmio transport will be
484 * invisible.
485 */
486 if (!virtio_mmio_proxy->format_transport_address) {
487 return proxy_path;
488 }
489
490 /* Otherwise, we append the base address of the transport. */
491 proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
492 assert(proxy_sbd->num_mmio == 1);
493 assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);
494
495 if (proxy_path) {
496 path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
497 proxy_sbd->mmio[0].addr);
498 } else {
499 path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
500 proxy_sbd->mmio[0].addr);
501 }
502 g_free(proxy_path);
503 return path;
504}
505
/* Class init for the virtio-mmio bus: hook up the transport callbacks. */
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    /* Legacy virtio-mmio lets the guest set the vring alignment. */
    k->has_variable_vring_alignment = true;
    /* Each transport carries exactly one virtio device. */
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}
521
/* QOM type registration record for the virtio-mmio bus. */
static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};
528
/* Register both QOM types with the type system at module init time. */
static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)
536