/*
 * Virtio PMEM device
 *
 * Copyright (C) 2018-2019 Red Hat, Inc.
 *
 * Authors:
 *  Pankaj Gupta <pagupta@redhat.com>
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */
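
/*
 * Typical command line (a sketch, assuming the virtio-pmem-pci proxy device
 * and a file on a DAX-capable filesystem as backing storage):
 *
 *  -object memory-backend-file,id=mem1,share=on,mem-path=virtio_pmem.img,size=4G
 *  -device virtio-pmem-pci,memdev=mem1,id=nv1
 */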

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/virtio/virtio-pmem.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pmem.h"
#include "sysemu/hostmem.h"
#include "block/aio.h"
#include "block/thread-pool.h"

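/*
 * Per-request state: the popped virtqueue element plus everything the
 * thread-pool worker and the completion callback need (backing fd, device
 * pointers, request/response buffers).
 */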
typedef struct VirtIODeviceRequest {
    VirtQueueElement elem;
    int fd;
    VirtIOPMEM *pmem;
    VirtIODevice *vdev;
    struct virtio_pmem_req req;
    struct virtio_pmem_resp resp;
} VirtIODeviceRequest;

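/*
 * Runs in a thread-pool worker thread: fsync() the backing file so guest
 * writes to the mapped region reach stable storage, then store the result
 * in the response for the completion callback to return to the guest.
 */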
static int worker_cb(void *opaque)
{
    VirtIODeviceRequest *req_data = opaque;
    int err = 0;

    /* flush raw backing image */
    err = fsync(req_data->fd);
    if (err != 0) {
        err = 1;
    }

    virtio_stw_p(req_data->vdev, &req_data->resp.ret, err);

    return 0;
}

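/*
 * Completion callback, invoked in the main loop once the worker is done:
 * copy the response into the guest-supplied buffer, push the element back
 * onto the virtqueue and notify the guest.
 */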
static void done_cb(void *opaque, int ret)
{
    VirtIODeviceRequest *req_data = opaque;
    int len = iov_from_buf(req_data->elem.in_sg, req_data->elem.in_num, 0,
                           &req_data->resp, sizeof(struct virtio_pmem_resp));

    /* Callbacks are serialized, so no need to use atomic ops. */
    virtqueue_push(req_data->pmem->rq_vq, &req_data->elem, len);
    virtio_notify((VirtIODevice *)req_data->pmem, req_data->pmem->rq_vq);
    g_free(req_data);
}

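/*
 * Virtqueue handler for guest flush requests: pop the request, validate the
 * descriptor layout and offload the fsync() to the thread pool so the main
 * loop is never blocked on storage.
 */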
static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIODeviceRequest *req_data;
    VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
    HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);
    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());

    req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
    if (!req_data) {
        virtio_error(vdev, "virtio-pmem missing request data");
        return;
    }

    if (req_data->elem.out_num < 1 || req_data->elem.in_num < 1) {
        virtio_error(vdev, "virtio-pmem request lacks in/out descriptors");
        g_free(req_data);
        return;
    }
    req_data->fd = memory_region_get_fd(&backend->mr);
    req_data->pmem = pmem;
    req_data->vdev = vdev;
    thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data);
}

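/* Expose the plugged region's guest physical address and size in config space. */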
static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
    struct virtio_pmem_config *pmemcfg = (struct virtio_pmem_config *) config;

    virtio_stq_p(vdev, &pmemcfg->start, pmem->start);
    virtio_stq_p(vdev, &pmemcfg->size, memory_region_size(&pmem->memdev->mr));
}

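/* virtio-pmem advertises no device-specific feature bits. */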
static uint64_t virtio_pmem_get_features(VirtIODevice *vdev, uint64_t features,
                                         Error **errp)
{
    return features;
}

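/*
 * Realize: require an unused memory backend, mark it as mapped, initialize
 * the virtio device and create the single request virtqueue.
 */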
static void virtio_pmem_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOPMEM *pmem = VIRTIO_PMEM(dev);

    if (!pmem->memdev) {
        error_setg(errp, "virtio-pmem memdev not set");
        return;
    }

    if (host_memory_backend_is_mapped(pmem->memdev)) {
        char *path = object_get_canonical_path_component(OBJECT(pmem->memdev));
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
        return;
    }

    host_memory_backend_set_mapped(pmem->memdev, true);
    virtio_init(vdev, TYPE_VIRTIO_PMEM, VIRTIO_ID_PMEM,
                sizeof(struct virtio_pmem_config));
    pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush);
}

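/* Unrealize: release the memory backend and tear down the virtio device. */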
static void virtio_pmem_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOPMEM *pmem = VIRTIO_PMEM(dev);

    host_memory_backend_set_mapped(pmem->memdev, false);
    virtio_cleanup(vdev);
}

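/* Fill in the device info reported via QMP/HMP (guest address, size, memdev path). */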
static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem,
                                         VirtioPMEMDeviceInfo *vi)
{
    vi->memaddr = pmem->start;
    vi->size = memory_region_size(&pmem->memdev->mr);
    vi->memdev = object_get_canonical_path(OBJECT(pmem->memdev));
}

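/*
 * Return the backend's memory region so the memory device code can map it
 * into guest address space; fails if no memdev has been set.
 */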
static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem,
                                                   Error **errp)
{
    if (!pmem->memdev) {
        error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP);
        return NULL;
    }

    return &pmem->memdev->mr;
}

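/*
 * User-visible properties: the guest physical address of the region
 * (normally assigned by the memory device plug handler) and the link to
 * the backing memory backend.
 */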
static Property virtio_pmem_properties[] = {
    DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 0),
    DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_END_OF_LIST(),
};

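/* Wire up the generic virtio callbacks and the virtio-pmem class hooks. */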
static void virtio_pmem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOPMEMClass *vpc = VIRTIO_PMEM_CLASS(klass);

    dc->props = virtio_pmem_properties;

    vdc->realize = virtio_pmem_realize;
    vdc->unrealize = virtio_pmem_unrealize;
    vdc->get_config = virtio_pmem_get_config;
    vdc->get_features = virtio_pmem_get_features;

    vpc->fill_device_info = virtio_pmem_fill_device_info;
    vpc->get_memory_region = virtio_pmem_get_memory_region;
}

static const TypeInfo virtio_pmem_info = {
    .name          = TYPE_VIRTIO_PMEM,
    .parent        = TYPE_VIRTIO_DEVICE,
    .class_size    = sizeof(VirtIOPMEMClass),
    .class_init    = virtio_pmem_class_init,
    .instance_size = sizeof(VirtIOPMEM),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_pmem_info);
}

type_init(virtio_register_types)