/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace-root.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

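/*
 * Fill @len bytes of guest memory at @addr with the constant byte @c,
 * staging the pattern through a small on-stack buffer.  Returns
 * nonzero if any of the underlying writes failed.
 */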
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

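/*
 * Initialize a scatter-gather list with room for @alloc_hint entries.
 * A reference is taken on @dev so the device cannot be finalized while
 * DMA to its address space is still in flight.
 */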
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

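/* Append one mapping, growing the entry array geometrically (2n + 1).  */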
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

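/* Drop the reference taken in qemu_sglist_init() and free the entries.  */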
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

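/*
 * Per-request state for scatter-gather block I/O.  dma_blk_cb() drives
 * the transfer as a state machine: map as much of the remaining SG list
 * as possible, submit one aio request for the mapped portion, and
 * resume from the completion callback until the list is exhausted.
 */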
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

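/*
 * Bottom half scheduled once the map client is notified that bounce
 * buffer space has been freed; restarts the transfer where
 * dma_blk_cb() left off.
 */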
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

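/* Unmap every buffer that was mapped for the previous chunk.  */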
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

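/* Finish the request: unmap, run the caller's callback, release the AIOCB.  */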
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

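/*
 * Transfer continuation.  Called with ret == 0 to start (or restart)
 * the request, and again as the completion callback of every chunk
 * submitted through dbs->io_func.
 */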
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

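    /*
     * Nothing could be mapped: all bounce buffer space is in use.
     * Register a map client so reschedule_dma() runs as soon as some
     * of it is released.
     */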
    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

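    /*
     * A partial mapping can yield a size the backend cannot accept;
     * trim the request to the largest multiple of the required
     * alignment by discarding the unaligned tail bytes.
     */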
    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                dbs->iov.size -
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    aio_context_acquire(dbs->ctx);
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    aio_context_release(dbs->ctx);
    assert(dbs->acb);
}

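/*
 * Cancel a request: if an aio request is in flight, cancel it and let
 * dma_blk_cb() run the completion path; if we are parked waiting for
 * bounce buffer space, unregister the map client and complete with
 * -ECANCELED directly.
 */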
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb. */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static AioContext *dma_get_aio_context(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    return dbs->ctx;
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
    .get_aio_context = dma_get_aio_context,
};

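/*
 * Start block I/O covering the scatter-gather list @sg at byte @offset
 * of the backend, submitting each mapped chunk through @io_func.
 * @align is the alignment required of every submitted request.
 *
 * Illustrative call sequence from a device model; the device and
 * callback names here are hypothetical, not part of this API:
 *
 *     QEMUSGList sg;
 *     qemu_sglist_init(&sg, DEVICE(mydev), 1, &address_space_memory);
 *     qemu_sglist_add(&sg, buf_gpa, buf_len);
 *     dma_blk_read(blk, &sg, disk_offset, BDRV_SECTOR_SIZE,
 *                  mydev_dma_done, mydev);
 */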
BlockAIOCB *dma_blk_io(AioContext *ctx,
    QEMUSGList *sg, uint64_t offset, uint32_t align,
    DMAIOFunc *io_func, void *io_func_opaque,
    BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

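/* Thin adapters binding dma_blk_io() to BlockBackend reads and writes.  */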
static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

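/*
 * Synchronously copy up to @len bytes between a linear buffer and the
 * scatter-gather list.  Returns the number of SG bytes that were not
 * transferred (the residual).
 */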
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

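/*
 * dma_buf_read() copies @ptr into the guest memory described by @sg
 * (the guest reads from the device); dma_buf_write() copies guest
 * memory out into @ptr.
 */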
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

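/* Start an accounting operation sized to the whole scatter-gather list.  */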
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}