/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 * http://www.nvmexpress.org/resources/
 */

/**
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              num_queues=<N[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in megabytes. The CMB
 * is assumed to be at offset 0 in BAR2 and currently supports only WDS,
 * RDS and SQS.
 */
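/*
 * Example invocation (illustrative only; the image file name, drive id and
 * serial number below are placeholders):
 *
 *     qemu-system-x86_64 ... \
 *         -drive file=nvme.img,if=none,id=nvm \
 *         -device nvme,drive=nvm,serial=deadbeef,cmb_size_mb=64
 */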

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"

#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static void nvme_process_sq(void *opaque);

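/*
 * Read from guest-visible memory. Addresses that fall inside the Controller
 * Memory Buffer are served straight from the controller's backing buffer;
 * everything else goes through ordinary PCI DMA.
 */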
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
                addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
}

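/*
 * Advance the CQ tail; every wrap inverts the phase tag, which is how the
 * host distinguishes new completion entries from stale ones.
 */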
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}

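/*
 * Map a PRP1/PRP2 pair to either a scatter-gather list (for PRPs pointing
 * at regular guest RAM) or an iovec (for PRPs inside the CMB). Depending
 * on the transfer length, PRP2 is either a second data page or a pointer
 * to a (possibly chained) PRP list.
 */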
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    /* The last entry chains to the next PRP list page. */
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list, prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                /*
                 * Map the remaining length, not the first-page length;
                 * using trans_len here would truncate unaligned transfers
                 * that fit in two pages.
                 */
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], len);
            }
        }
    }
    return NVME_SUCCESS;

unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}

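/*
 * Transfer direction in the helpers below is named from the host's point
 * of view: nvme_dma_write_prp() copies guest memory into @ptr, while
 * nvme_dma_read_prp() copies @ptr out to guest memory.
 */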
static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                  uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

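/*
 * Write finished requests out as completion queue entries, stopping early
 * if the CQ fills up; leftovers stay on req_list and are retried when the
 * host advances the CQ head doorbell.
 */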
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    /* NLB is a 0's based 16-bit field, as in nvme_write_zeros() above */
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}


static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

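/*
 * Queue sizes in the create-queue commands are 0's based: a QSIZE value of
 * N means N + 1 entries, hence the qsize + 1 passed to the init helpers.
 */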
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                         NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->num_queues)) {
        trace_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}

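/*
 * CNS 0x02: return a page of active namespace IDs strictly greater than
 * the NSID given in the command, in increasing order.
 */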
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4 * KiB;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}

static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00: /* Identify Namespace */
        return nvme_identify_ns(n, c);
    case 0x01: /* Identify Controller */
        return nvme_identify_ctrl(n, c);
    case 0x02: /* Active Namespace ID list */
        return nvme_identify_nslist(n, c);
    default:
        trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

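/*
 * The reported timestamp is the value the host last set plus the virtual
 * time elapsed since then; e.g. a Set Features write of 1000 ms followed
 * by 250 ms of guest runtime reads back as 1250 ms, truncated to 48 bits.
 */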
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        trace_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                ((dw11 >> 16) & 0xFFFF) + 1,
                                n->num_queues - 1, n->num_queues - 1);
        req->cqe.result =
            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

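/*
 * Kick handler for a submission queue: fetch commands from the SQ ring and
 * dispatch them (SQ 0 is the admin queue). Handlers that started an
 * asynchronous block request return NVME_NO_COMPLETE and complete later
 * from nvme_rw_cb(); everything else completes immediately.
 */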
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}

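/*
 * CC.EN 0 -> 1 transition: sanity-check the admin queue registers and the
 * CC fields against CAP and the identify limits before marking the
 * controller ready.
 */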
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    /*
     * The entry-size traces below report the same id_ctrl bounds that the
     * checks test against (not n->bar.cap, which carries no entry-size
     * fields).
     */
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
        NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
        NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}

static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_set(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_clr(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    default:
        NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

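/*
 * Doorbell registers start at offset 0x1000 and alternate SQ tail / CQ head
 * every 4 bytes (CAP.DSTRD is 0), so bit 2 of the offset selects between
 * submission and completion doorbells.
 */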
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " cqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, cqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            /* The CQ was full; freeing a slot may unblock its SQs. */
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}


static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

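/*
 * MMIO front-end for the Controller Memory Buffer BAR; reads and writes go
 * straight to the cmbuf backing allocation.
 */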
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->serial) {
        error_setg(errp, "serial property not set");
        return;
    }
    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;
    /*
     * BAR0: the fixed register set plus the SQ tail / CQ head doorbell
     * pairs (4 bytes each), rounded up to a power of two.
     */
    n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->num_queues);
    n->cq = g_new0(NvmeCQueue *, n->num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;     /* NVMe spec version 1.2.0 */
    n->bar.intmc = n->bar.intms = 0;

    if (n->cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}


static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->cmb_size_mb) {
        g_free(n->cmbuf);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    dc->props = nvme_props;
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj), &error_abort);
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)