1 | /* |
2 | * Virtio SCSI HBA |
3 | * |
4 | * Copyright IBM, Corp. 2010 |
5 | * Copyright Red Hat, Inc. 2011 |
6 | * |
7 | * Authors: |
8 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> |
9 | * Paolo Bonzini <pbonzini@redhat.com> |
10 | * |
11 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
12 | * See the COPYING file in the top-level directory. |
13 | * |
14 | */ |
15 | |
16 | #include "qemu/osdep.h" |
17 | #include "qapi/error.h" |
18 | #include "standard-headers/linux/virtio_ids.h" |
19 | #include "hw/virtio/virtio-scsi.h" |
20 | #include "migration/qemu-file-types.h" |
21 | #include "qemu/error-report.h" |
22 | #include "qemu/iov.h" |
23 | #include "qemu/module.h" |
24 | #include "sysemu/block-backend.h" |
25 | #include "hw/qdev-properties.h" |
26 | #include "hw/scsi/scsi.h" |
27 | #include "scsi/constants.h" |
28 | #include "hw/virtio/virtio-bus.h" |
29 | #include "hw/virtio/virtio-access.h" |
30 | |
/* Decode the 14-bit LUN number from bytes 2-3 of the 8-byte virtio-scsi
 * LUN field (the top two bits of byte 2 are addressing-mode bits).
 */
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    int lun_id = lun[2];

    lun_id = (lun_id << 8) | lun[3];
    return lun_id & 0x3FFF;
}
35 | |
36 | static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun) |
37 | { |
38 | if (lun[0] != 1) { |
39 | return NULL; |
40 | } |
41 | if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) { |
42 | return NULL; |
43 | } |
44 | return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun)); |
45 | } |
46 | |
/* Initialize a request that was just popped from @vq.  The fields up to and
 * including resp_iov are set up explicitly; everything after resp_iov in the
 * struct layout is zeroed in one memset.
 */
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    /* Offset of the first byte to zero: right after resp_iov. */
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    /* NOTE: relies on struct layout; fields before resp_iov stay intact. */
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}
59 | |
60 | void virtio_scsi_free_req(VirtIOSCSIReq *req) |
61 | { |
62 | qemu_iovec_destroy(&req->resp_iov); |
63 | qemu_sglist_destroy(&req->qsgl); |
64 | g_free(req); |
65 | } |
66 | |
/* Copy the response header to guest memory, push the element back onto the
 * virtqueue, notify the guest, and free @req.
 */
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* Write the response (req->resp) into the guest's in-iovec. */
    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    /* Use the irqfd notification path while dataplane is running and not
     * fenced; otherwise fall back to the regular notify.
     */
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        /* Sever the SCSI-layer back-pointer before dropping our ref. */
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}
87 | |
/* Handle a malformed request: mark the device broken, return the element to
 * the virtqueue without pushing a response, and free @req.
 */
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers" );
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
94 | |
95 | static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov, |
96 | hwaddr *addr, int num, size_t skip) |
97 | { |
98 | QEMUSGList *qsgl = &req->qsgl; |
99 | size_t copied = 0; |
100 | |
101 | while (num) { |
102 | if (skip >= iov->iov_len) { |
103 | skip -= iov->iov_len; |
104 | } else { |
105 | qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip); |
106 | copied += iov->iov_len - skip; |
107 | skip = 0; |
108 | } |
109 | iov++; |
110 | addr++; |
111 | num--; |
112 | } |
113 | |
114 | assert(skip == 0); |
115 | return copied; |
116 | } |
117 | |
/* Parse the virtio-scsi headers out of @req's virtqueue element and build
 * the payload scatter/gather list.  @req_size/@resp_size are the expected
 * header sizes.  Returns 0 on success, -EINVAL on malformed buffers, or
 * -ENOTSUP if the request is bidirectional (not supported).
 */
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    /* Copy the request header from the out-iovec. */
    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    /* Record where the response header will be written later. */
    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    /* Everything past the headers is payload. */
    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    /* Bidirectional transfers are not supported by this device. */
    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}
175 | |
176 | static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq) |
177 | { |
178 | VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s; |
179 | VirtIOSCSIReq *req; |
180 | |
181 | req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size); |
182 | if (!req) { |
183 | return NULL; |
184 | } |
185 | virtio_scsi_init_req(s, vq, req); |
186 | return req; |
187 | } |
188 | |
/* Migration: serialize an in-flight request as (queue index, virtqueue
 * element).  The wire format must stay in sync with
 * virtio_scsi_load_request().
 */
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    /* Queues 0 and 1 are ctrl/event; command queues start at index 2. */
    uint32_t n = virtio_get_queue_index(req->vq) - 2;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(f, &req->elem);
}
199 | |
/* Migration: reconstruct an in-flight request saved by
 * virtio_scsi_save_request() and re-attach it to @sreq.
 */
static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    /* Re-parse headers; bad migration data is fatal. */
    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data" );
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        /* Direction recomputed from the element must match the CDB's. */
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
228 | |
/* Tracks one asynchronous cancellation issued on behalf of a TMF request;
 * the notifier fires when the targeted SCSI request has been cancelled.
 */
typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;  /* TMF whose "remaining" count this drops */
} VirtIOSCSICancelNotifier;
233 | |
234 | static void virtio_scsi_cancel_notify(Notifier *notifier, void *data) |
235 | { |
236 | VirtIOSCSICancelNotifier *n = container_of(notifier, |
237 | VirtIOSCSICancelNotifier, |
238 | notifier); |
239 | |
240 | if (--n->tmf_req->remaining == 0) { |
241 | virtio_scsi_complete_req(n->tmf_req); |
242 | } |
243 | g_free(n); |
244 | } |
245 | |
/* Debug invariant: while dataplane is running, the device's BlockBackend
 * must be bound to the dataplane AioContext.
 */
static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}
252 | |
/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* Find the in-flight request matching the TMF's tag, if any. */
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                /* ABORT TASK: cancel asynchronously; the TMF completes
                 * from virtio_scsi_cancel_notify().
                 */
                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* resetting suppresses VIRTIO_SCSI_S_ABORTED in favour of
         * VIRTIO_SCSI_S_RESET for requests cancelled by the reset
         * (see virtio_scsi_request_cancelled).
         */
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        /* Drop the guard reference taken above; if cancellations are still
         * outstanding, completion happens in the notifier callback.
         */
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        /* Reset every LUN behind the addressed target. */
        target = req->req.tmf.lun[1];
        s->resetting++;
        QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
            d = SCSI_DEVICE(kid->child);
            if (d->channel == 0 && d->id == target) {
                qdev_reset_all(&d->qdev);
            }
        }
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    return ret;
}
394 | |
/* Dispatch one control-queue request: a task management function (TMF) or an
 * asynchronous-notification query/subscribe.  Completes the request unless a
 * TMF is still in progress (async cancellation).
 */
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    /* Peek at the type field before parsing the full header. */
    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                    sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                    sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            /* No async events are supported; report success with none. */
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    /* -EINPROGRESS means the TMF completes later via cancel notifiers. */
    if (r == 0) {
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}
434 | |
435 | bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) |
436 | { |
437 | VirtIOSCSIReq *req; |
438 | bool progress = false; |
439 | |
440 | while ((req = virtio_scsi_pop_req(s, vq))) { |
441 | progress = true; |
442 | virtio_scsi_handle_ctrl_req(s, req); |
443 | } |
444 | return progress; |
445 | } |
446 | |
/* Control-queue kick handler.  With an iothread configured, hand processing
 * over to the dataplane via ioeventfd; fall through to inline handling only
 * if dataplane setup previously failed (fenced).
 */
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}
461 | |
/* Complete a command request, shrinking resp_size to the fixed header. */
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}
470 | |
/* SCSI-layer completion callback: fill in status, residual count and (on
 * non-GOOD status) the sense data, then complete the virtio request.
 */
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
                                         size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    /* Cancelled requests are completed via virtio_scsi_request_cancelled. */
    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        /* Truncate sense to what fits after the fixed response header. */
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}
497 | |
498 | static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, |
499 | uint8_t *buf, void *hba_private) |
500 | { |
501 | VirtIOSCSIReq *req = hba_private; |
502 | |
503 | if (cmd->len == 0) { |
504 | cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE); |
505 | memcpy(cmd->buf, buf, cmd->len); |
506 | } |
507 | |
508 | /* Extract the direction and mode directly from the request, for |
509 | * host device passthrough. |
510 | */ |
511 | cmd->xfer = req->qsgl.size; |
512 | cmd->mode = req->mode; |
513 | return 0; |
514 | } |
515 | |
516 | static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r) |
517 | { |
518 | VirtIOSCSIReq *req = r->hba_private; |
519 | |
520 | return &req->qsgl; |
521 | } |
522 | |
523 | static void virtio_scsi_request_cancelled(SCSIRequest *r) |
524 | { |
525 | VirtIOSCSIReq *req = r->hba_private; |
526 | |
527 | if (!req) { |
528 | return; |
529 | } |
530 | if (req->dev->resetting) { |
531 | req->resp.cmd.response = VIRTIO_SCSI_S_RESET; |
532 | } else { |
533 | req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED; |
534 | } |
535 | virtio_scsi_complete_cmd_req(req); |
536 | } |
537 | |
/* Complete @req with a generic VIRTIO_SCSI_S_FAILURE response. */
static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
543 | |
/* Validate a command-queue request and create the SCSI request for it, but
 * do not submit yet (submission is batched in handle_cmd_vq).  Returns 0 on
 * success; on error the request has already been completed/freed:
 * -ENOTSUP bidirectional, -EINVAL malformed (device marked broken),
 * -ENOENT bad target, -ENOBUFS payload buffer too small.
 */
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }

    d = virtio_scsi_device_find(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    /* The guest-supplied buffers must match the CDB's direction and be
     * large enough for the transfer it describes.
     */
    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    /* Paired with blk_io_unplug() in virtio_scsi_handle_cmd_req_submit(). */
    blk_io_plug(d->conf.blk);
    return 0;
}
584 | |
/* Submit a request previously prepared by handle_cmd_req_prepare(). */
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    /* Paired with blk_io_plug() in virtio_scsi_handle_cmd_req_prepare(). */
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}
594 | |
/* Drain the command virtqueue: prepare all available requests first (with
 * guest notifications suppressed), then submit them in a batch.  Returns
 * true if any request was processed.
 */
bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool progress = false;

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        /* Disable notifications while we drain to avoid redundant kicks. */
        virtio_queue_set_notification(vq, 0);

        while ((req = virtio_scsi_pop_req(s, vq))) {
            progress = true;
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        /* Re-enable and re-check to close the race with a late kick. */
        virtio_queue_set_notification(vq, 1);
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
    return progress;
}
632 | |
/* Command-queue kick handler; see virtio_scsi_handle_ctrl for the
 * dataplane/ioeventfd hand-off pattern.
 */
static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}
648 | |
/* Fill the guest-visible virtio-scsi configuration space. */
static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    /* NOTE(review): seg_max is hard-coded to 128 - 2 rather than derived
     * from the configured virtqueue_size — presumably 128 matches the
     * default queue size; verify before changing virtqueue_size defaults.
     */
    virtio_stl_p(vdev, &scsiconf->seg_max, 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
666 | |
667 | static void virtio_scsi_set_config(VirtIODevice *vdev, |
668 | const uint8_t *config) |
669 | { |
670 | VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config; |
671 | VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); |
672 | |
673 | if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 || |
674 | (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) { |
675 | virtio_error(vdev, |
676 | "bad data written to virtio-scsi configuration space" ); |
677 | return; |
678 | } |
679 | |
680 | vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size); |
681 | vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size); |
682 | } |
683 | |
684 | static uint64_t virtio_scsi_get_features(VirtIODevice *vdev, |
685 | uint64_t requested_features, |
686 | Error **errp) |
687 | { |
688 | VirtIOSCSI *s = VIRTIO_SCSI(vdev); |
689 | |
690 | /* Firstly sync all virtio-scsi possible supported features */ |
691 | requested_features |= s->host_features; |
692 | return requested_features; |
693 | } |
694 | |
/* Device reset: reset the whole SCSI bus and restore writable config
 * fields to their defaults.  Must not run while dataplane is active.
 */
static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);
    /* resetting makes cancelled requests report VIRTIO_SCSI_S_RESET. */
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}
709 | |
/* Deliver an event on the event queue.  If no buffer is available the event
 * is dropped and events_dropped is set so the next event carries the
 * EVENTS_MISSED flag.  @dev may be NULL only for pure EVENTS_MISSED
 * notifications.
 */
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* No events before the driver is ready. */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    /* Piggy-back a pending "missed events" indication on this event. */
    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    /* Events have no request header, only a response buffer. */
    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    virtio_scsi_complete_req(req);
}
756 | |
757 | bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) |
758 | { |
759 | if (s->events_dropped) { |
760 | virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); |
761 | return true; |
762 | } |
763 | return false; |
764 | } |
765 | |
/* Event-queue kick handler; see virtio_scsi_handle_ctrl for the
 * dataplane/ioeventfd hand-off pattern.
 */
static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}
780 | |
781 | static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) |
782 | { |
783 | VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); |
784 | VirtIODevice *vdev = VIRTIO_DEVICE(s); |
785 | |
786 | if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) && |
787 | dev->type != TYPE_ROM) { |
788 | virtio_scsi_acquire(s); |
789 | virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE, |
790 | sense.asc | (sense.ascq << 8)); |
791 | virtio_scsi_release(s); |
792 | } |
793 | } |
794 | |
795 | static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev, |
796 | DeviceState *dev, Error **errp) |
797 | { |
798 | SCSIDevice *sd = SCSI_DEVICE(dev); |
799 | sd->hba_supports_iothread = true; |
800 | } |
801 | |
/* Hotplug handler: move the new disk's BlockBackend into the dataplane
 * AioContext (if any), then tell the guest about the new device.
 */
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        /* Refuse devices whose backend forbids dataplane use. */
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        virtio_scsi_acquire(s);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        virtio_scsi_release(s);
        if (ret < 0) {
            return;
        }
    }

    /* Only notify if the guest negotiated hotplug event support. */
    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        virtio_scsi_release(s);
    }
}
830 | |
/* Hot-unplug handler: notify the guest, detach the device with external
 * events disabled, and move the BlockBackend back to the main context.
 */
static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        virtio_scsi_release(s);
    }

    /* Keep ioeventfd handlers from racing with the unplug. */
    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}
858 | |
/* Callbacks and limits the generic SCSI bus layer uses for this HBA. */
static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};
873 | |
874 | void virtio_scsi_common_realize(DeviceState *dev, |
875 | VirtIOHandleOutput ctrl, |
876 | VirtIOHandleOutput evt, |
877 | VirtIOHandleOutput cmd, |
878 | Error **errp) |
879 | { |
880 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); |
881 | VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev); |
882 | int i; |
883 | |
884 | virtio_init(vdev, "virtio-scsi" , VIRTIO_ID_SCSI, |
885 | sizeof(VirtIOSCSIConfig)); |
886 | |
887 | if (s->conf.num_queues == 0 || |
888 | s->conf.num_queues > VIRTIO_QUEUE_MAX - 2) { |
889 | error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " |
890 | "must be a positive integer less than %d." , |
891 | s->conf.num_queues, VIRTIO_QUEUE_MAX - 2); |
892 | virtio_cleanup(vdev); |
893 | return; |
894 | } |
895 | s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues); |
896 | s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE; |
897 | s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE; |
898 | |
899 | s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl); |
900 | s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt); |
901 | for (i = 0; i < s->conf.num_queues; i++) { |
902 | s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd); |
903 | } |
904 | } |
905 | |
/* Realize the full virtio-scsi device: common queues, the SCSI bus with
 * our HBA callbacks, and optional dataplane (iothread) setup.
 */
static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), dev,
                 &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev), &error_abort);

    virtio_scsi_dataplane_setup(s, errp);
}
929 | |
/* Tear down state created by virtio_scsi_common_realize(). */
void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}
938 | |
/* Unrealize: detach our hotplug handler before common teardown. */
static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL, &error_abort);
    virtio_scsi_common_unrealize(dev);
}
946 | |
/* User-configurable qdev properties for the virtio-scsi device. */
static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues" , VirtIOSCSI, parent_obj.conf.num_queues, 1),
    DEFINE_PROP_UINT32("virtqueue_size" , VirtIOSCSI,
                                          parent_obj.conf.virtqueue_size, 128),
    DEFINE_PROP_UINT32("max_sectors" , VirtIOSCSI, parent_obj.conf.max_sectors,
                                                  0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun" , VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                                                  128),
    /* Feature bits advertised via virtio_scsi_get_features(). */
    DEFINE_PROP_BIT("hotplug" , VirtIOSCSI, host_features,
                                           VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change" , VirtIOSCSI, host_features,
                                                VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread" , VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};
963 | |
/* Migration description: only the generic virtio state here; in-flight
 * requests migrate via the save_request/load_request bus callbacks.
 */
static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi" ,
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
973 | |
/* Class init for the abstract common base: config-space read only. */
static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
982 | |
/* Class init for the concrete virtio-scsi device: wire up realize,
 * config, reset, dataplane and hotplug entry points.
 */
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->props = virtio_scsi_properties;
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}
1003 | |
/* Abstract base type shared by virtio-scsi and vhost-scsi style devices. */
static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};
1011 | |
/* Concrete virtio-scsi device type; also acts as hotplug handler for its
 * own SCSI bus.
 */
static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
1022 | |
/* Register both QOM types with the type system at startup. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)
1030 | |