1 | /* |
2 | * virtio ccw target implementation |
3 | * |
4 | * Copyright 2012,2015 IBM Corp. |
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Pierre Morel <pmorel@linux.vnet.ibm.com> |
7 | * |
8 | * This work is licensed under the terms of the GNU GPL, version 2 or (at |
9 | * your option) any later version. See the COPYING file in the top-level |
10 | * directory. |
11 | */ |
12 | |
13 | #include "qemu/osdep.h" |
14 | #include "qapi/error.h" |
15 | #include "sysemu/kvm.h" |
16 | #include "net/net.h" |
17 | #include "hw/virtio/virtio.h" |
18 | #include "migration/qemu-file-types.h" |
19 | #include "hw/virtio/virtio-net.h" |
20 | #include "hw/sysbus.h" |
21 | #include "qemu/bitops.h" |
22 | #include "qemu/error-report.h" |
23 | #include "qemu/module.h" |
24 | #include "hw/virtio/virtio-access.h" |
25 | #include "hw/virtio/virtio-bus.h" |
26 | #include "hw/s390x/adapter.h" |
27 | #include "hw/s390x/s390_flic.h" |
28 | |
29 | #include "hw/s390x/ioinst.h" |
30 | #include "hw/s390x/css.h" |
31 | #include "virtio-ccw.h" |
32 | #include "trace.h" |
33 | #include "hw/s390x/css-bridge.h" |
34 | #include "hw/s390x/s390-virtio-ccw.h" |
35 | |
36 | #define NR_CLASSIC_INDICATOR_BITS 64 |
37 | |
38 | static int virtio_ccw_dev_post_load(void *opaque, int version_id) |
39 | { |
40 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque); |
41 | CcwDevice *ccw_dev = CCW_DEVICE(dev); |
42 | CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); |
43 | |
44 | ccw_dev->sch->driver_data = dev; |
45 | if (ccw_dev->sch->thinint_active) { |
46 | dev->routes.adapter.adapter_id = css_get_adapter_id( |
47 | CSS_IO_ADAPTER_VIRTIO, |
48 | dev->thinint_isc); |
49 | } |
50 | /* Re-fill subch_id after loading the subchannel states.*/ |
51 | if (ck->refill_ids) { |
52 | ck->refill_ids(ccw_dev); |
53 | } |
54 | return 0; |
55 | } |
56 | |
/*
 * Scratch structure used only during migration (via VMSTATE_WITH_TMP):
 * carries the proxied VirtIODevice's config_vector, which the virtio core
 * does not migrate itself.
 */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;   /* filled in by VMSTATE_WITH_TMP */
    uint16_t config_vector;
} VirtioCcwDeviceTmp;
61 | |
62 | static int virtio_ccw_dev_tmp_pre_save(void *opaque) |
63 | { |
64 | VirtioCcwDeviceTmp *tmp = opaque; |
65 | VirtioCcwDevice *dev = tmp->parent; |
66 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
67 | |
68 | tmp->config_vector = vdev->config_vector; |
69 | |
70 | return 0; |
71 | } |
72 | |
73 | static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id) |
74 | { |
75 | VirtioCcwDeviceTmp *tmp = opaque; |
76 | VirtioCcwDevice *dev = tmp->parent; |
77 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
78 | |
79 | vdev->config_vector = tmp->config_vector; |
80 | return 0; |
81 | } |
82 | |
/* Migrates the piece of VirtIODevice state owned by the ccw proxy. */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};
92 | |
/* Top-level migration description for a virtio ccw proxy device. */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        /* Indicators are migrated by guest address and re-registered. */
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
116 | |
117 | static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, |
118 | VirtioCcwDevice *dev); |
119 | |
120 | VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch) |
121 | { |
122 | VirtIODevice *vdev = NULL; |
123 | VirtioCcwDevice *dev = sch->driver_data; |
124 | |
125 | if (dev) { |
126 | vdev = virtio_bus_get_device(&dev->bus); |
127 | } |
128 | return vdev; |
129 | } |
130 | |
/* Switch queue notifications to ioeventfd handling on the proxied bus. */
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}
135 | |
/* Stop ioeventfd handling; notifications fall back to the slow path. */
static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}
140 | |
141 | static bool virtio_ccw_ioeventfd_enabled(DeviceState *d) |
142 | { |
143 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
144 | |
145 | return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0; |
146 | } |
147 | |
148 | static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, |
149 | int n, bool assign) |
150 | { |
151 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
152 | CcwDevice *ccw_dev = CCW_DEVICE(dev); |
153 | SubchDev *sch = ccw_dev->sch; |
154 | uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; |
155 | |
156 | return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); |
157 | } |
158 | |
/* Communication blocks used by several channel commands. */

/* CCW_CMD_SET_VQ payload for legacy (pre-virtio-1) drivers. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;   /* guest address of the contiguous ring */
    uint32_t align;   /* ring alignment; must be 4096 */
    uint16_t index;   /* virtqueue index */
    uint16_t num;     /* number of ring entries */
} QEMU_PACKED VqInfoBlockLegacy;

/* CCW_CMD_SET_VQ payload for virtio-1: ring parts passed separately. */
typedef struct VqInfoBlock {
    uint64_t desc;    /* descriptor area */
    uint32_t res0;    /* reserved */
    uint16_t index;   /* virtqueue index */
    uint16_t num;     /* number of ring entries */
    uint64_t avail;   /* driver (available) area */
    uint64_t used;    /* device (used) area */
} QEMU_PACKED VqInfoBlock;

/* Reply for CCW_CMD_READ_VQ_CONF. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;  /* maximum ring size for this queue */
} QEMU_PACKED VqConfigBlock;

/* Payload for CCW_CMD_READ_FEAT/WRITE_FEAT: 32 feature bits at a time. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;     /* which 32-bit window of the 64-bit feature space */
} QEMU_PACKED VirtioFeatDesc;

/* Payload for CCW_CMD_SET_IND_ADAPTER (adapter/thin interrupts). */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;  /* start bit of this device's indicators */
    uint8_t isc;       /* interruption subclass to use */
} QEMU_PACKED VirtioThinintInfo;
192 | |
193 | typedef struct VirtioRevInfo { |
194 | uint16_t revision; |
195 | uint16_t length; |
196 | uint8_t data[0]; |
197 | } QEMU_PACKED VirtioRevInfo; |
198 | |
199 | /* Specify where the virtqueues for the subchannel are in guest memory. */ |
/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    /* Exactly one of info (virtio-1) or linfo (legacy) is non-NULL. */
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        /* virtio-1: set the three ring parts individually. */
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        /* Legacy: one contiguous ring with a fixed layout. */
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* A zero ring address disables the queue. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}
247 | |
/*
 * Reset the virtio side of the device: reset the virtio device proper and
 * drop all indicator registrations, leaving thin interrupts disabled.
 */
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    /* Quiesce ioeventfd handling before tearing down queue state. */
    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}
268 | |
/*
 * Handle CCW_CMD_SET_VQ: read a legacy or virtio-1 info block from the
 * channel data stream and apply it via virtio_ccw_set_vqs().
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        /* Without SLI, the data length must match exactly. */
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        /* Channel program data is big endian. */
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}
307 | |
/*
 * Interpret a single virtio channel command for this subchannel.
 *
 * Returns 0 on success, -EINVAL/-EFAULT for malformed CCWs, and -ENOSYS
 * where a command reject should be posted to the guest.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* Enforce exact lengths unless suppress-length-indication applies. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->force_revision_1 && dev->revision < 0 &&
        ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        /*
         * virtio-1 drivers must start with negotiating to a revision >= 1,
         * so post a command reject for all other commands
         */
        return -ENOSYS;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        /* revision < 1 means legacy ring layout. */
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /* The guest tells us which 32-bit feature window it wants. */
            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            /* Rewind so the reply lands in the features field. */
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                /* Replace only the low 32 bits of the guest features. */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        /* Partial reads of the config space are allowed. */
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Refresh the cached config before handing it to the guest. */
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                /* Only apply the config if the read succeeded. */
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /*
             * NOTE(review): unlike the other commands this writes directly
             * via ccw.cda instead of the ccw data stream, so indirect data
             * addressing is not handled here -- confirm that is intended.
             */
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            /* Stop ioeventfd before the driver drops DRIVER_OK. */
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    /* Status 0 means a full device reset. */
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Classic and adapter indicators are mutually exclusive. */
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Separate indicator area for config-change notifications. */
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Classic indicators already registered: reject. */
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        /* Fetch the fixed header: revision + length (4 bytes). */
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        /* Revisions can only be negotiated once, and never downgraded. */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
685 | |
686 | static void virtio_sch_disable_cb(SubchDev *sch) |
687 | { |
688 | VirtioCcwDevice *dev = sch->driver_data; |
689 | |
690 | dev->revision = -1; |
691 | } |
692 | |
/*
 * Common realize path for all virtio ccw proxies: create the virtual
 * subchannel, wire up the callbacks, then run the subclass realize hook
 * and the generic ccw-device realize.  On failure the subchannel is
 * unassigned and freed again.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    /* No revision negotiated yet. */
    dev->revision = -1;
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    /* ioeventfd handling requires kvm eventfd support. */
    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    /* Undo the subchannel assignment made by css_create_sch(). */
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}
750 | |
751 | static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev, Error **errp) |
752 | { |
753 | VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); |
754 | CcwDevice *ccw_dev = CCW_DEVICE(dev); |
755 | SubchDev *sch = ccw_dev->sch; |
756 | |
757 | if (dc->unrealize) { |
758 | dc->unrealize(dev, errp); |
759 | } |
760 | |
761 | if (sch) { |
762 | css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); |
763 | g_free(sch); |
764 | ccw_dev->sch = NULL; |
765 | } |
766 | if (dev->indicators) { |
767 | release_indicator(&dev->routes.adapter, dev->indicators); |
768 | dev->indicators = NULL; |
769 | } |
770 | } |
771 | |
772 | /* DeviceState to VirtioCcwDevice. Note: used on datapath, |
773 | * be careful and test performance if you change this. |
774 | */ |
775 | static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d) |
776 | { |
777 | CcwDevice *ccw_dev = to_ccw_dev_fast(d); |
778 | |
779 | return container_of(ccw_dev, VirtioCcwDevice, parent_obj); |
780 | } |
781 | |
782 | static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, |
783 | uint8_t to_be_set) |
784 | { |
785 | uint8_t ind_old, ind_new; |
786 | hwaddr len = 1; |
787 | uint8_t *ind_addr; |
788 | |
789 | ind_addr = cpu_physical_memory_map(ind_loc, &len, 1); |
790 | if (!ind_addr) { |
791 | error_report("%s(%x.%x.%04x): unable to access indicator" , |
792 | __func__, sch->cssid, sch->ssid, sch->schid); |
793 | return -1; |
794 | } |
795 | do { |
796 | ind_old = *ind_addr; |
797 | ind_new = ind_old | to_be_set; |
798 | } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old); |
799 | trace_virtio_ccw_set_ind(ind_loc, ind_old, ind_new); |
800 | cpu_physical_memory_unmap(ind_addr, len, 1, len); |
801 | |
802 | return ind_old; |
803 | } |
804 | |
/*
 * Deliver a notification for queue `vector` (or a config change when
 * vector == VIRTIO_QUEUE_MAX) to the guest via the registered indicators.
 * Datapath function.
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            /* Only raise the adapter irq if the summary bit was clear. */
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            /* Classic indicators: one bit per queue in a 64-bit word. */
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        /* Configuration change: use the secondary indicator area. */
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
867 | |
868 | static void virtio_ccw_reset(DeviceState *d) |
869 | { |
870 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
871 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
872 | VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); |
873 | |
874 | virtio_ccw_reset_virtio(dev, vdev); |
875 | if (vdc->parent_reset) { |
876 | vdc->parent_reset(d); |
877 | } |
878 | } |
879 | |
880 | static void virtio_ccw_vmstate_change(DeviceState *d, bool running) |
881 | { |
882 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
883 | |
884 | if (running) { |
885 | virtio_ccw_start_ioeventfd(dev); |
886 | } else { |
887 | virtio_ccw_stop_ioeventfd(dev); |
888 | } |
889 | } |
890 | |
891 | static bool virtio_ccw_query_guest_notifiers(DeviceState *d) |
892 | { |
893 | CcwDevice *dev = CCW_DEVICE(d); |
894 | |
895 | return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA); |
896 | } |
897 | |
898 | static int virtio_ccw_get_mappings(VirtioCcwDevice *dev) |
899 | { |
900 | int r; |
901 | CcwDevice *ccw_dev = CCW_DEVICE(dev); |
902 | |
903 | if (!ccw_dev->sch->thinint_active) { |
904 | return -EINVAL; |
905 | } |
906 | |
907 | r = map_indicator(&dev->routes.adapter, dev->summary_indicator); |
908 | if (r) { |
909 | return r; |
910 | } |
911 | r = map_indicator(&dev->routes.adapter, dev->indicators); |
912 | if (r) { |
913 | return r; |
914 | } |
915 | dev->routes.adapter.summary_addr = dev->summary_indicator->map; |
916 | dev->routes.adapter.ind_addr = dev->indicators->map; |
917 | |
918 | return 0; |
919 | } |
920 | |
921 | static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs) |
922 | { |
923 | int i; |
924 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
925 | int ret; |
926 | S390FLICState *fs = s390_get_flic(); |
927 | S390FLICStateClass *fsc = s390_get_flic_class(fs); |
928 | |
929 | ret = virtio_ccw_get_mappings(dev); |
930 | if (ret) { |
931 | return ret; |
932 | } |
933 | for (i = 0; i < nvqs; i++) { |
934 | if (!virtio_queue_get_num(vdev, i)) { |
935 | break; |
936 | } |
937 | } |
938 | dev->routes.num_routes = i; |
939 | return fsc->add_adapter_routes(fs, &dev->routes); |
940 | } |
941 | |
/*
 * Release the adapter routes set up by virtio_ccw_setup_irqroutes().
 * nvqs is unused here; the routes structure tracks its own size.
 */
static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}
949 | |
950 | static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n) |
951 | { |
952 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
953 | VirtQueue *vq = virtio_get_queue(vdev, n); |
954 | EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); |
955 | |
956 | return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL, |
957 | dev->routes.gsi[n]); |
958 | } |
959 | |
960 | static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n) |
961 | { |
962 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
963 | VirtQueue *vq = virtio_get_queue(vdev, n); |
964 | EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); |
965 | int ret; |
966 | |
967 | ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier, |
968 | dev->routes.gsi[n]); |
969 | assert(ret == 0); |
970 | } |
971 | |
/*
 * (Un)assign the guest notifier for queue n.  When with_irqfd is set, the
 * notifier is additionally wired to kvm so the host kernel can inject the
 * interrupt without exiting to QEMU.
 */
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                /* Roll back the fd handler on irqfd failure. */
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        /* Mask first so no event slips through during teardown. */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}
1019 | |
/*
 * VirtioBusClass::set_guest_notifiers hook: (de)assign guest notifiers
 * for the first nvqs queues.  irqfds are used only when the subchannel
 * runs with adapter interrupts (thinint) and the kernel irqchip supports
 * them.  Route setup must precede irqfd assignment; route release must
 * follow irqfd removal.  On failure, already-assigned notifiers are
 * rolled back (without irqfds) before the routes are dropped.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        /* Stop at the first queue that does not exist. */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /* Undo queues 0..n-1; their irqfds were not attached on this path. */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
1061 | |
1062 | static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f) |
1063 | { |
1064 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1065 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
1066 | |
1067 | qemu_put_be16(f, virtio_queue_vector(vdev, n)); |
1068 | } |
1069 | |
1070 | static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f) |
1071 | { |
1072 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1073 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
1074 | uint16_t vector; |
1075 | |
1076 | qemu_get_be16s(f, &vector); |
1077 | virtio_queue_set_vector(vdev, n , vector); |
1078 | |
1079 | return 0; |
1080 | } |
1081 | |
1082 | static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) |
1083 | { |
1084 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1085 | vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL); |
1086 | } |
1087 | |
1088 | static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) |
1089 | { |
1090 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1091 | return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1); |
1092 | } |
1093 | |
1094 | static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp) |
1095 | { |
1096 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1097 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
1098 | |
1099 | if (dev->max_rev >= 1) { |
1100 | virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); |
1101 | } |
1102 | } |
1103 | |
1104 | /* This is called by virtio-bus just after the device is plugged. */ |
1105 | static void virtio_ccw_device_plugged(DeviceState *d, Error **errp) |
1106 | { |
1107 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1108 | VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); |
1109 | CcwDevice *ccw_dev = CCW_DEVICE(d); |
1110 | SubchDev *sch = ccw_dev->sch; |
1111 | int n = virtio_get_num_queues(vdev); |
1112 | S390FLICState *flic = s390_get_flic(); |
1113 | |
1114 | if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { |
1115 | dev->max_rev = 0; |
1116 | } |
1117 | |
1118 | if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) { |
1119 | error_setg(errp, "The number of virtqueues %d " |
1120 | "exceeds virtio limit %d" , n, |
1121 | VIRTIO_QUEUE_MAX); |
1122 | return; |
1123 | } |
1124 | if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) { |
1125 | error_setg(errp, "The number of virtqueues %d " |
1126 | "exceeds flic adapter route limit %d" , n, |
1127 | flic->adapter_routes_max_batch); |
1128 | return; |
1129 | } |
1130 | |
1131 | sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus); |
1132 | |
1133 | |
1134 | css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, |
1135 | d->hotplugged, 1); |
1136 | } |
1137 | |
1138 | static void virtio_ccw_device_unplugged(DeviceState *d) |
1139 | { |
1140 | VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); |
1141 | |
1142 | virtio_ccw_stop_ioeventfd(dev); |
1143 | } |
1144 | /**************** Virtio-ccw Bus Device Descriptions *******************/ |
1145 | |
1146 | static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp) |
1147 | { |
1148 | VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; |
1149 | |
1150 | virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev); |
1151 | virtio_ccw_device_realize(_dev, errp); |
1152 | } |
1153 | |
1154 | static void virtio_ccw_busdev_unrealize(DeviceState *dev, Error **errp) |
1155 | { |
1156 | VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; |
1157 | |
1158 | virtio_ccw_device_unrealize(_dev, errp); |
1159 | } |
1160 | |
1161 | static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev, |
1162 | DeviceState *dev, Error **errp) |
1163 | { |
1164 | VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev); |
1165 | |
1166 | virtio_ccw_stop_ioeventfd(_dev); |
1167 | } |
1168 | |
1169 | static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) |
1170 | { |
1171 | DeviceClass *dc = DEVICE_CLASS(klass); |
1172 | CCWDeviceClass *k = CCW_DEVICE_CLASS(dc); |
1173 | VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass); |
1174 | |
1175 | k->unplug = virtio_ccw_busdev_unplug; |
1176 | dc->realize = virtio_ccw_busdev_realize; |
1177 | dc->unrealize = virtio_ccw_busdev_unrealize; |
1178 | dc->bus_type = TYPE_VIRTUAL_CSS_BUS; |
1179 | device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset); |
1180 | } |
1181 | |
/*
 * Abstract base type for all virtio-ccw proxy devices; concrete
 * devices (net, blk, ...) derive from this.
 */
static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};
1190 | |
1191 | /* virtio-ccw-bus */ |
1192 | |
1193 | static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, |
1194 | VirtioCcwDevice *dev) |
1195 | { |
1196 | DeviceState *qdev = DEVICE(dev); |
1197 | char virtio_bus_name[] = "virtio-bus" ; |
1198 | |
1199 | qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS, |
1200 | qdev, virtio_bus_name); |
1201 | } |
1202 | |
1203 | static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) |
1204 | { |
1205 | VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); |
1206 | BusClass *bus_class = BUS_CLASS(klass); |
1207 | |
1208 | bus_class->max_dev = 1; |
1209 | k->notify = virtio_ccw_notify; |
1210 | k->vmstate_change = virtio_ccw_vmstate_change; |
1211 | k->query_guest_notifiers = virtio_ccw_query_guest_notifiers; |
1212 | k->set_guest_notifiers = virtio_ccw_set_guest_notifiers; |
1213 | k->save_queue = virtio_ccw_save_queue; |
1214 | k->load_queue = virtio_ccw_load_queue; |
1215 | k->save_config = virtio_ccw_save_config; |
1216 | k->load_config = virtio_ccw_load_config; |
1217 | k->pre_plugged = virtio_ccw_pre_plugged; |
1218 | k->device_plugged = virtio_ccw_device_plugged; |
1219 | k->device_unplugged = virtio_ccw_device_unplugged; |
1220 | k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled; |
1221 | k->ioeventfd_assign = virtio_ccw_ioeventfd_assign; |
1222 | } |
1223 | |
/* The virtio bus embedded in each virtio-ccw proxy device. */
static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_init = virtio_ccw_bus_class_init,
};
1230 | |
/* Register the bus and abstract device types with QOM at startup. */
static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)
1238 | |