1 | /* |
2 | * QEMU Cryptodev backend for QEMU cipher APIs |
3 | * |
4 | * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD. |
5 | * |
6 | * Authors: |
7 | * Gonglei <arei.gonglei@huawei.com> |
8 | * Jay Zhou <jianjay.zhou@huawei.com> |
9 | * |
10 | * This library is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU Lesser General Public |
12 | * License as published by the Free Software Foundation; either |
13 | * version 2 of the License, or (at your option) any later version. |
14 | * |
15 | * This library is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
18 | * Lesser General Public License for more details. |
19 | * |
20 | * You should have received a copy of the GNU Lesser General Public |
21 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
22 | * |
23 | */ |
24 | |
25 | #include "qemu/osdep.h" |
26 | #include "hw/virtio/virtio-bus.h" |
27 | #include "sysemu/cryptodev-vhost.h" |
28 | |
29 | #ifdef CONFIG_VHOST_CRYPTO |
30 | #include "qapi/error.h" |
31 | #include "qapi/qmp/qerror.h" |
32 | #include "qemu/error-report.h" |
33 | #include "hw/virtio/virtio-crypto.h" |
34 | #include "sysemu/cryptodev-vhost-user.h" |
35 | |
36 | uint64_t |
37 | cryptodev_vhost_get_max_queues( |
38 | CryptoDevBackendVhost *crypto) |
39 | { |
40 | return crypto->dev.max_queues; |
41 | } |
42 | |
/*
 * cryptodev_vhost_cleanup:
 * @crypto: the vhost crypto backend to destroy
 *
 * Tear down the embedded vhost device state, then free the backend
 * structure itself.  @crypto must not be used after this returns.
 */
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
    vhost_dev_cleanup(&crypto->dev);
    g_free(crypto);
}
48 | |
49 | struct CryptoDevBackendVhost * |
50 | cryptodev_vhost_init( |
51 | CryptoDevBackendVhostOptions *options) |
52 | { |
53 | int r; |
54 | CryptoDevBackendVhost *crypto; |
55 | |
56 | crypto = g_new(CryptoDevBackendVhost, 1); |
57 | crypto->dev.max_queues = 1; |
58 | crypto->dev.nvqs = 1; |
59 | crypto->dev.vqs = crypto->vqs; |
60 | |
61 | crypto->cc = options->cc; |
62 | |
63 | crypto->dev.protocol_features = 0; |
64 | crypto->backend = -1; |
65 | |
66 | /* vhost-user needs vq_index to initiate a specific queue pair */ |
67 | crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs; |
68 | |
69 | r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0); |
70 | if (r < 0) { |
71 | goto fail; |
72 | } |
73 | |
74 | return crypto; |
75 | fail: |
76 | g_free(crypto); |
77 | return NULL; |
78 | } |
79 | |
80 | static int |
81 | cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto, |
82 | VirtIODevice *dev) |
83 | { |
84 | int r; |
85 | |
86 | crypto->dev.nvqs = 1; |
87 | crypto->dev.vqs = crypto->vqs; |
88 | |
89 | r = vhost_dev_enable_notifiers(&crypto->dev, dev); |
90 | if (r < 0) { |
91 | goto fail_notifiers; |
92 | } |
93 | |
94 | r = vhost_dev_start(&crypto->dev, dev); |
95 | if (r < 0) { |
96 | goto fail_start; |
97 | } |
98 | |
99 | return 0; |
100 | |
101 | fail_start: |
102 | vhost_dev_disable_notifiers(&crypto->dev, dev); |
103 | fail_notifiers: |
104 | return r; |
105 | } |
106 | |
/*
 * cryptodev_vhost_stop_one:
 * @crypto: the vhost crypto backend to stop
 * @dev: the virtio-crypto device it serves
 *
 * Stop the vhost device and disable its host notifiers, reversing the
 * setup done by cryptodev_vhost_start_one().
 */
static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
                                 VirtIODevice *dev)
{
    vhost_dev_stop(&crypto->dev, dev);
    vhost_dev_disable_notifiers(&crypto->dev, dev);
}
114 | |
115 | CryptoDevBackendVhost * |
116 | cryptodev_get_vhost(CryptoDevBackendClient *cc, |
117 | CryptoDevBackend *b, |
118 | uint16_t queue) |
119 | { |
120 | CryptoDevBackendVhost *vhost_crypto = NULL; |
121 | |
122 | if (!cc) { |
123 | return NULL; |
124 | } |
125 | |
126 | switch (cc->type) { |
127 | #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX) |
128 | case CRYPTODEV_BACKEND_TYPE_VHOST_USER: |
129 | vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue); |
130 | break; |
131 | #endif |
132 | default: |
133 | break; |
134 | } |
135 | |
136 | return vhost_crypto; |
137 | } |
138 | |
/*
 * cryptodev_vhost_set_vq_index:
 * @crypto: the vhost crypto backend
 * @vq_index: the base virtqueue index for this backend
 *
 * Record the virtqueue index so the vhost backend addresses the right
 * queue pair of the virtio device.
 */
static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
                                     int vq_index)
{
    crypto->dev.vq_index = vq_index;
}
145 | |
146 | static int |
147 | vhost_set_vring_enable(CryptoDevBackendClient *cc, |
148 | CryptoDevBackend *b, |
149 | uint16_t queue, int enable) |
150 | { |
151 | CryptoDevBackendVhost *crypto = |
152 | cryptodev_get_vhost(cc, b, queue); |
153 | const VhostOps *vhost_ops; |
154 | |
155 | cc->vring_enable = enable; |
156 | |
157 | if (!crypto) { |
158 | return 0; |
159 | } |
160 | |
161 | vhost_ops = crypto->dev.vhost_ops; |
162 | if (vhost_ops->vhost_set_vring_enable) { |
163 | return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable); |
164 | } |
165 | |
166 | return 0; |
167 | } |
168 | |
/*
 * cryptodev_vhost_start:
 * @dev: the virtio crypto device
 * @total_queues: the number of queue pairs to start
 *
 * Bind guest notifiers for all queues, then start the vhost backend of
 * each queue pair and restore its saved vring-enable state.
 *
 * Returns 0 on success.  On failure, every queue pair started so far is
 * stopped and the guest notifiers are unbound before the negative error
 * code of the failing step is returned.
 */
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e;
    int i;
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers" );
        return -ENOSYS;
    }

    /* First pass: assign vq indexes before any queue is started. */
    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_set_vq_index(vhost_crypto, i);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
     }

    r = k->set_guest_notifiers(qbus->parent, total_queues, true);
    if (r < 0) {
        error_report("error binding guest notifier: %d" , -r);
        goto err;
    }

    /* Second pass: start each queue pair and restore its vring state. */
    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        r = cryptodev_vhost_start_one(vhost_crypto, dev);

        if (r < 0) {
            goto err_start;
        }

        if (cc->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    /* Unwind only the queue pairs that were successfully started. */
    while (--i >= 0) {
        cc = b->conf.peers.ccs[i];
        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (e < 0) {
        error_report("vhost guest notifier cleanup failed: %d" , e);
    }
err:
    return r;
}
242 | |
243 | void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues) |
244 | { |
245 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); |
246 | VirtioBusState *vbus = VIRTIO_BUS(qbus); |
247 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); |
248 | VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev); |
249 | CryptoDevBackend *b = vcrypto->cryptodev; |
250 | CryptoDevBackendVhost *vhost_crypto; |
251 | CryptoDevBackendClient *cc; |
252 | size_t i; |
253 | int r; |
254 | |
255 | for (i = 0; i < total_queues; i++) { |
256 | cc = b->conf.peers.ccs[i]; |
257 | |
258 | vhost_crypto = cryptodev_get_vhost(cc, b, i); |
259 | cryptodev_vhost_stop_one(vhost_crypto, dev); |
260 | } |
261 | |
262 | r = k->set_guest_notifiers(qbus->parent, total_queues, false); |
263 | if (r < 0) { |
264 | error_report("vhost guest notifier cleanup failed: %d" , r); |
265 | } |
266 | assert(r >= 0); |
267 | } |
268 | |
/*
 * cryptodev_vhost_virtqueue_mask:
 * @dev: the virtio crypto device
 * @queue: the queue pair index
 * @idx: the virtqueue index
 * @mask: true to mask the virtqueue notification, false to unmask
 *
 * Forward a guest-notifier mask change for one virtqueue to the vhost
 * backend of the given queue pair.
 *
 * NOTE(review): cryptodev_get_vhost() can return NULL (NULL client or
 * non-vhost backend type); callers presumably only invoke this while a
 * vhost backend is active -- confirm before relying on it.
 */
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}
285 | |
/*
 * cryptodev_vhost_virtqueue_pending:
 * @dev: the virtio crypto device
 * @queue: the queue pair index
 * @idx: the virtqueue index
 *
 * Query the vhost backend of the given queue pair for whether the
 * virtqueue has a pending notification.
 *
 * NOTE(review): as with cryptodev_vhost_virtqueue_mask(), the result of
 * cryptodev_get_vhost() is dereferenced without a NULL check -- assumed
 * to be called only while a vhost backend is active.
 */
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}
301 | |
302 | #else |
/* Stub (built without CONFIG_VHOST_CRYPTO): no vhost queues available. */
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
    return 0;
}
308 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): nothing to clean up. */
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}
312 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): vhost init always fails. */
struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
    return NULL;
}
318 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): no vhost backend exists. */
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                    CryptoDevBackend *b,
                    uint16_t queue)
{
    return NULL;
}
326 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): starting always fails. */
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    return -1;
}
331 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): nothing to stop. */
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}
335 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): masking is a no-op. */
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
}
341 | |
/* Stub (built without CONFIG_VHOST_CRYPTO): nothing is ever pending. */
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    return false;
}
347 | #endif |
348 | |