/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
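
/*
 * The handle sent on the wire is the per-BDS request slot index XORed with
 * the BlockDriverState pointer.  XOR is its own inverse, so the same
 * operation recovers the index from a reply: with bs == 0x7f00 (purely for
 * illustration) and index 3, INDEX_TO_HANDLE() yields 0x7f03 and
 * HANDLE_TO_INDEX() maps it back to 3.  A stale or foreign handle decodes
 * to an out-of-range index and is rejected in nbd_connection_entry().
 */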

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* waiting for connection_co? */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

typedef struct BDRVNBDState {
    QIOChannelSocket *sioc; /* The master data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;
    Coroutine *connection_co;
    int in_flight;
    NBDClientState state;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *x_dirty_bitmap;
} BDRVNBDState;

/* @ret is currently unused; it will be used for reconnect in the future */
static void nbd_channel_error(BDRVNBDState *s, int ret)
{
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

static void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
}

static void nbd_client_attach_aio_context_bh(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /*
     * The node is still drained, so we know the coroutine has yielded in
     * nbd_read_eof(), the only place where bs->in_flight can reach 0, or it is
     * entered for the first time. Both places are safe for entering the
     * coroutine.
     */
    qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
    bdrv_dec_in_flight(bs);
}

static void nbd_client_attach_aio_context(BlockDriverState *bs,
                                          AioContext *new_context)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);

    bdrv_inc_in_flight(bs);

    /*
     * Need to wait here for the BH to run because the BH must run while the
     * node is still drained.
     */
    aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
}


static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(s->ioc);

    /* finish any pending coroutines */
    qio_channel_shutdown(s->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, s->connection_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(s->sioc));
    s->sioc = NULL;
    object_unref(OBJECT(s->ioc));
    s->ioc = NULL;
}

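/*
 * nbd_connection_entry
 * Long-lived per-connection coroutine: repeatedly read one reply header off
 * the socket, look up the request slot encoded in the handle, wake the
 * requesting coroutine (which consumes any payload), and yield until that
 * request re-enters us.  Exits once the channel is marked NBD_CLIENT_QUIT.
 */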
static coroutine_fn void nbd_connection_entry(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (s->state != NBD_CLIENT_QUIT) {
        /*
         * The NBD client can only really be considered idle when it has
         * yielded from qio_channel_readv_all_eof(), waiting for data. This is
         * the point where the additional scheduled coroutine entry happens
         * after nbd_client_attach_aio_context().
         *
         * Therefore we keep an additional in_flight reference all the time and
         * only drop it temporarily here.
         */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);

        if (local_err) {
            trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL; /* avoid reusing a freed pointer on the next iteration */
        }
        if (ret <= 0) {
            nbd_channel_error(s, ret ? ret : -EIO);
            break;
        }

        /*
         * There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            nbd_channel_error(s, -EINVAL);
            break;
        }

        /*
         * We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering connection_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   connection_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    nbd_recv_coroutines_wake_all(s);
    bdrv_dec_in_flight(s->bs);

    s->connection_co = NULL;
    aio_wait_kick();
}

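/*
 * nbd_co_send_request
 * Reserve a request slot, blocking on free_sema while all MAX_NBD_REQUESTS
 * slots are in flight, stamp @request with the slot's wire handle, and send
 * the header plus any write payload under send_mutex.  The matching reply
 * is collected separately via the chunk iterators below.
 */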
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }

    if (s->state != NBD_CLIENT_CONNECTED) {
        rc = -EIO;
        goto err;
    }

    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && s->state == NBD_CLIENT_CONNECTED) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        nbd_channel_error(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
            s->in_flight--;
        }
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
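
/*
 * These helpers consume big-endian fields from a chunk payload in place.
 * For example, an NBD_REPLY_TYPE_OFFSET_HOLE payload is 12 bytes, a 64-bit
 * offset followed by a 32-bit hole size, parsed as
 *
 *     offset    = payload_advance64(&payload);
 *     hole_size = payload_advance32(&payload);
 *
 * leaving @payload pointing just past the consumed bytes.
 */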

static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    return 0;
}
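
/*
 * Worked example of the workaround above, assuming min_block == 512:
 * a server extent of length 1000 (spanning more than the final block) is
 * truncated down to 512; an extent of length 300 (only the final unaligned
 * block) is rounded up to 512 with flags cleared, i.e. reported as
 * fully allocated.
 */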

/*
 * nbd_parse_error_payload
 * On success, @errp contains a message describing the NBD error reply.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message, and
 * the connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_connection_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (s->state != NBD_CLIENT_CONNECTED) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read reply, wake up connection_co and mark the channel as broken (via
 * nbd_channel_error()) if needed.
 * Returns a fatal (channel) error code, or 0 with the server's per-request
 * error, if any, stored in *request_ret.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For the assertion at the start of the loop in nbd_connection_entry */
        *reply = s->reply;
        s->reply.handle = 0;
    }

    if (s->connection_co) {
        aio_co_wake(s->connection_co);
    }

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
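
/*
 * Typical use (cf. nbd_co_receive_return_code() below): the loop body runs
 * once per structured chunk and not at all for a simple reply, e.g.
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
 *         ... inspect reply.structured, then g_free(payload) ...
 *     }
 *
 * with iter.ret and iter.request_ret holding the accumulated results
 * afterwards.
 */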

/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->state != NBD_CLIENT_CONNECTED) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_channel_error(iter, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || s->state != NBD_CLIENT_CONNECTED) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}

static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                                      int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* unexpected reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent,
                                            int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(s, request->handle,
                                     &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request->from, request->len, request->handle,
                                  request->flags, request->type,
                                  nbd_cmd_lookup(request->type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}

static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                                uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
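    /*
     * Example, assuming a server export of 1000 bytes rounded up to 1024 by
     * the block layer: a 512-byte read at offset 512 reaches the server as a
     * 488-byte read, and the remaining 24-byte tail (the "slop") is
     * zero-filled locally below.
     */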
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                       &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}

static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                                 uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

static int nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

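    /*
     * Cap the request length three ways: at INT_MAX rounded down to the
     * request alignment, at the server's advertised max_block (if any), and
     * at the number of bytes remaining before the end of the export.
     */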
    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(MIN_NON_ZERO(QEMU_ALIGN_DOWN(INT_MAX,
                                                bs->bl.request_alignment),
                                s->info.max_block),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                           &extent, &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    assert(s->ioc);

    nbd_send_request(s->ioc, &request);

    nbd_teardown_connection(bs);
}

static QIOChannelSocket *nbd_establish_connection(SocketAddress *saddr,
                                                  Error **errp)
{
    QIOChannelSocket *sioc;
    Error *local_err = NULL;

    sioc = qio_channel_socket_new();
    qio_channel_set_name(QIO_CHANNEL(sioc), "nbd-client");

    qio_channel_socket_connect_sync(sioc, saddr, &local_err);
    if (local_err) {
        object_unref(OBJECT(sioc));
        error_propagate(errp, local_err);
        return NULL;
    }

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);

    return sioc;
}

static int nbd_client_connect(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;

    /*
     * establish TCP connection, return error if it fails
     * TODO: Configurable retry-until-timeout behaviour.
     */
    QIOChannelSocket *sioc = nbd_establish_connection(s->saddr, errp);

    if (!sioc) {
        return -ECONNREFUSED;
    }

    /* NBD handshake */
    trace_nbd_client_connect(s->export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    qio_channel_attach_aio_context(QIO_CHANNEL(sioc), aio_context);

    s->info.request_sizes = true;
    s->info.structured_reply = true;
    s->info.base_allocation = true;
    s->info.x_dirty_bitmap = g_strdup(s->x_dirty_bitmap);
    s->info.name = g_strdup(s->export ?: "");
    ret = nbd_receive_negotiate(aio_context, QIO_CHANNEL(sioc), s->tlscreds,
                                s->hostname, &s->ioc, &s->info, errp);
    g_free(s->info.x_dirty_bitmap);
    g_free(s->info.name);
    if (ret < 0) {
        object_unref(OBJECT(sioc));
        return ret;
    }
    if (s->x_dirty_bitmap && !s->info.base_allocation) {
        error_setg(errp, "requested x-dirty-bitmap %s not found",
                   s->x_dirty_bitmap);
        ret = -EINVAL;
        goto fail;
    }
    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            goto fail;
        }
    }
    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }

    s->sioc = sioc;

    if (!s->ioc) {
        s->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(s->ioc));
    }

    trace_nbd_client_connect_success(s->export);

    return 0;

 fail:
    /*
     * We have connected, but must fail for other reasons.
     * Send NBD_CMD_DISC as a courtesy to the server.
     */
    {
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc ?: QIO_CHANNEL(sioc), &request);

        object_unref(OBJECT(sioc));

        return ret;
    }
}

/*
 * Parse nbd_open options
 */

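/*
 * Accepted URI forms (the legacy nbd:... pseudo-filenames are handled
 * separately in nbd_parse_filename() below):
 *   nbd://host[:port]/[export]
 *   nbd+tcp://host[:port]/[export]
 *   nbd+unix:///[export]?socket=path
 */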
static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "/";
    p += strspn(p, "/");
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}

static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    char *file;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            goto out;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        goto out;
    }

    if (!*host_spec) {
        goto out;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }

out:
    g_free(file);
}

static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;
    Error *local_err = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    visit_type_SocketAddress(iv, NULL, &saddr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) {
        error_setg(errp,
                   "Expecting TLS credentials with a client endpoint");
        return NULL;
    }
    object_ref(obj);
    return creds;
}
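
/*
 * A matching tls-creds object is typically created on the command line,
 * e.g. (illustrative paths and IDs):
 *
 *     -object tls-creds-x509,id=tls0,dir=/path/to/pki,endpoint=client
 *     -drive driver=nbd,server.type=inet,server.host=h,server.port=10809,tls-creds=tls0
 *
 * nbd_get_tls_creds() then resolves "tls0" from the QOM object root.
 */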


static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        { /* end of list */ }
    },
};

static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    Error *local_err = NULL;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
        if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
            error_setg(errp, "TLS only supported over IP sockets");
            goto error;
        }
        s->hostname = s->saddr->u.inet.host;
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);

    ret = 0;

 error:
    if (ret < 0) {
        object_unref(OBJECT(s->tlscreds));
        qapi_free_SocketAddress(s->saddr);
        g_free(s->export);
        g_free(s->tlscredsid);
    }
    qemu_opts_del(opts);
    return ret;
}

static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        return ret;
    }

    s->bs = bs;
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_queue_init(&s->free_sema);

    ret = nbd_client_connect(bs, errp);
    if (ret < 0) {
        return ret;
    }
    /* successfully connected */
    s->state = NBD_CLIENT_CONNECTED;

    s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
    bdrv_inc_in_flight(bs);
    aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);

    return 0;
}

static int nbd_co_flush(BlockDriverState *bs)
{
    return nbd_client_co_flush(bs);
}

static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }
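
    /*
     * For example (assuming no advertised min_block): a 1000-byte export, or
     * any export with base:allocation negotiated, gets request_alignment 1;
     * a 1 MiB export without block status falls back to BDRV_SECTOR_SIZE.
     */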

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = max;
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}

static void nbd_close(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    nbd_client_close(bs);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    g_free(s->export);
    g_free(s->tlscredsid);
    g_free(s->x_dirty_bitmap);
}

static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}

static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd://%s:%s", host, port);
    }
}

static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is no
     * specification for how "export paths" would work, so NBD does not have
     * directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}

static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "server.",

    NULL
};

static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);