/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
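
/* With the defaults above, the mirror buffer is 16 * 1 MiB = 16 MiB in
 * total. As a worked example: with a 64 KiB bitmap granularity,
 * mirror_free_init() below carves those 16 MiB into 256
 * granularity-sized chunks. */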

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
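
/* Note that a free chunk stores its own list entry: mirror_free_init()
 * reinterprets the first bytes of each chunk as a MirrorBuffer, so the
 * free list needs no separate allocation. */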

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

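    /* For example, with a 64 KiB granularity, a request covering
     * [100 KiB, 200 KiB) occupies chunks 1 through 3:
     * self_start_chunk is 102400 / 65536 = 1, and self_end_chunk is
     * DIV_ROUND_UP(204800, 65536) = 4 (exclusive). */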
    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
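/* For example (illustrative numbers, not tied to a particular caller):
 * with a 64 KiB granularity and a 128 KiB target cluster, copying the
 * first 64 KiB of a not-yet-copied cluster is widened to the full
 * 128 KiB cluster, and the return value is 64 KiB: the bytes appended
 * past the original tail. */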
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

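/* Spawn a copy, zero, or discard operation for [offset, offset + bytes)
 * and return the number of bytes it will handle. A sketch of the usage
 * pattern, as in mirror_iteration() below:
 *
 *     io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
 *     offset += io_bytes;
 *
 * The caller advances by the returned value, which may differ from the
 * requested byte count for MIRROR_METHOD_COPY (see mirror_co_read()
 * above). */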
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

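/* Pick the next run of dirty chunks, decide between copy, zero and
 * discard for each extent, and submit the operations. Returns the
 * rate-limit delay in nanoseconds that the caller should honour, or 0
 * if the iteration bailed out early because s->ret already holds an
 * error. */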
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

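/* Split s->buf into granularity-sized chunks and queue them all on the
 * free list; mirror_co_read() takes chunks from here and
 * mirror_iteration_done() returns them. */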
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * For .prepare, returns 0 on success and -errno on failure.
 * For .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

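/* Populate the dirty bitmap for a full or top sync. If the target
 * needs explicit zeroing but cannot write zeroes with unmap, the whole
 * device is simply marked dirty; otherwise the target is zeroed up
 * front if necessary, and then every area that is allocated above
 * s->base is marked dirty. */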
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

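/* Main loop of the background job: size the device, optionally
 * pre-populate the dirty bitmap via mirror_dirty_init(), then keep
 * launching mirror_iteration() until the source is clean and the job
 * is ready to complete (or is cancelled or hits an error). */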
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

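/* .complete callback: refuse if the job is not yet synced; otherwise
 * open the target's backing chain if requested, block operations on
 * the node named by @replaces, and arm should_complete so that
 * mirror_run() finishes at the next synchronization point. */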
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_MIRROR,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .drain = block_job_drain,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
    },
    .drained_poll = mirror_drained_poll,
    .drain = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_COMMIT,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .drain = block_job_drain,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
    },
    .drained_poll = mirror_drained_poll,
    .drain = mirror_drain,
};

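/* Write a guest request straight through to the target (active
 * mirroring). Only the parts of [offset, offset + bytes) that are
 * still dirty are written, in pieces returned by
 * bdrv_dirty_bitmap_next_dirty_area(); anything the background job has
 * already copied is skipped. */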
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    QEMUIOVector target_qiov;
    uint64_t dirty_offset = offset;
    uint64_t dirty_bytes;

    if (qiov) {
        qemu_iovec_init(&target_qiov, qiov->niov);
    }

    while (true) {
        bool valid_area;
        int ret;

        bdrv_dirty_bitmap_lock(job->dirty_bitmap);
        dirty_bytes = MIN(offset + bytes - dirty_offset, INT_MAX);
        valid_area = bdrv_dirty_bitmap_next_dirty_area(job->dirty_bitmap,
                                                       &dirty_offset,
                                                       &dirty_bytes);
        if (!valid_area) {
            bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
            break;
        }

        bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
                                       dirty_offset, dirty_bytes);
        bdrv_dirty_bitmap_unlock(job->dirty_bitmap);

        job_progress_increase_remaining(&job->common.job, dirty_bytes);

        assert(dirty_offset - offset <= SIZE_MAX);
        if (qiov) {
            qemu_iovec_reset(&target_qiov);
            qemu_iovec_concat(&target_qiov, qiov,
                              dirty_offset - offset, dirty_bytes);
        }

        switch (method) {
        case MIRROR_METHOD_COPY:
            ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
                                 qiov ? &target_qiov : NULL, flags);
            break;

        case MIRROR_METHOD_ZERO:
            assert(!qiov);
            ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
                                       flags);
            break;

        case MIRROR_METHOD_DISCARD:
            assert(!qiov);
            ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
            break;

        default:
            abort();
        }

        if (ret >= 0) {
            job_progress_update(&job->common.job, dirty_bytes);
        } else {
            BlockErrorAction action;

            bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
            job->actively_synced = false;

            action = mirror_error_action(job, false, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                if (!job->ret) {
                    job->ret = ret;
                }
                break;
            }
        }

        dirty_offset += dirty_bytes;
    }

    if (qiov) {
        qemu_iovec_destroy(&target_qiov);
    }
}

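/* Register an active write with the job: allocate a MirrorOp for the
 * range, wait for any conflicting background operations on the same
 * chunks, and mark those chunks in flight so that new background
 * operations keep out of the way until active_write_settle(). */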
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .is_active_write = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

static void bdrv_mirror_top_refresh_limits(BlockDriverState *bs, Error **errp)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s && s->job && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bs->bl.request_alignment = s->job->granularity;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name = "mirror_top",
    .bdrv_co_preadv = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush = bdrv_mirror_top_flush,
    .bdrv_co_block_status = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm = bdrv_mirror_top_child_perm,
    .bdrv_refresh_limits = bdrv_mirror_top_refresh_limits,
};

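/* Common setup for mirror and active commit: insert the mirror_top
 * filter above @bs, create the block job and its target BlockBackend,
 * take the necessary permissions on source, target and (for active
 * commit) the intermediate nodes, and start the job. */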
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bs == target) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(s->common.job.aio_context,
                        BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be an NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    /*
     * Must be called before we start tracking writes, but after
     *
     * ((MirrorBlockJob *)
     *     ((MirrorBDSOpaque *)
     *         mirror_top_bs->opaque
     *     )->job
     * )->copy_mode
     *
     * has the correct value.
     * (We start tracking writes as of the following
     * bdrv_create_dirty_bitmap() call.)
     */
    bdrv_refresh_limits(mirror_top_bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    Error *local_err = NULL;
    BlockJob *ret;

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    ret = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return ret;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}