/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file,
                             s->l1_table_offset + new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write to the L1 table failed, the image may contain a partially
     * overwritten L1 table. In this case it is better to clear the L1 table
     * in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
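 *
 * A worked example (informal sketch, assuming 64 KiB clusters and 4 KiB
 * slices, i.e. l2-cache-entry-size=4096): an L2 table then holds 8192
 * entries and a slice holds 512. For a guest offset whose L2 index is
 * 1000, offset_to_l2_slice_index() yields 1000 % 512 = 488, so
 * start_of_slice = 8 * (1000 - 488) = 4096: the second 4 KiB slice of
 * the table is requested from the cache.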
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries and we really don't want bdrv_pread to perform a read-modify-write)
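 *
 * Informal example: with L1_ENTRIES_PER_SECTOR = 64, updating l1_index 100
 * rounds l1_start_index down to 64 and rewrites the whole 512-byte sector
 * covering entries 64..127 (clamped to l1_size).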
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table for the given L1 index. If the L1 entry at
 * l1_index already points to an L2 table (i.e. we are doing copy-on-write
 * for the L2 table), the contents of the old L2 table are copied into the
 * newly allocated one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search stops and the cluster is not counted as
 * contiguous. (This allows the search, for example, to stop at the first
 * compressed cluster, which may require different handling.)
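 *
 * Informal example, assuming 64 KiB clusters: three L2 entries pointing at
 * host offsets 0x50000, 0x60000 and 0x70000 count as 3 contiguous clusters;
 * if the second entry were compressed (QCOW_OFLAG_COMPRESSED set), the
 * count would stop after the first cluster.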
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
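 *
 * Informal example: scanning entries of types [UNALLOCATED, UNALLOCATED,
 * NORMAL] with wanted_type == QCOW2_CLUSTER_UNALLOCATED returns 2.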
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcow2_co_encrypt(bs, cluster_offset,
                             src_cluster_offset + offset_in_cluster,
                             buffer, bytes) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
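 *
 * Informal example, assuming 64 KiB clusters: a 1 MiB query at a
 * cluster-aligned guest offset whose first three L2 entries are contiguous
 * QCOW2_CLUSTER_NORMAL clusters followed by an unallocated one returns
 * QCOW2_CLUSTER_NORMAL with *bytes clipped to 192 KiB; the caller queries
 * again for the remainder.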
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    " (L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the appropriate slice of its L2 table.
 *
 * The cluster index in the L2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
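 *
 * A worked index calculation (informal sketch, assuming 64 KiB clusters,
 * i.e. cluster_bits = 16 and l2_size = 8192 entries): each L2 table then
 * maps 8192 * 64 KiB = 512 MiB, so for offset 0x23456789 the L1 index is
 * offset >> 29 = 1 and the index into the L2 table is
 * (offset >> 16) & 8191 = 0x345.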
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
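 *
 * Example of the nb_csectors computation below (informal, with
 * QCOW2_COMPRESSED_SECTOR_SIZE = 512): for cluster_offset = 0x50200 and
 * compressed_size = 3000, (0x50200 + 2999) / 512 = 646 and
 * 0x50200 / 512 = 641, so nb_csectors = 5 is encoded into the L2 entry
 * along with QCOW_OFLAG_COMPRESSED.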
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet-unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
                              uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
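 *
 * Informal example: if this request covers guest bytes [0, 128k) and an
 * in-flight allocation covers [64k, 128k), *cur_bytes is shortened to 64k
 * and 0 is returned; if the in-flight allocation instead covers the start
 * of this request, the coroutine waits on it and -EAGAIN is returned.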
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
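 *
 * Informal example, assuming 64 KiB clusters: for a guest_offset 4 KiB into
 * a COPIED normal cluster that is followed by two more contiguous COPIED
 * clusters, the function returns 1 with *bytes clipped to at most
 * 3 * 64 KiB - 4 KiB = 188 KiB and *host_offset pointing 4 KiB into the
 * first host cluster.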
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
                         uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
           == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                     keep_clusters * s->cluster_size
                     - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
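 *
 * Informal example: with *host_offset == INV_OFFSET the allocator may place
 * the clusters anywhere and *nb_clusters is left as requested; with a fixed
 * *host_offset, qcow2_alloc_clusters_at() may return fewer clusters than
 * requested (possibly 0) if the following area is already in use.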
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
                        uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#" PRIx64 " unaligned "
                                    "(guest offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
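     *
     * A worked example (informal, 64 KiB clusters): guest_offset is 4 KiB
     * into a cluster and *bytes = 128 KiB, but only one cluster could be
     * allocated. Then requested_bytes = 132 KiB, avail_bytes = 64 KiB and
     * nb_bytes = 64 KiB: the COW region at the start covers the first
     * 4 KiB of the cluster and the COW region at the end is empty.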
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next = old_m,

        .alloc_offset = alloc_cluster_offset,
        .offset = start_of_cluster(s, guest_offset),
        .nb_clusters = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset = 0,
            .nb_bytes = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset = nb_bytes,
            .nb_bytes = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *    a) Overlap not in the first cluster -> shorten this request and
         *       let the caller handle the rest in its next loop iteration.
         *
         *    b) Real overlaps of two requests. Yield and restart the search
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
1608 | */ |
1609 | static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, |
1610 | uint64_t nb_clusters, |
1611 | enum qcow2_discard_type type, bool full_discard) |
1612 | { |
1613 | BDRVQcow2State *s = bs->opaque; |
1614 | uint64_t *l2_slice; |
1615 | int l2_index; |
1616 | int ret; |
1617 | int i; |
1618 | |
1619 | ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); |
1620 | if (ret < 0) { |
1621 | return ret; |
1622 | } |
1623 | |
1624 | /* Limit nb_clusters to one L2 slice */ |
1625 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); |
1626 | assert(nb_clusters <= INT_MAX); |
1627 | |
1628 | for (i = 0; i < nb_clusters; i++) { |
1629 | uint64_t old_l2_entry; |
1630 | |
1631 | old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]); |
1632 | |
1633 | /* |
1634 | * If full_discard is false, make sure that a discarded area reads back |
1635 | * as zeroes for v3 images (we cannot do it for v2 without actually |
1636 | * writing a zero-filled buffer). We can skip the operation if the |
1637 | * cluster is already marked as zero, or if it's unallocated and we |
1638 | * don't have a backing file. |
1639 | * |
1640 | * TODO We might want to use bdrv_block_status(bs) here, but we're |
1641 | * holding s->lock, so that doesn't work today. |
1642 | * |
1643 | * If full_discard is true, the discarded area must not read back as |
1644 | * zeroes, but rather fall through to the backing file. |
1645 | */ |
1646 | switch (qcow2_get_cluster_type(bs, old_l2_entry)) { |
1647 | case QCOW2_CLUSTER_UNALLOCATED: |
1648 | if (full_discard || !bs->backing) { |
1649 | continue; |
1650 | } |
1651 | break; |
1652 | |
1653 | case QCOW2_CLUSTER_ZERO_PLAIN: |
1654 | if (!full_discard) { |
1655 | continue; |
1656 | } |
1657 | break; |
1658 | |
1659 | case QCOW2_CLUSTER_ZERO_ALLOC: |
1660 | case QCOW2_CLUSTER_NORMAL: |
1661 | case QCOW2_CLUSTER_COMPRESSED: |
1662 | break; |
1663 | |
1664 | default: |
1665 | abort(); |
1666 | } |
1667 | |
1668 | /* First remove L2 entries */ |
1669 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
1670 | if (!full_discard && s->qcow_version >= 3) { |
1671 | l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); |
1672 | } else { |
1673 | l2_slice[l2_index + i] = cpu_to_be64(0); |
1674 | } |
1675 | |
1676 | /* Then decrease the refcount */ |
1677 | qcow2_free_any_clusters(bs, old_l2_entry, 1, type); |
1678 | } |
1679 | |
1680 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
1681 | |
1682 | return nb_clusters; |
1683 | } |
1684 | |
1685 | int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, |
1686 | uint64_t bytes, enum qcow2_discard_type type, |
1687 | bool full_discard) |
1688 | { |
1689 | BDRVQcow2State *s = bs->opaque; |
1690 | uint64_t end_offset = offset + bytes; |
1691 | uint64_t nb_clusters; |
1692 | int64_t cleared; |
1693 | int ret; |
1694 | |
1695 | /* Caller must pass aligned values, except at image end */ |
1696 | assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); |
1697 | assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || |
1698 | end_offset == bs->total_sectors << BDRV_SECTOR_BITS); |
1699 | |
1700 | nb_clusters = size_to_clusters(s, bytes); |
1701 | |
1702 | s->cache_discards = true; |
1703 | |
1704 | /* Each L2 slice is handled by its own loop iteration */ |
1705 | while (nb_clusters > 0) { |
1706 | cleared = discard_in_l2_slice(bs, offset, nb_clusters, type, |
1707 | full_discard); |
1708 | if (cleared < 0) { |
1709 | ret = cleared; |
1710 | goto fail; |
1711 | } |
1712 | |
1713 | nb_clusters -= cleared; |
1714 | offset += (cleared * s->cluster_size); |
1715 | } |
1716 | |
1717 | ret = 0; |
1718 | fail: |
1719 | s->cache_discards = false; |
1720 | qcow2_process_discards(bs, ret); |
1721 | |
1722 | return ret; |
1723 | } |
1724 | |
1725 | /* |
1726 | * This zeroes as many clusters of nb_clusters as possible at once (i.e. |
1727 | * all clusters in the same L2 slice) and returns the number of zeroed |
1728 | * clusters. |
1729 | */ |
1730 | static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, |
1731 | uint64_t nb_clusters, int flags) |
1732 | { |
1733 | BDRVQcow2State *s = bs->opaque; |
1734 | uint64_t *l2_slice; |
1735 | int l2_index; |
1736 | int ret; |
1737 | int i; |
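     | /* BDRV_REQ_MAY_UNMAP permits deallocating the data clusters instead of |
     | * merely marking them as reading back zero */ |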
1738 | bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP); |
1739 | |
1740 | ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); |
1741 | if (ret < 0) { |
1742 | return ret; |
1743 | } |
1744 | |
1745 | /* Limit nb_clusters to one L2 slice */ |
1746 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); |
1747 | assert(nb_clusters <= INT_MAX); |
1748 | |
1749 | for (i = 0; i < nb_clusters; i++) { |
1750 | uint64_t old_offset; |
1751 | QCow2ClusterType cluster_type; |
1752 | |
1753 | old_offset = be64_to_cpu(l2_slice[l2_index + i]); |
1754 | |
1755 | /* |
1756 | * Minimize L2 changes if the cluster already reads back as |
1757 | * zeroes with correct allocation. |
1758 | */ |
1759 | cluster_type = qcow2_get_cluster_type(bs, old_offset); |
1760 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN || |
1761 | (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) { |
1762 | continue; |
1763 | } |
1764 | |
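     | /* |
     | * Compressed clusters cannot carry the zero flag in place, and with |
     | * BDRV_REQ_MAY_UNMAP the data cluster should be freed anyway; in both |
     | * cases replace the whole entry with a plain zero cluster. Otherwise it |
     | * is enough to set the zero flag on the existing entry. |
     | */ |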
1765 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
1766 | if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) { |
1767 | l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); |
1768 | qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); |
1769 | } else { |
1770 | l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); |
1771 | } |
1772 | } |
1773 | |
1774 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
1775 | |
1776 | return nb_clusters; |
1777 | } |
1778 | |
1779 | int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset, |
1780 | uint64_t bytes, int flags) |
1781 | { |
1782 | BDRVQcow2State *s = bs->opaque; |
1783 | uint64_t end_offset = offset + bytes; |
1784 | uint64_t nb_clusters; |
1785 | int64_t cleared; |
1786 | int ret; |
1787 | |
1788 | /* If we have to stay in sync with an external data file, zero out |
1789 | * s->data_file first. */ |
1790 | if (data_file_is_raw(bs)) { |
1791 | assert(has_data_file(bs)); |
1792 | ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags); |
1793 | if (ret < 0) { |
1794 | return ret; |
1795 | } |
1796 | } |
1797 | |
1798 | /* Caller must pass aligned values, except at image end */ |
1799 | assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); |
1800 | assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || |
1801 | end_offset == bs->total_sectors << BDRV_SECTOR_BITS); |
1802 | |
1803 | /* The zero flag is only supported by version 3 and newer */ |
1804 | if (s->qcow_version < 3) { |
1805 | return -ENOTSUP; |
1806 | } |
1807 | |
1808 | /* Each L2 slice is handled by its own loop iteration */ |
1809 | nb_clusters = size_to_clusters(s, bytes); |
1810 | |
1811 | s->cache_discards = true; |
1812 | |
1813 | while (nb_clusters > 0) { |
1814 | cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags); |
1815 | if (cleared < 0) { |
1816 | ret = cleared; |
1817 | goto fail; |
1818 | } |
1819 | |
1820 | nb_clusters -= cleared; |
1821 | offset += (cleared * s->cluster_size); |
1822 | } |
1823 | |
1824 | ret = 0; |
1825 | fail: |
1826 | s->cache_discards = false; |
1827 | qcow2_process_discards(bs, ret); |
1828 | |
1829 | return ret; |
1830 | } |
1831 | |
1832 | /* |
1833 | * Expands all zero clusters in a specific L1 table (or deallocates them, for |
1834 | * non-backed non-pre-allocated zero clusters). |
1835 | * |
1836 | * l1_entries and *visited_l1_entries are used to keep track of progress for |
1837 | * status_cb(). l1_entries contains the total number of L1 entries and |
1838 | * *visited_l1_entries counts all visited L1 entries. |
1839 | */ |
1840 | static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, |
1841 | int l1_size, int64_t *visited_l1_entries, |
1842 | int64_t l1_entries, |
1843 | BlockDriverAmendStatusCB *status_cb, |
1844 | void *cb_opaque) |
1845 | { |
1846 | BDRVQcow2State *s = bs->opaque; |
1847 | bool is_active_l1 = (l1_table == s->l1_table); |
1848 | uint64_t *l2_slice = NULL; |
1849 | unsigned slice, slice_size2, n_slices; |
1850 | int ret; |
1851 | int i, j; |
1852 | |
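     | /* An L2 table occupies a full cluster; process it in n_slices slices of |
     | * slice_size2 bytes each */ |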
1853 | slice_size2 = s->l2_slice_size * sizeof(uint64_t); |
1854 | n_slices = s->cluster_size / slice_size2; |
1855 | |
1856 | if (!is_active_l1) { |
1857 | /* Inactive L2 tables are not kept in the L2 table cache, so we need |
1858 | * a local buffer to load them from disk */ |
1859 | l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2); |
1860 | if (l2_slice == NULL) { |
1861 | return -ENOMEM; |
1862 | } |
1863 | } |
1864 | |
1865 | for (i = 0; i < l1_size; i++) { |
1866 | uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; |
1867 | uint64_t l2_refcount; |
1868 | |
1869 | if (!l2_offset) { |
1870 | /* unallocated */ |
1871 | (*visited_l1_entries)++; |
1872 | if (status_cb) { |
1873 | status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); |
1874 | } |
1875 | continue; |
1876 | } |
1877 | |
1878 | if (offset_into_cluster(s, l2_offset)) { |
1879 | qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" |
1880 | PRIx64 " unaligned (L1 index: %#x)", |
1881 | l2_offset, i); |
1882 | ret = -EIO; |
1883 | goto fail; |
1884 | } |
1885 | |
1886 | ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, |
1887 | &l2_refcount); |
1888 | if (ret < 0) { |
1889 | goto fail; |
1890 | } |
1891 | |
1892 | for (slice = 0; slice < n_slices; slice++) { |
1893 | uint64_t slice_offset = l2_offset + slice * slice_size2; |
1894 | bool l2_dirty = false; |
1895 | if (is_active_l1) { |
1896 | /* get active L2 tables from cache */ |
1897 | ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset, |
1898 | (void **)&l2_slice); |
1899 | } else { |
1900 | /* load inactive L2 tables from disk */ |
1901 | ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2); |
1902 | } |
1903 | if (ret < 0) { |
1904 | goto fail; |
1905 | } |
1906 | |
1907 | for (j = 0; j < s->l2_slice_size; j++) { |
1908 | uint64_t l2_entry = be64_to_cpu(l2_slice[j]); |
1909 | int64_t offset = l2_entry & L2E_OFFSET_MASK; |
1910 | QCow2ClusterType cluster_type = |
1911 | qcow2_get_cluster_type(bs, l2_entry); |
1912 | |
1913 | if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN && |
1914 | cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) { |
1915 | continue; |
1916 | } |
1917 | |
1918 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
1919 | if (!bs->backing) { |
1920 | /* not backed; therefore we can simply deallocate the |
1921 | * cluster */ |
1922 | l2_slice[j] = 0; |
1923 | l2_dirty = true; |
1924 | continue; |
1925 | } |
1926 | |
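     | /* The image is backed; allocate a data cluster so that the zeroes |
     | * written below hide the backing file contents */ |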
1927 | offset = qcow2_alloc_clusters(bs, s->cluster_size); |
1928 | if (offset < 0) { |
1929 | ret = offset; |
1930 | goto fail; |
1931 | } |
1932 | |
1933 | if (l2_refcount > 1) { |
1934 | /* For shared L2 tables, set the refcount accordingly |
1935 | * (it is already 1 and needs to be l2_refcount) */ |
1936 | ret = qcow2_update_cluster_refcount( |
1937 | bs, offset >> s->cluster_bits, |
1938 | refcount_diff(1, l2_refcount), false, |
1939 | QCOW2_DISCARD_OTHER); |
1940 | if (ret < 0) { |
1941 | qcow2_free_clusters(bs, offset, s->cluster_size, |
1942 | QCOW2_DISCARD_OTHER); |
1943 | goto fail; |
1944 | } |
1945 | } |
1946 | } |
1947 | |
1948 | if (offset_into_cluster(s, offset)) { |
1949 | int l2_index = slice * s->l2_slice_size + j; |
1950 | qcow2_signal_corruption( |
1951 | bs, true, -1, -1, |
1952 | "Cluster allocation offset " |
1953 | "%#" PRIx64 " unaligned (L2 offset: %#" |
1954 | PRIx64 ", L2 index: %#x)", offset, |
1955 | l2_offset, l2_index); |
1956 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
1957 | qcow2_free_clusters(bs, offset, s->cluster_size, |
1958 | QCOW2_DISCARD_ALWAYS); |
1959 | } |
1960 | ret = -EIO; |
1961 | goto fail; |
1962 | } |
1963 | |
1964 | ret = qcow2_pre_write_overlap_check(bs, 0, offset, |
1965 | s->cluster_size, true); |
1966 | if (ret < 0) { |
1967 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
1968 | qcow2_free_clusters(bs, offset, s->cluster_size, |
1969 | QCOW2_DISCARD_ALWAYS); |
1970 | } |
1971 | goto fail; |
1972 | } |
1973 | |
1974 | ret = bdrv_pwrite_zeroes(s->data_file, offset, |
1975 | s->cluster_size, 0); |
1976 | if (ret < 0) { |
1977 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
1978 | qcow2_free_clusters(bs, offset, s->cluster_size, |
1979 | QCOW2_DISCARD_ALWAYS); |
1980 | } |
1981 | goto fail; |
1982 | } |
1983 | |
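     | /* QCOW_OFLAG_COPIED may only be set if the refcount is exactly 1 */ |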
1984 | if (l2_refcount == 1) { |
1985 | l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED); |
1986 | } else { |
1987 | l2_slice[j] = cpu_to_be64(offset); |
1988 | } |
1989 | l2_dirty = true; |
1990 | } |
1991 | |
1992 | if (is_active_l1) { |
1993 | if (l2_dirty) { |
1994 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
1995 | qcow2_cache_depends_on_flush(s->l2_table_cache); |
1996 | } |
1997 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
1998 | } else { |
1999 | if (l2_dirty) { |
2000 | ret = qcow2_pre_write_overlap_check( |
2001 | bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, |
2002 | slice_offset, slice_size2, false); |
2003 | if (ret < 0) { |
2004 | goto fail; |
2005 | } |
2006 | |
2007 | ret = bdrv_pwrite(bs->file, slice_offset, |
2008 | l2_slice, slice_size2); |
2009 | if (ret < 0) { |
2010 | goto fail; |
2011 | } |
2012 | } |
2013 | } |
2014 | } |
2015 | |
2016 | (*visited_l1_entries)++; |
2017 | if (status_cb) { |
2018 | status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); |
2019 | } |
2020 | } |
2021 | |
2022 | ret = 0; |
2023 | |
2024 | fail: |
2025 | if (l2_slice) { |
2026 | if (!is_active_l1) { |
2027 | qemu_vfree(l2_slice); |
2028 | } else { |
2029 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
2030 | } |
2031 | } |
2032 | return ret; |
2033 | } |
2034 | |
2035 | /* |
2036 | * For backed images, expands all zero clusters on the image. For non-backed |
2037 | * images, deallocates all non-pre-allocated zero clusters (and claims the |
2038 | * allocation for pre-allocated ones). This is important for downgrading to a |
2039 | * qcow2 version which doesn't yet support metadata zero clusters. |
2040 | */ |
2041 | int qcow2_expand_zero_clusters(BlockDriverState *bs, |
2042 | BlockDriverAmendStatusCB *status_cb, |
2043 | void *cb_opaque) |
2044 | { |
2045 | BDRVQcow2State *s = bs->opaque; |
2046 | uint64_t *l1_table = NULL; |
2047 | int64_t l1_entries = 0, visited_l1_entries = 0; |
2048 | int ret; |
2049 | int i, j; |
2050 | |
2051 | if (status_cb) { |
2052 | l1_entries = s->l1_size; |
2053 | for (i = 0; i < s->nb_snapshots; i++) { |
2054 | l1_entries += s->snapshots[i].l1_size; |
2055 | } |
2056 | } |
2057 | |
2058 | ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, |
2059 | &visited_l1_entries, l1_entries, |
2060 | status_cb, cb_opaque); |
2061 | if (ret < 0) { |
2062 | goto fail; |
2063 | } |
2064 | |
2065 | /* Inactive L1 tables may point to active L2 tables - therefore it is |
2066 | * necessary to flush the L2 table cache before trying to access the L2 |
2067 | * tables pointed to by inactive L1 entries (else we might try to expand |
2068 | * zero clusters that have already been expanded); furthermore, it is also |
2069 | * necessary to empty the L2 table cache, since it may contain tables which |
2070 | * are now going to be modified directly on disk, bypassing the cache. |
2071 | * qcow2_cache_empty() does both for us. */ |
2072 | ret = qcow2_cache_empty(bs, s->l2_table_cache); |
2073 | if (ret < 0) { |
2074 | goto fail; |
2075 | } |
2076 | |
2077 | for (i = 0; i < s->nb_snapshots; i++) { |
2078 | int l1_size2; |
2079 | uint64_t *new_l1_table; |
2080 | Error *local_err = NULL; |
2081 | |
2082 | ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset, |
2083 | s->snapshots[i].l1_size, sizeof(uint64_t), |
2084 | QCOW_MAX_L1_SIZE, "Snapshot L1 table", |
2085 | &local_err); |
2086 | if (ret < 0) { |
2087 | error_report_err(local_err); |
2088 | goto fail; |
2089 | } |
2090 | |
2091 | l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t); |
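     | /* reuse one buffer for all snapshot L1 tables, growing it as needed */ |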
2092 | new_l1_table = g_try_realloc(l1_table, l1_size2); |
2093 | |
2094 | if (!new_l1_table) { |
2095 | ret = -ENOMEM; |
2096 | goto fail; |
2097 | } |
2098 | |
2099 | l1_table = new_l1_table; |
2100 | |
2101 | ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, |
2102 | l1_table, l1_size2); |
2103 | if (ret < 0) { |
2104 | goto fail; |
2105 | } |
2106 | |
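     | /* the snapshot L1 table is stored big-endian on disk */ |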
2107 | for (j = 0; j < s->snapshots[i].l1_size; j++) { |
2108 | be64_to_cpus(&l1_table[j]); |
2109 | } |
2110 | |
2111 | ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, |
2112 | &visited_l1_entries, l1_entries, |
2113 | status_cb, cb_opaque); |
2114 | if (ret < 0) { |
2115 | goto fail; |
2116 | } |
2117 | } |
2118 | |
2119 | ret = 0; |
2120 | |
2121 | fail: |
2122 | g_free(l1_table); |
2123 | return ret; |
2124 | } |
2125 | |