#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
     * that the block driver should unmap (discard) blocks if it is guaranteed
     * that the result will read back as zeroes. The flag is only passed to the
     * driver if the block device is opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    /*
     * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means
     * that we do not want to call wait_serialising_requests() during the
     * read operation.
     *
     * This flag is used for backup copy-on-write operations, when we need to
     * read old data before the write (triggered by a write notifier). This is
     * safe because we already waited for other serialising requests in the
     * initiating write (see bdrv_aligned_pwritev), and it is necessary if the
     * initiating write is itself serialising (without the flag, the read
     * would deadlock waiting for the serialising write to complete).
     */
    BDRV_REQ_NO_SERIALISING     = 0x8,
    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Signifies that this write request will not change the visible disk
     * content. */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /*
     * BDRV_REQ_SERIALISING forces request serialisation for writes.
     * It is used to ensure that writes to the backing file of a backup process
     * target cannot race with a read of the backup target that defers to the
     * backing file.
     *
     * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning of
     * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
     * _DO_NOT_WAIT_FOR_SERIALISING, except that it is too long.
     */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Execute the request only if the operation can be offloaded or otherwise
     * be executed efficiently, but return an error instead of using a slow
     * fallback. */
    BDRV_REQ_NO_FALLBACK        = 0x100,

    /*
     * BDRV_REQ_PREFETCH may be used only together with BDRV_REQ_COPY_ON_READ
     * on read requests, and means that the caller does not really need the
     * data to be written to the qiov parameter, which may be NULL.
     */
    BDRV_REQ_PREFETCH           = 0x200,
    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0x3ff,
} BdrvRequestFlags;
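
/*
 * Illustrative sketch (not part of the API above): request flags are ORed
 * together and passed to the I/O helpers declared later in this header. For
 * example, a caller that wants a zeroing write and is happy for the driver to
 * discard the blocks might do something like the following ("child" and "len"
 * are assumed to be provided by the caller):
 *
 *     int ret = bdrv_pwrite_zeroes(child, 0, len, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 *
 * BDRV_REQ_MAY_UNMAP only takes effect if the node was opened with
 * BDRV_O_UNMAP (see the comment above).
 */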

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR         0x0002
#define BDRV_O_RESIZE       0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY    0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE     0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK        0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR   0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP        0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL     0x8000 /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO        0x10000 /* don't initialize for I/O */
#define BDRV_O_AUTO_RDONLY  0x20000 /* degrade to read-only if opening read-write fails */

#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)


/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"


#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)

/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flags:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 * BDRV_BLOCK_RECURSE: request that the block layer recursively search for
 *                     zeroes in the file child of the current block node
 *                     inside the returned region. Only valid together with
 *                     both BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID.
 *                     Should not appear with BDRV_BLOCK_ZERO.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data. However, whether that offset
 * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_RECURSE      0x40
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK
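
/*
 * Usage sketch (illustrative only; "bs", "offset" and "bytes" are assumed to
 * come from the caller): query the allocation status of a range and interpret
 * the returned flags according to the table above:
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *
 * A negative ret is an error. Otherwise, if (ret & BDRV_BLOCK_ZERO) is set,
 * the first pnum bytes starting at offset read as zeroes; if both
 * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID are set, the corresponding raw
 * data can be found at byte offset map within *file.
 */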

typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    BlockdevDetectZeroesOptions detect_zeroes;
    bool backing_missing;
    bool replace_backing_bs;  /* new_backing_bs is ignored if this is false */
    BlockDriverState *new_backing_bs; /* If NULL then detach the current bs */
    uint64_t perm, shared_perm;
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations like for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
     * required for writes to the block node when the caller promises that
     * the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                  | BLK_PERM_WRITE
                                  | BLK_PERM_WRITE_UNCHANGED
                                  | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};

char *bdrv_perm_names(uint64_t perm);
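
/*
 * Example (sketch only): permission masks are plain bit masks, so a parent
 * that needs to both write to and resize a child would request
 * (BLK_PERM_WRITE | BLK_PERM_RESIZE). bdrv_perm_names() turns such a mask
 * into a human-readable string for error messages; the returned string is
 * heap-allocated and is expected to be released with g_free(), as is usual
 * for strings allocated by QEMU:
 *
 *     char *names = bdrv_perm_names(BLK_PERM_WRITE | BLK_PERM_RESIZE);
 *     error_setg(errp, "Conflicting permissions: %s", names);
 *     g_free(names);
 */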

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                 Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                       Error **errp);

int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildRole *child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                         Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, QDict *options,
                                    bool keep_old_opts);
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                              Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
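
/*
 * Typical reopen sequence (an illustrative sketch; draining the subtree around
 * the commit is how these helpers are normally used, but treat that as an
 * assumption rather than a documented requirement): build a reopen queue for
 * the node and its children, then commit it. bdrv_reopen_multiple() consumes
 * the queue. The "bs", "options", "queue", "ret" and "errp" variables are
 * assumed to be provided by the caller:
 *
 *     bdrv_subtree_drained_begin(bs);
 *     queue = bdrv_reopen_queue(NULL, bs, options, true);
 *     ret = bdrv_reopen_multiple(queue, errp);
 *     bdrv_subtree_drained_end(bs);
 *
 * bdrv_reopen_set_read_only() is effectively a convenience wrapper around this
 * pattern for toggling BDRV_OPT_READ_ONLY on a single node.
 */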
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
/*
 * Efficiently zero a region of the disk image. Note that this is a regular
 * I/O request like read or write and should have a reasonable size. This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags);
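
/*
 * A minimal sketch of the chunked zeroing mentioned above (the helper name
 * and the caller-provided "child"/"length" are assumptions for illustration):
 *
 *     static int coroutine_fn zero_range(BdrvChild *child, int64_t length)
 *     {
 *         int64_t offset = 0;
 *
 *         while (offset < length) {
 *             int bytes = MIN(length - offset, BDRV_REQUEST_MAX_BYTES);
 *             int ret = bdrv_co_pwrite_zeroes(child, offset, bytes, 0);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             offset += bytes;
 *         }
 *         return 0;
 *     }
 */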
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp);
int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_change_backing_file(BlockDriverState *bs,
                             const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);
bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
                                  Error **errp);
int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
                              Error **errp);
void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       Error **errp);

/* external snapshots */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })
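
/*
 * Sketch of the usual BDRV_POLL_WHILE() pattern (illustrative only; the
 * coroutine entry point "my_co_entry" and the "done" flag are assumptions):
 * a synchronous wrapper spawns a coroutine that performs the actual I/O and
 * sets a completion flag, then polls the node's AioContext until the flag is
 * set:
 *
 *     bool done = false;
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, &done);
 *     bdrv_coroutine_enter(bs, co);
 *     BDRV_POLL_WHILE(bs, !done);
 */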

int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
int bdrv_has_zero_init_truncate(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            bool include_base, int64_t offset, int64_t bytes,
                            int64_t *pnum);
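
/*
 * Illustrative sketch (not part of the API; error handling for
 * bdrv_getlength() is omitted): scan an image for allocated ranges with
 * bdrv_is_allocated(). Each call reports via *pnum how many contiguous
 * bytes starting at offset share the same allocation status:
 *
 *     int64_t offset = 0, total = 0, len = bdrv_getlength(bs);
 *
 *     while (offset < len) {
 *         int64_t pnum;
 *         int ret = bdrv_is_allocated(bs, offset, len - offset, &pnum);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         if (ret) {
 *             total += pnum;
 *         }
 *         offset += pnum;
 *     }
 */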

bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockBackend *blk;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);
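
/*
 * The iterator above is typically used like this (sketch only;
 * bdrv_next_cleanup() is only needed when the loop is abandoned early):
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ... examine bs ...
 *     }
 */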

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque, bool read_only);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
char *bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                   const char *backing,
                                                   Error **errp);
char *bdrv_dirname(BlockDriverState *bs, Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
char *path_combine(const char *base_path, const char *filename);

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     bool quiet, Error **errp);

/* Returns the alignment in bytes that is required so that no bounce buffer
 * is required throughout the stack */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
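
/*
 * Bounce-buffer sketch (illustrative only; "child", "offset" and "bytes" are
 * assumed to come from the caller): when a guest buffer does not satisfy
 * bdrv_qiov_is_aligned(), the caller can allocate a suitably aligned bounce
 * buffer, do the I/O through it, and release it with qemu_vfree(), the usual
 * counterpart of these allocators:
 *
 *     void *bounce = qemu_try_blockalign(bs, bytes);
 *     if (!bounce) {
 *         return -ENOMEM;
 *     }
 *     ret = bdrv_pread(child, offset, bounce, bytes);
 *     ...
 *     qemu_vfree(bounce);
 */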

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildRole *child_role,
                             Error **errp);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

void bdrv_set_aio_context_ignore(BlockDriverState *bs,
                                 AioContext *new_context, GSList **ignore);
int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                             Error **errp);
int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                                   BdrvChild *ignore_child, Error **errp);
bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
                                    GSList **ignore, Error **errp);
bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                              GSList **ignore, Error **errp);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
 *
 * This polls @c->bs's AioContext until all scheduled sub-drained_ends
 * have settled, which may result in graph changes.
 */
void bdrv_parent_drained_end_single(BdrvChild *c);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and if @recursive is true its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server and device model. Note that
 * this doesn't block timers or coroutines from submitting more requests, which
 * means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled, which may result in graph changes. It also requires
 * that the caller either run in the main loop or that all involved
 * nodes (@bs and all of its parents) be in the caller's AioContext.
 */
void bdrv_drained_end(BlockDriverState *bs);
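
/*
 * Sketch of the usual drained-section pattern (illustrative only): quiesce
 * the node, perform an operation that must not race with in-flight I/O
 * (e.g. a graph change), then resume:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the graph or node state here ...
 *     bdrv_drained_end(bs);
 */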

/**
 * bdrv_drained_end_no_poll:
 *
 * Same as bdrv_drained_end(), but do not poll for the subgraph to
 * actually become unquiesced. Therefore, no graph changes will occur
 * with this function.
 *
 * *drained_end_counter is incremented for every background operation
 * that is scheduled, and will be decremented for every operation once
 * it settles. The caller must poll until it reaches 0. The counter
 * should be accessed using atomic operations only.
 */
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
                    Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);
/**
 * bdrv_register_buf/bdrv_unregister_buf:
 *
 * Register/unregister a buffer for I/O. For example, VFIO drivers are
 * interested in knowing which memory areas will later be used for I/O, so
 * that they can prepare IOMMU mappings etc. in advance for better
 * performance.
 */
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
void bdrv_unregister_buf(BlockDriverState *bs, void *host);

/**
 * bdrv_co_copy_range:
 *
 * Do offloaded copy between two children. If the operation is not implemented
 * by the driver, or if the backend storage doesn't support it, a negative
 * error code will be returned.
 *
 * Note: the block layer does not emulate this with a bounce-buffer fallback,
 * because after the first error the caller usually should not attempt another
 * offloaded copy (e.g. via copy_file_range(2)) and should instead fall back
 * to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 */
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
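
/*
 * Caller-side sketch (illustrative only; the fallback path is merely
 * indicated, not implemented): try the offloaded copy once and, per the note
 * above, switch to a plain read+write path as soon as it fails:
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes, 0, 0);
 *     if (ret < 0) {
 *         ... fall back to a bounce-buffer read from src and write to dst ...
 *     }
 */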
#endif