/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                           int nr_sectors);
static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                             int nr_sectors);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* This function drains all the throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already queued, queue
     * this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp)
{
    if (backing[0] == '\0' || path_has_protocol(backing) ||
        path_is_absolute(backing))
    {
        pstrcpy(dest, sz, backing);
    } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
        error_setg(errp, "Cannot use relative backing file names for '%s'",
                   backed);
    } else {
        path_combine(dest, sz, backed, backing);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
                                    Error **errp)
{
    char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;

    bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
                                                 dest, sz, errp);
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}

BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1; /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

/*
 * Guess image format by probing its contents.
 * This is not a good idea when your image is raw (CVE-2008-2004), but
 * we do it anyway for backward compatibility.
 *
 * @buf contains the image's first @buf_size bytes.
 * @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
 * but can be smaller if the image file is smaller)
 * @filename is its filename.
 *
 * For all block drivers, call the bdrv_probe() method to get its
 * probing score.
 * Return the first block driver with the highest probing score.
 */
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe) {
            score = d->bdrv_probe(buf, buf_size, filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files are always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* Check for empty string or invalid characters */
    if (!id_wellformed(node_name)) {
        error_setg(errp, "Invalid node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (blk_by_name(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called directly with a protocol driver as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}

/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{

    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bdrv_get_device_name(bs));
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling this function.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}

int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new();

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, &bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created and a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new();
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    bdrv_refresh_filename(bs);

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bdrv_get_device_name(bs), entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        if (bs->blk) {
            blk_dev_change_media_cb(bs->blk, true);
        }
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue on which QSIMPLEQ_INIT
 * has already been performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
1646 | BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, | |
1647 | BlockDriverState *bs, int flags) | |
1648 | { | |
1649 | assert(bs != NULL); | |
1650 | ||
1651 | BlockReopenQueueEntry *bs_entry; | |
1652 | if (bs_queue == NULL) { | |
1653 | bs_queue = g_new0(BlockReopenQueue, 1); | |
1654 | QSIMPLEQ_INIT(bs_queue); | |
1655 | } | |
1656 | ||
1657 | /* bdrv_open() masks this flag out */ | |
1658 | flags &= ~BDRV_O_PROTOCOL; | |
1659 | ||
1660 | if (bs->file) { | |
1661 | bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags)); | |
1662 | } | |
1663 | ||
1664 | bs_entry = g_new0(BlockReopenQueueEntry, 1); | |
1665 | QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry); | |
1666 | ||
1667 | bs_entry->state.bs = bs; | |
1668 | bs_entry->state.flags = flags; | |
1669 | ||
1670 | return bs_queue; | |
1671 | } | |
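/*
 * Illustrative sketch, not part of the original source: batching two BDSes
 * into one atomic reopen set. 'bs_a', 'bs_b' and the flag values are
 * hypothetical; bdrv_reopen_multiple() below frees the queue in all cases.
 */
#if 0
static int example_atomic_reopen(BlockDriverState *bs_a,
                                 BlockDriverState *bs_b, Error **errp)
{
    /* The first call with NULL allocates and initializes a new queue */
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs_a, BDRV_O_RDWR);

    /* Subsequent calls add to the same atomic 'set' */
    queue = bdrv_reopen_queue(queue, bs_b, BDRV_O_RDWR);

    /* Prepares every entry; commits all on success, aborts all on failure */
    return bdrv_reopen_multiple(queue, errp);
}
#endif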
1672 | ||
1673 | /* | |
1674 | * Reopen multiple BlockDriverStates atomically & transactionally. | |
1675 | * | |
1676 | * The queue passed in (bs_queue) must have been built up previously | |
1677 | * via bdrv_reopen_queue(). | |
1678 | * | |
1679 | * Reopens all BDS specified in the queue, with the appropriate | |
1680 | * flags. All devices are prepared for reopen, and failure of any | |
1681 | * device will cause all device changes to be abandoned, and intermediate | |
1682 | * data cleaned up. | |
1683 | * | |
1684 | * If all devices prepare successfully, then the changes are committed | |
1685 | * to all devices. | |
1686 | * | |
1687 | */ | |
1688 | int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) | |
1689 | { | |
1690 | int ret = -1; | |
1691 | BlockReopenQueueEntry *bs_entry, *next; | |
1692 | Error *local_err = NULL; | |
1693 | ||
1694 | assert(bs_queue != NULL); | |
1695 | ||
1696 | bdrv_drain_all(); | |
1697 | ||
1698 | QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { | |
1699 | if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) { | |
1700 | error_propagate(errp, local_err); | |
1701 | goto cleanup; | |
1702 | } | |
1703 | bs_entry->prepared = true; | |
1704 | } | |
1705 | ||
1706 | /* If we reach this point, we have success and just need to apply the | |
1707 | * changes | |
1708 | */ | |
1709 | QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { | |
1710 | bdrv_reopen_commit(&bs_entry->state); | |
1711 | } | |
1712 | ||
1713 | ret = 0; | |
1714 | ||
1715 | cleanup: | |
1716 | QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { | |
1717 | if (ret && bs_entry->prepared) { | |
1718 | bdrv_reopen_abort(&bs_entry->state); | |
1719 | } | |
1720 | g_free(bs_entry); | |
1721 | } | |
1722 | g_free(bs_queue); | |
1723 | return ret; | |
1724 | } | |
1725 | ||
1726 | ||
1727 | /* Reopen a single BlockDriverState with the specified flags. */ | |
1728 | int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp) | |
1729 | { | |
1730 | int ret = -1; | |
1731 | Error *local_err = NULL; | |
1732 | BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags); | |
1733 | ||
1734 | ret = bdrv_reopen_multiple(queue, &local_err); | |
1735 | if (local_err != NULL) { | |
1736 | error_propagate(errp, local_err); | |
1737 | } | |
1738 | return ret; | |
1739 | } | |
1740 | ||
1741 | ||
1742 | /* | |
1743 | * Prepares a BlockDriverState for reopen. All changes are staged in the | |
1744 | * 'opaque' field of the BDRVReopenState, which is used and allocated by | |
1745 | * the block driver's .bdrv_reopen_prepare() callback. | |
1746 | * | |
1747 | * bs is the BlockDriverState to reopen | |
1748 | * flags are the new open flags | |
1749 | * queue is the reopen queue | |
1750 | * | |
1751 | * Returns 0 on success, non-zero on error. On error errp will be set | |
1752 | * as well. | |
1753 | * | |
1754 | * On failure, bdrv_reopen_abort() will be called to clean up any data. | |
1755 | * It is then the responsibility of the caller to call abort() or | |
1756 | * commit() for any other BDSes that have been left in a prepare() state. | |
1757 | * | |
1758 | */ | |
1759 | int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, | |
1760 | Error **errp) | |
1761 | { | |
1762 | int ret = -1; | |
1763 | Error *local_err = NULL; | |
1764 | BlockDriver *drv; | |
1765 | ||
1766 | assert(reopen_state != NULL); | |
1767 | assert(reopen_state->bs->drv != NULL); | |
1768 | drv = reopen_state->bs->drv; | |
1769 | ||
1770 | /* if we are to stay read-only, do not allow permission change | |
1771 | * to r/w */ | |
1772 | if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) && | |
1773 | reopen_state->flags & BDRV_O_RDWR) { | |
1774 | error_set(errp, QERR_DEVICE_IS_READ_ONLY, | |
1775 | bdrv_get_device_name(reopen_state->bs)); | |
1776 | goto error; | |
1777 | } | |
1778 | ||
1779 | ||
1780 | ret = bdrv_flush(reopen_state->bs); | |
1781 | if (ret) { | |
1782 | error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive", | |
1783 | strerror(-ret)); | |
1784 | goto error; | |
1785 | } | |
1786 | ||
1787 | if (drv->bdrv_reopen_prepare) { | |
1788 | ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err); | |
1789 | if (ret) { | |
1790 | if (local_err != NULL) { | |
1791 | error_propagate(errp, local_err); | |
1792 | } else { | |
1793 | error_setg(errp, "failed while preparing to reopen image '%s'", | |
1794 | reopen_state->bs->filename); | |
1795 | } | |
1796 | goto error; | |
1797 | } | |
1798 | } else { | |
1799 | /* It is currently mandatory to have a bdrv_reopen_prepare() | |
1800 | * handler for each supported drv. */ | |
1801 | error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, | |
1802 | drv->format_name, bdrv_get_device_name(reopen_state->bs), | |
1803 | "reopening of file"); | |
1804 | ret = -1; | |
1805 | goto error; | |
1806 | } | |
1807 | ||
1808 | ret = 0; | |
1809 | ||
1810 | error: | |
1811 | return ret; | |
1812 | } | |
1813 | ||
1814 | /* | |
1815 | * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and | |
1816 | * makes them final by swapping the staging BlockDriverState contents into | |
1817 | * the active BlockDriverState contents. | |
1818 | */ | |
1819 | void bdrv_reopen_commit(BDRVReopenState *reopen_state) | |
1820 | { | |
1821 | BlockDriver *drv; | |
1822 | ||
1823 | assert(reopen_state != NULL); | |
1824 | drv = reopen_state->bs->drv; | |
1825 | assert(drv != NULL); | |
1826 | ||
1827 | /* If there are any driver level actions to take */ | |
1828 | if (drv->bdrv_reopen_commit) { | |
1829 | drv->bdrv_reopen_commit(reopen_state); | |
1830 | } | |
1831 | ||
1832 | /* set BDS specific flags now */ | |
1833 | reopen_state->bs->open_flags = reopen_state->flags; | |
1834 | reopen_state->bs->enable_write_cache = !!(reopen_state->flags & | |
1835 | BDRV_O_CACHE_WB); | |
1836 | reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR); | |
1837 | ||
1838 | bdrv_refresh_limits(reopen_state->bs, NULL); | |
1839 | } | |
1840 | ||
1841 | /* | |
1842 | * Abort the reopen, and delete and free the staged changes in | |
1843 | * reopen_state | |
1844 | */ | |
1845 | void bdrv_reopen_abort(BDRVReopenState *reopen_state) | |
1846 | { | |
1847 | BlockDriver *drv; | |
1848 | ||
1849 | assert(reopen_state != NULL); | |
1850 | drv = reopen_state->bs->drv; | |
1851 | assert(drv != NULL); | |
1852 | ||
1853 | if (drv->bdrv_reopen_abort) { | |
1854 | drv->bdrv_reopen_abort(reopen_state); | |
1855 | } | |
1856 | } | |
1857 | ||
1858 | ||
1859 | void bdrv_close(BlockDriverState *bs) | |
1860 | { | |
1861 | BdrvAioNotifier *ban, *ban_next; | |
1862 | ||
1863 | if (bs->job) { | |
1864 | block_job_cancel_sync(bs->job); | |
1865 | } | |
1866 | bdrv_drain_all(); /* complete I/O */ | |
1867 | bdrv_flush(bs); | |
1868 | bdrv_drain_all(); /* in case flush left pending I/O */ | |
1869 | notifier_list_notify(&bs->close_notifiers, bs); | |
1870 | ||
1871 | if (bs->drv) { | |
1872 | if (bs->backing_hd) { | |
1873 | BlockDriverState *backing_hd = bs->backing_hd; | |
1874 | bdrv_set_backing_hd(bs, NULL); | |
1875 | bdrv_unref(backing_hd); | |
1876 | } | |
1877 | bs->drv->bdrv_close(bs); | |
1878 | g_free(bs->opaque); | |
1879 | bs->opaque = NULL; | |
1880 | bs->drv = NULL; | |
1881 | bs->copy_on_read = 0; | |
1882 | bs->backing_file[0] = '\0'; | |
1883 | bs->backing_format[0] = '\0'; | |
1884 | bs->total_sectors = 0; | |
1885 | bs->encrypted = 0; | |
1886 | bs->valid_key = 0; | |
1887 | bs->sg = 0; | |
1888 | bs->growable = 0; | |
1889 | bs->zero_beyond_eof = false; | |
1890 | QDECREF(bs->options); | |
1891 | bs->options = NULL; | |
1892 | QDECREF(bs->full_open_options); | |
1893 | bs->full_open_options = NULL; | |
1894 | ||
1895 | if (bs->file != NULL) { | |
1896 | bdrv_unref(bs->file); | |
1897 | bs->file = NULL; | |
1898 | } | |
1899 | } | |
1900 | ||
1901 | if (bs->blk) { | |
1902 | blk_dev_change_media_cb(bs->blk, false); | |
1903 | } | |
1904 | ||
1905 | /* throttling disk I/O limits */ | |
1906 | if (bs->io_limits_enabled) { | |
1907 | bdrv_io_limits_disable(bs); | |
1908 | } | |
1909 | ||
1910 | QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { | |
1911 | g_free(ban); | |
1912 | } | |
1913 | QLIST_INIT(&bs->aio_notifiers); | |
1914 | } | |
1915 | ||
1916 | void bdrv_close_all(void) | |
1917 | { | |
1918 | BlockDriverState *bs; | |
1919 | ||
1920 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
1921 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
1922 | ||
1923 | aio_context_acquire(aio_context); | |
1924 | bdrv_close(bs); | |
1925 | aio_context_release(aio_context); | |
1926 | } | |
1927 | } | |
1928 | ||
1929 | /* Check if any requests are in-flight (including throttled requests) */ | |
1930 | static bool bdrv_requests_pending(BlockDriverState *bs) | |
1931 | { | |
1932 | if (!QLIST_EMPTY(&bs->tracked_requests)) { | |
1933 | return true; | |
1934 | } | |
1935 | if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { | |
1936 | return true; | |
1937 | } | |
1938 | if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { | |
1939 | return true; | |
1940 | } | |
1941 | if (bs->file && bdrv_requests_pending(bs->file)) { | |
1942 | return true; | |
1943 | } | |
1944 | if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { | |
1945 | return true; | |
1946 | } | |
1947 | return false; | |
1948 | } | |
1949 | ||
1950 | static bool bdrv_drain_one(BlockDriverState *bs) | |
1951 | { | |
1952 | bool bs_busy; | |
1953 | ||
1954 | bdrv_flush_io_queue(bs); | |
1955 | bdrv_start_throttled_reqs(bs); | |
1956 | bs_busy = bdrv_requests_pending(bs); | |
1957 | bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy); | |
1958 | return bs_busy; | |
1959 | } | |
1960 | ||
1961 | /* | |
1962 | * Wait for pending requests to complete on a single BlockDriverState subtree | |
1963 | * | |
1964 | * See the warning in bdrv_drain_all(). This function can only be called if | |
1965 | * you are sure nothing can generate I/O because you have op blockers | |
1966 | * installed. | |
1967 | * | |
1968 | * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState | |
1969 | * AioContext. | |
1970 | */ | |
1971 | void bdrv_drain(BlockDriverState *bs) | |
1972 | { | |
1973 | while (bdrv_drain_one(bs)) { | |
1974 | /* Keep iterating */ | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | /* | |
1979 | * Wait for pending requests to complete across all BlockDriverStates | |
1980 | * | |
1981 | * This function does not flush data to disk, use bdrv_flush_all() for that | |
1982 | * after calling this function. | |
1983 | * | |
1984 | * Note that completion of an asynchronous I/O operation can trigger any | |
1985 | * number of other I/O operations on other devices---for example a coroutine | |
1986 | * can be arbitrarily complex and a constant flow of I/O can come until the | |
1987 | * coroutine is complete. Because of this, it is not possible to have a | |
1988 | * function to drain a single device's I/O queue. | |
1989 | */ | |
1990 | void bdrv_drain_all(void) | |
1991 | { | |
1992 | /* Always run first iteration so any pending completion BHs run */ | |
1993 | bool busy = true; | |
1994 | BlockDriverState *bs; | |
1995 | ||
1996 | while (busy) { | |
1997 | busy = false; | |
1998 | ||
1999 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
2000 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
2001 | ||
2002 | aio_context_acquire(aio_context); | |
2003 | busy |= bdrv_drain_one(bs); | |
2004 | aio_context_release(aio_context); | |
2005 | } | |
2006 | } | |
2007 | } | |
2008 | ||
2009 | /* Make a BlockDriverState anonymous by removing it from the bdrv_states and | |
2010 | * graph_bdrv_states lists. | |
2011 | * Also, NUL-terminate node_name to prevent a double remove. */ | |
2012 | void bdrv_make_anon(BlockDriverState *bs) | |
2013 | { | |
2014 | /* | |
2015 | * Take care to remove bs from bdrv_states only when it's actually | |
2016 | * in it. Note that bs->device_list.tqe_prev is initially null, | |
2017 | * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish | |
2018 | * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by | |
2019 | * resetting it to null on remove. | |
2020 | */ | |
2021 | if (bs->device_list.tqe_prev) { | |
2022 | QTAILQ_REMOVE(&bdrv_states, bs, device_list); | |
2023 | bs->device_list.tqe_prev = NULL; | |
2024 | } | |
2025 | if (bs->node_name[0] != '\0') { | |
2026 | QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list); | |
2027 | } | |
2028 | bs->node_name[0] = '\0'; | |
2029 | } | |
2030 | ||
2031 | static void bdrv_rebind(BlockDriverState *bs) | |
2032 | { | |
2033 | if (bs->drv && bs->drv->bdrv_rebind) { | |
2034 | bs->drv->bdrv_rebind(bs); | |
2035 | } | |
2036 | } | |
2037 | ||
2038 | static void bdrv_move_feature_fields(BlockDriverState *bs_dest, | |
2039 | BlockDriverState *bs_src) | |
2040 | { | |
2041 | /* move some fields that need to stay attached to the device */ | |
2042 | ||
2043 | /* dev info */ | |
2044 | bs_dest->guest_block_size = bs_src->guest_block_size; | |
2045 | bs_dest->copy_on_read = bs_src->copy_on_read; | |
2046 | ||
2047 | bs_dest->enable_write_cache = bs_src->enable_write_cache; | |
2048 | ||
2049 | /* i/o throttled req */ | |
2050 | memcpy(&bs_dest->throttle_state, | |
2051 | &bs_src->throttle_state, | |
2052 | sizeof(ThrottleState)); | |
2053 | bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0]; | |
2054 | bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1]; | |
2055 | bs_dest->io_limits_enabled = bs_src->io_limits_enabled; | |
2056 | ||
2057 | /* r/w error */ | |
2058 | bs_dest->on_read_error = bs_src->on_read_error; | |
2059 | bs_dest->on_write_error = bs_src->on_write_error; | |
2060 | ||
2061 | /* i/o status */ | |
2062 | bs_dest->iostatus_enabled = bs_src->iostatus_enabled; | |
2063 | bs_dest->iostatus = bs_src->iostatus; | |
2064 | ||
2065 | /* dirty bitmap */ | |
2066 | bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps; | |
2067 | ||
2068 | /* reference count */ | |
2069 | bs_dest->refcnt = bs_src->refcnt; | |
2070 | ||
2071 | /* job */ | |
2072 | bs_dest->job = bs_src->job; | |
2073 | ||
2074 | /* keep the same entry in bdrv_states */ | |
2075 | bs_dest->device_list = bs_src->device_list; | |
2076 | bs_dest->blk = bs_src->blk; | |
2077 | ||
2078 | memcpy(bs_dest->op_blockers, bs_src->op_blockers, | |
2079 | sizeof(bs_dest->op_blockers)); | |
2080 | } | |
2081 | ||
2082 | /* | |
2083 | * Swap bs contents for two image chains while they are live, | |
2084 | * while keeping required fields on the BlockDriverState that is | |
2085 | * actually attached to a device. | |
2086 | * | |
2087 | * This will modify the BlockDriverState fields, and swap contents | |
2088 | * between bs_new and bs_old. Both bs_new and bs_old are modified. | |
2089 | * | |
2090 | * bs_new must not be attached to a BlockBackend. | |
2091 | * | |
2092 | * This function does not create any image files. | |
2093 | */ | |
2094 | void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) | |
2095 | { | |
2096 | BlockDriverState tmp; | |
2097 | ||
2098 | /* The code needs to swap the node_name, but simply swapping node_list won't | |
2099 | * work, so first remove the nodes from the graph list, do the swap, then | |
2100 | * insert them back if needed. | |
2101 | */ | |
2102 | if (bs_new->node_name[0] != '\0') { | |
2103 | QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list); | |
2104 | } | |
2105 | if (bs_old->node_name[0] != '\0') { | |
2106 | QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list); | |
2107 | } | |
2108 | ||
2109 | /* bs_new must be unattached and shouldn't have anything fancy enabled */ | |
2110 | assert(!bs_new->blk); | |
2111 | assert(QLIST_EMPTY(&bs_new->dirty_bitmaps)); | |
2112 | assert(bs_new->job == NULL); | |
2113 | assert(bs_new->io_limits_enabled == false); | |
2114 | assert(!throttle_have_timer(&bs_new->throttle_state)); | |
2115 | ||
2116 | tmp = *bs_new; | |
2117 | *bs_new = *bs_old; | |
2118 | *bs_old = tmp; | |
2119 | ||
2120 | /* there are some fields that should not be swapped, move them back */ | |
2121 | bdrv_move_feature_fields(&tmp, bs_old); | |
2122 | bdrv_move_feature_fields(bs_old, bs_new); | |
2123 | bdrv_move_feature_fields(bs_new, &tmp); | |
2124 | ||
2125 | /* bs_new must remain unattached */ | |
2126 | assert(!bs_new->blk); | |
2127 | ||
2128 | /* Check a few fields that should remain attached to the device */ | |
2129 | assert(bs_new->job == NULL); | |
2130 | assert(bs_new->io_limits_enabled == false); | |
2131 | assert(!throttle_have_timer(&bs_new->throttle_state)); | |
2132 | ||
2133 | /* insert the nodes back into the graph node list if needed */ | |
2134 | if (bs_new->node_name[0] != '\0') { | |
2135 | QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list); | |
2136 | } | |
2137 | if (bs_old->node_name[0] != '\0') { | |
2138 | QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list); | |
2139 | } | |
2140 | ||
2141 | bdrv_rebind(bs_new); | |
2142 | bdrv_rebind(bs_old); | |
2143 | } | |
2144 | ||
2145 | /* | |
2146 | * Add new bs contents at the top of an image chain while the chain is | |
2147 | * live, while keeping required fields on the top layer. | |
2148 | * | |
2149 | * This will modify the BlockDriverState fields, and swap contents | |
2150 | * between bs_new and bs_top. Both bs_new and bs_top are modified. | |
2151 | * | |
2152 | * bs_new must not be attached to a BlockBackend. | |
2153 | * | |
2154 | * This function does not create any image files. | |
2155 | */ | |
2156 | void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) | |
2157 | { | |
2158 | bdrv_swap(bs_new, bs_top); | |
2159 | ||
2160 | /* The contents of 'tmp' will become bs_top, as we are | |
2161 | * swapping bs_new and bs_top contents. */ | |
2162 | bdrv_set_backing_hd(bs_top, bs_new); | |
2163 | } | |
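/*
 * Illustrative sketch, an assumption rather than upstream code: a live
 * external snapshot amounts to opening a fresh overlay whose backing file is
 * the active layer and appending it on top. 'overlay' is assumed to be a
 * freshly opened BDS that is not attached to any BlockBackend.
 */
#if 0
static void example_take_snapshot(BlockDriverState *active,
                                  BlockDriverState *overlay)
{
    bdrv_append(overlay, active);
    /* Because of the swap, the 'active' pointer now holds the overlay's
     * contents, and 'overlay' holds the old top layer as its backing_hd. */
}
#endif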
2164 | ||
2165 | static void bdrv_delete(BlockDriverState *bs) | |
2166 | { | |
2167 | assert(!bs->job); | |
2168 | assert(bdrv_op_blocker_is_empty(bs)); | |
2169 | assert(!bs->refcnt); | |
2170 | assert(QLIST_EMPTY(&bs->dirty_bitmaps)); | |
2171 | ||
2172 | bdrv_close(bs); | |
2173 | ||
2174 | /* remove from list, if necessary */ | |
2175 | bdrv_make_anon(bs); | |
2176 | ||
2177 | g_free(bs); | |
2178 | } | |
2179 | ||
2180 | /* | |
2181 | * Run consistency checks on an image | |
2182 | * | |
2183 | * Returns 0 if the check could be completed (it doesn't mean that the image is | |
2184 | * free of errors) or -errno when an internal error occurred. The results of the | |
2185 | * check are stored in res. | |
2186 | */ | |
2187 | int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) | |
2188 | { | |
2189 | if (bs->drv == NULL) { | |
2190 | return -ENOMEDIUM; | |
2191 | } | |
2192 | if (bs->drv->bdrv_check == NULL) { | |
2193 | return -ENOTSUP; | |
2194 | } | |
2195 | ||
2196 | memset(res, 0, sizeof(*res)); | |
2197 | return bs->drv->bdrv_check(bs, res, fix); | |
2198 | } | |
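/*
 * Illustrative sketch (an assumption, not upstream code): invoking a
 * report-only consistency check and printing two of the counters the
 * driver fills in.
 */
#if 0
static void example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res, 0); /* no BDRV_FIX_* flags: report only */

    if (ret < 0) {
        fprintf(stderr, "check did not complete: %s\n", strerror(-ret));
        return;
    }
    fprintf(stderr, "corruptions: %d, leaks: %d\n",
            res.corruptions, res.leaks);
}
#endif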
2199 | ||
2200 | #define COMMIT_BUF_SECTORS 2048 | |
2201 | ||
2202 | /* commit COW file into the raw image */ | |
2203 | int bdrv_commit(BlockDriverState *bs) | |
2204 | { | |
2205 | BlockDriver *drv = bs->drv; | |
2206 | int64_t sector, total_sectors, length, backing_length; | |
2207 | int n, ro, open_flags; | |
2208 | int ret = 0; | |
2209 | uint8_t *buf = NULL; | |
2210 | ||
2211 | if (!drv) { | |
2212 | return -ENOMEDIUM; | |
2213 | } | |
2214 | if (!bs->backing_hd) { | |
2215 | return -ENOTSUP; | |
2216 | } | |
2217 | ||
2218 | if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) || | |
2219 | bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) { | |
2220 | return -EBUSY; | |
2221 | } | |
2222 | ||
2223 | ro = bs->backing_hd->read_only; | |
2224 | open_flags = bs->backing_hd->open_flags; | |
2225 | ||
2226 | if (ro) { | |
2227 | if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) { | |
2228 | return -EACCES; | |
2229 | } | |
2230 | } | |
2231 | ||
2232 | length = bdrv_getlength(bs); | |
2233 | if (length < 0) { | |
2234 | ret = length; | |
2235 | goto ro_cleanup; | |
2236 | } | |
2237 | ||
2238 | backing_length = bdrv_getlength(bs->backing_hd); | |
2239 | if (backing_length < 0) { | |
2240 | ret = backing_length; | |
2241 | goto ro_cleanup; | |
2242 | } | |
2243 | ||
2244 | /* If our top snapshot is larger than the backing file image, | |
2245 | * grow the backing file image if possible. If not possible, | |
2246 | * we must return an error */ | |
2247 | if (length > backing_length) { | |
2248 | ret = bdrv_truncate(bs->backing_hd, length); | |
2249 | if (ret < 0) { | |
2250 | goto ro_cleanup; | |
2251 | } | |
2252 | } | |
2253 | ||
2254 | total_sectors = length >> BDRV_SECTOR_BITS; | |
2255 | ||
2256 | /* qemu_try_blockalign() for bs will choose an alignment that works for | |
2257 | * bs->backing_hd as well, so no need to compare the alignment manually. */ | |
2258 | buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE); | |
2259 | if (buf == NULL) { | |
2260 | ret = -ENOMEM; | |
2261 | goto ro_cleanup; | |
2262 | } | |
2263 | ||
2264 | for (sector = 0; sector < total_sectors; sector += n) { | |
2265 | ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n); | |
2266 | if (ret < 0) { | |
2267 | goto ro_cleanup; | |
2268 | } | |
2269 | if (ret) { | |
2270 | ret = bdrv_read(bs, sector, buf, n); | |
2271 | if (ret < 0) { | |
2272 | goto ro_cleanup; | |
2273 | } | |
2274 | ||
2275 | ret = bdrv_write(bs->backing_hd, sector, buf, n); | |
2276 | if (ret < 0) { | |
2277 | goto ro_cleanup; | |
2278 | } | |
2279 | } | |
2280 | } | |
2281 | ||
2282 | if (drv->bdrv_make_empty) { | |
2283 | ret = drv->bdrv_make_empty(bs); | |
2284 | if (ret < 0) { | |
2285 | goto ro_cleanup; | |
2286 | } | |
2287 | bdrv_flush(bs); | |
2288 | } | |
2289 | ||
2290 | /* | |
2291 | * Make sure all data we wrote to the backing device is actually | |
2292 | * stable on disk. | |
2293 | */ | |
2294 | if (bs->backing_hd) { | |
2295 | bdrv_flush(bs->backing_hd); | |
2296 | } | |
2297 | ||
2298 | ret = 0; | |
2299 | ro_cleanup: | |
2300 | qemu_vfree(buf); | |
2301 | ||
2302 | if (ro) { | |
2303 | /* ignoring error return here */ | |
2304 | bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL); | |
2305 | } | |
2306 | ||
2307 | return ret; | |
2308 | } | |
2309 | ||
2310 | int bdrv_commit_all(void) | |
2311 | { | |
2312 | BlockDriverState *bs; | |
2313 | ||
2314 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
2315 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
2316 | ||
2317 | aio_context_acquire(aio_context); | |
2318 | if (bs->drv && bs->backing_hd) { | |
2319 | int ret = bdrv_commit(bs); | |
2320 | if (ret < 0) { | |
2321 | aio_context_release(aio_context); | |
2322 | return ret; | |
2323 | } | |
2324 | } | |
2325 | aio_context_release(aio_context); | |
2326 | } | |
2327 | return 0; | |
2328 | } | |
2329 | ||
2330 | /** | |
2331 | * Remove an active request from the tracked requests list | |
2332 | * | |
2333 | * This function should be called when a tracked request is completing. | |
2334 | */ | |
2335 | static void tracked_request_end(BdrvTrackedRequest *req) | |
2336 | { | |
2337 | if (req->serialising) { | |
2338 | req->bs->serialising_in_flight--; | |
2339 | } | |
2340 | ||
2341 | QLIST_REMOVE(req, list); | |
2342 | qemu_co_queue_restart_all(&req->wait_queue); | |
2343 | } | |
2344 | ||
2345 | /** | |
2346 | * Add an active request to the tracked requests list | |
2347 | */ | |
2348 | static void tracked_request_begin(BdrvTrackedRequest *req, | |
2349 | BlockDriverState *bs, | |
2350 | int64_t offset, | |
2351 | unsigned int bytes, bool is_write) | |
2352 | { | |
2353 | *req = (BdrvTrackedRequest){ | |
2354 | .bs = bs, | |
2355 | .offset = offset, | |
2356 | .bytes = bytes, | |
2357 | .is_write = is_write, | |
2358 | .co = qemu_coroutine_self(), | |
2359 | .serialising = false, | |
2360 | .overlap_offset = offset, | |
2361 | .overlap_bytes = bytes, | |
2362 | }; | |
2363 | ||
2364 | qemu_co_queue_init(&req->wait_queue); | |
2365 | ||
2366 | QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); | |
2367 | } | |
2368 | ||
2369 | static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | |
2370 | { | |
2371 | int64_t overlap_offset = req->offset & ~(align - 1); | |
2372 | unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) | |
2373 | - overlap_offset; | |
2374 | ||
2375 | if (!req->serialising) { | |
2376 | req->bs->serialising_in_flight++; | |
2377 | req->serialising = true; | |
2378 | } | |
2379 | ||
2380 | req->overlap_offset = MIN(req->overlap_offset, overlap_offset); | |
2381 | req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); | |
2382 | } | |
2383 | ||
2384 | /** | |
2385 | * Round a region to cluster boundaries | |
2386 | */ | |
2387 | void bdrv_round_to_clusters(BlockDriverState *bs, | |
2388 | int64_t sector_num, int nb_sectors, | |
2389 | int64_t *cluster_sector_num, | |
2390 | int *cluster_nb_sectors) | |
2391 | { | |
2392 | BlockDriverInfo bdi; | |
2393 | ||
2394 | if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { | |
2395 | *cluster_sector_num = sector_num; | |
2396 | *cluster_nb_sectors = nb_sectors; | |
2397 | } else { | |
2398 | int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; | |
2399 | *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); | |
2400 | *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + | |
2401 | nb_sectors, c); | |
2402 | } | |
2403 | } | |
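/*
 * Worked example (illustrative): with a 64 KiB cluster size, c = 65536 / 512
 * = 128 sectors, so a request covering sectors [130, 140) rounds out to the
 * cluster-aligned region [128, 256):
 *   *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)          = 128
 *   *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128
 */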
2404 | ||
2405 | static int bdrv_get_cluster_size(BlockDriverState *bs) | |
2406 | { | |
2407 | BlockDriverInfo bdi; | |
2408 | int ret; | |
2409 | ||
2410 | ret = bdrv_get_info(bs, &bdi); | |
2411 | if (ret < 0 || bdi.cluster_size == 0) { | |
2412 | return bs->request_alignment; | |
2413 | } else { | |
2414 | return bdi.cluster_size; | |
2415 | } | |
2416 | } | |
2417 | ||
2418 | static bool tracked_request_overlaps(BdrvTrackedRequest *req, | |
2419 | int64_t offset, unsigned int bytes) | |
2420 | { | |
2421 | /* aaaa bbbb */ | |
2422 | if (offset >= req->overlap_offset + req->overlap_bytes) { | |
2423 | return false; | |
2424 | } | |
2425 | /* bbbb aaaa */ | |
2426 | if (req->overlap_offset >= offset + bytes) { | |
2427 | return false; | |
2428 | } | |
2429 | return true; | |
2430 | } | |
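/*
 * Worked example (illustrative): a request with overlap_offset = 4096 and
 * overlap_bytes = 4096 covers [4096, 8192). A candidate at offset = 8192
 * fails the first test (no overlap), offset = 0 with bytes = 4096 fails
 * the second, while offset = 0 with bytes = 4097 overlaps.
 */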
2431 | ||
2432 | static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) | |
2433 | { | |
2434 | BlockDriverState *bs = self->bs; | |
2435 | BdrvTrackedRequest *req; | |
2436 | bool retry; | |
2437 | bool waited = false; | |
2438 | ||
2439 | if (!bs->serialising_in_flight) { | |
2440 | return false; | |
2441 | } | |
2442 | ||
2443 | do { | |
2444 | retry = false; | |
2445 | QLIST_FOREACH(req, &bs->tracked_requests, list) { | |
2446 | if (req == self || (!req->serialising && !self->serialising)) { | |
2447 | continue; | |
2448 | } | |
2449 | if (tracked_request_overlaps(req, self->overlap_offset, | |
2450 | self->overlap_bytes)) | |
2451 | { | |
2452 | /* Hitting this means there was a reentrant request, for | |
2453 | * example, a block driver issuing nested requests. This must | |
2454 | * never happen since it means deadlock. | |
2455 | */ | |
2456 | assert(qemu_coroutine_self() != req->co); | |
2457 | ||
2458 | /* If the request is already (indirectly) waiting for us, or | |
2459 | * will wait for us as soon as it wakes up, then just go on | |
2460 | * (instead of producing a deadlock in the former case). */ | |
2461 | if (!req->waiting_for) { | |
2462 | self->waiting_for = req; | |
2463 | qemu_co_queue_wait(&req->wait_queue); | |
2464 | self->waiting_for = NULL; | |
2465 | retry = true; | |
2466 | waited = true; | |
2467 | break; | |
2468 | } | |
2469 | } | |
2470 | } | |
2471 | } while (retry); | |
2472 | ||
2473 | return waited; | |
2474 | } | |
2475 | ||
2476 | /* | |
2477 | * Return values: | |
2478 | * 0 - success | |
2479 | * -EINVAL - backing format specified, but no file | |
2480 | * -ENOSPC - can't update the backing file because no space is left in the | |
2481 | * image file header | |
2482 | * -ENOTSUP - format driver doesn't support changing the backing file | |
2483 | */ | |
2484 | int bdrv_change_backing_file(BlockDriverState *bs, | |
2485 | const char *backing_file, const char *backing_fmt) | |
2486 | { | |
2487 | BlockDriver *drv = bs->drv; | |
2488 | int ret; | |
2489 | ||
2490 | /* Backing file format doesn't make sense without a backing file */ | |
2491 | if (backing_fmt && !backing_file) { | |
2492 | return -EINVAL; | |
2493 | } | |
2494 | ||
2495 | if (drv->bdrv_change_backing_file != NULL) { | |
2496 | ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt); | |
2497 | } else { | |
2498 | ret = -ENOTSUP; | |
2499 | } | |
2500 | ||
2501 | if (ret == 0) { | |
2502 | pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); | |
2503 | pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); | |
2504 | } | |
2505 | return ret; | |
2506 | } | |
2507 | ||
2508 | /* | |
2509 | * Finds the image layer in the chain that has 'bs' as its backing file. | |
2510 | * | |
2511 | * active is the current topmost image. | |
2512 | * | |
2513 | * Returns NULL if bs is not found in active's image chain, | |
2514 | * or if active == bs. | |
2515 | * | |
2516 | * Returns the bottommost base image if bs == NULL. | |
2517 | */ | |
2518 | BlockDriverState *bdrv_find_overlay(BlockDriverState *active, | |
2519 | BlockDriverState *bs) | |
2520 | { | |
2521 | while (active && bs != active->backing_hd) { | |
2522 | active = active->backing_hd; | |
2523 | } | |
2524 | ||
2525 | return active; | |
2526 | } | |
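/*
 * Illustrative example: for the chain base <- mid <- active,
 * bdrv_find_overlay(active, base) returns mid, bdrv_find_overlay(active, mid)
 * returns active, and bdrv_find_overlay(active, NULL) walks down to the
 * bottommost image, base; this is exactly what bdrv_find_base() relies on.
 */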
2527 | ||
2528 | /* Given a BDS, searches for the base layer. */ | |
2529 | BlockDriverState *bdrv_find_base(BlockDriverState *bs) | |
2530 | { | |
2531 | return bdrv_find_overlay(bs, NULL); | |
2532 | } | |
2533 | ||
2534 | typedef struct BlkIntermediateStates { | |
2535 | BlockDriverState *bs; | |
2536 | QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; | |
2537 | } BlkIntermediateStates; | |
2538 | ||
2539 | ||
2540 | /* | |
2541 | * Drops images above 'base' up to and including 'top', and sets the image | |
2542 | * above 'top' to have base as its backing file. | |
2543 | * | |
2544 | * Requires that the overlay to 'top' is opened r/w, so that the backing file | |
2545 | * information in 'bs' can be properly updated. | |
2546 | * | |
2547 | * E.g., this will convert the following chain: | |
2548 | * bottom <- base <- intermediate <- top <- active | |
2549 | * | |
2550 | * to | |
2551 | * | |
2552 | * bottom <- base <- active | |
2553 | * | |
2554 | * It is allowed for bottom==base, in which case it converts: | |
2555 | * | |
2556 | * base <- intermediate <- top <- active | |
2557 | * | |
2558 | * to | |
2559 | * | |
2560 | * base <- active | |
2561 | * | |
2562 | * If backing_file_str is non-NULL, it will be used when modifying top's | |
2563 | * overlay image metadata. | |
2564 | * | |
2565 | * Error conditions: | |
2566 | * if active == top, that is considered an error | |
2567 | * | |
2568 | */ | |
2569 | int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, | |
2570 | BlockDriverState *base, const char *backing_file_str) | |
2571 | { | |
2572 | BlockDriverState *intermediate; | |
2573 | BlockDriverState *base_bs = NULL; | |
2574 | BlockDriverState *new_top_bs = NULL; | |
2575 | BlkIntermediateStates *intermediate_state, *next; | |
2576 | int ret = -EIO; | |
2577 | ||
2578 | QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete; | |
2579 | QSIMPLEQ_INIT(&states_to_delete); | |
2580 | ||
2581 | if (!top->drv || !base->drv) { | |
2582 | goto exit; | |
2583 | } | |
2584 | ||
2585 | new_top_bs = bdrv_find_overlay(active, top); | |
2586 | ||
2587 | if (new_top_bs == NULL) { | |
2588 | /* we could not find the image above 'top', this is an error */ | |
2589 | goto exit; | |
2590 | } | |
2591 | ||
2592 | /* special case of new_top_bs->backing_hd already pointing to base - nothing | |
2593 | * to do, no intermediate images */ | |
2594 | if (new_top_bs->backing_hd == base) { | |
2595 | ret = 0; | |
2596 | goto exit; | |
2597 | } | |
2598 | ||
2599 | intermediate = top; | |
2600 | ||
2601 | /* now we will go down through the list, and add each BDS we find | |
2602 | * into our deletion queue, until we hit the 'base' | |
2603 | */ | |
2604 | while (intermediate) { | |
2605 | intermediate_state = g_new0(BlkIntermediateStates, 1); | |
2606 | intermediate_state->bs = intermediate; | |
2607 | QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry); | |
2608 | ||
2609 | if (intermediate->backing_hd == base) { | |
2610 | base_bs = intermediate->backing_hd; | |
2611 | break; | |
2612 | } | |
2613 | intermediate = intermediate->backing_hd; | |
2614 | } | |
2615 | if (base_bs == NULL) { | |
2616 | /* Something went wrong: we did not end at the base. Safely | |
2617 | * unravel everything, and exit with an error */ | |
2618 | goto exit; | |
2619 | } | |
2620 | ||
2621 | /* success - we can delete the intermediate states, and link top->base */ | |
2622 | backing_file_str = backing_file_str ? backing_file_str : base_bs->filename; | |
2623 | ret = bdrv_change_backing_file(new_top_bs, backing_file_str, | |
2624 | base_bs->drv ? base_bs->drv->format_name : ""); | |
2625 | if (ret) { | |
2626 | goto exit; | |
2627 | } | |
2628 | bdrv_set_backing_hd(new_top_bs, base_bs); | |
2629 | ||
2630 | QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { | |
2631 | /* so that bdrv_close() does not recursively close the chain */ | |
2632 | bdrv_set_backing_hd(intermediate_state->bs, NULL); | |
2633 | bdrv_unref(intermediate_state->bs); | |
2634 | } | |
2635 | ret = 0; | |
2636 | ||
2637 | exit: | |
2638 | QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { | |
2639 | g_free(intermediate_state); | |
2640 | } | |
2641 | return ret; | |
2642 | } | |
2643 | ||
2644 | ||
2645 | static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, | |
2646 | size_t size) | |
2647 | { | |
2648 | int64_t len; | |
2649 | ||
2650 | if (size > INT_MAX) { | |
2651 | return -EIO; | |
2652 | } | |
2653 | ||
2654 | if (!bdrv_is_inserted(bs)) { | |
2655 | return -ENOMEDIUM; | |
2656 | } | |
2657 | if (bs->growable) { | |
2658 | return 0; | |
2659 | } | |
2660 | len = bdrv_getlength(bs); | |
2661 | ||
2662 | if (offset < 0) { | |
2663 | return -EIO; | |
2664 | } | |
2665 | if ((offset > len) || (len - offset < size)) { | |
2666 | return -EIO; | |
2667 | } | |
2668 | return 0; | |
2669 | } | |
2670 | ||
2671 | static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, | |
2672 | int nb_sectors) | |
2673 | { | |
2674 | if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { | |
2675 | return -EIO; | |
2676 | } | |
2677 | ||
2678 | return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, | |
2679 | nb_sectors * BDRV_SECTOR_SIZE); | |
2680 | } | |
2681 | ||
2682 | typedef struct RwCo { | |
2683 | BlockDriverState *bs; | |
2684 | int64_t offset; | |
2685 | QEMUIOVector *qiov; | |
2686 | bool is_write; | |
2687 | int ret; | |
2688 | BdrvRequestFlags flags; | |
2689 | } RwCo; | |
2690 | ||
2691 | static void coroutine_fn bdrv_rw_co_entry(void *opaque) | |
2692 | { | |
2693 | RwCo *rwco = opaque; | |
2694 | ||
2695 | if (!rwco->is_write) { | |
2696 | rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, | |
2697 | rwco->qiov->size, rwco->qiov, | |
2698 | rwco->flags); | |
2699 | } else { | |
2700 | rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, | |
2701 | rwco->qiov->size, rwco->qiov, | |
2702 | rwco->flags); | |
2703 | } | |
2704 | } | |
2705 | ||
2706 | /* | |
2707 | * Process a vectored synchronous request using coroutines | |
2708 | */ | |
2709 | static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, | |
2710 | QEMUIOVector *qiov, bool is_write, | |
2711 | BdrvRequestFlags flags) | |
2712 | { | |
2713 | Coroutine *co; | |
2714 | RwCo rwco = { | |
2715 | .bs = bs, | |
2716 | .offset = offset, | |
2717 | .qiov = qiov, | |
2718 | .is_write = is_write, | |
2719 | .ret = NOT_DONE, | |
2720 | .flags = flags, | |
2721 | }; | |
2722 | ||
2723 | /** | |
2724 | * In sync call context, when the vcpu is blocked, this throttling timer | |
2725 | * will not fire; so the I/O throttling function has to be disabled here | |
2726 | * if it has been enabled. | |
2727 | */ | |
2728 | if (bs->io_limits_enabled) { | |
2729 | fprintf(stderr, "Disabling I/O throttling on '%s' due " | |
2730 | "to synchronous I/O.\n", bdrv_get_device_name(bs)); | |
2731 | bdrv_io_limits_disable(bs); | |
2732 | } | |
2733 | ||
2734 | if (qemu_in_coroutine()) { | |
2735 | /* Fast-path if already in coroutine context */ | |
2736 | bdrv_rw_co_entry(&rwco); | |
2737 | } else { | |
2738 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
2739 | ||
2740 | co = qemu_coroutine_create(bdrv_rw_co_entry); | |
2741 | qemu_coroutine_enter(co, &rwco); | |
2742 | while (rwco.ret == NOT_DONE) { | |
2743 | aio_poll(aio_context, true); | |
2744 | } | |
2745 | } | |
2746 | return rwco.ret; | |
2747 | } | |
2748 | ||
2749 | /* | |
2750 | * Process a synchronous request using coroutines | |
2751 | */ | |
2752 | static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, | |
2753 | int nb_sectors, bool is_write, BdrvRequestFlags flags) | |
2754 | { | |
2755 | QEMUIOVector qiov; | |
2756 | struct iovec iov = { | |
2757 | .iov_base = (void *)buf, | |
2758 | .iov_len = nb_sectors * BDRV_SECTOR_SIZE, | |
2759 | }; | |
2760 | ||
2761 | if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { | |
2762 | return -EINVAL; | |
2763 | } | |
2764 | ||
2765 | qemu_iovec_init_external(&qiov, &iov, 1); | |
2766 | return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, | |
2767 | &qiov, is_write, flags); | |
2768 | } | |
2769 | ||
2770 | /* return < 0 if error. See bdrv_write() for the return codes */ | |
2771 | int bdrv_read(BlockDriverState *bs, int64_t sector_num, | |
2772 | uint8_t *buf, int nb_sectors) | |
2773 | { | |
2774 | return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); | |
2775 | } | |
2776 | ||
2777 | /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ | |
2778 | int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, | |
2779 | uint8_t *buf, int nb_sectors) | |
2780 | { | |
2781 | bool enabled; | |
2782 | int ret; | |
2783 | ||
2784 | enabled = bs->io_limits_enabled; | |
2785 | bs->io_limits_enabled = false; | |
2786 | ret = bdrv_read(bs, sector_num, buf, nb_sectors); | |
2787 | bs->io_limits_enabled = enabled; | |
2788 | return ret; | |
2789 | } | |
2790 | ||
2791 | /* Return < 0 if error. Important errors are: | |
2792 | -EIO generic I/O error (may happen for all errors) | |
2793 | -ENOMEDIUM No media inserted. | |
2794 | -EINVAL Invalid sector number or nb_sectors | |
2795 | -EACCES Trying to write a read-only device | |
2796 | */ | |
2797 | int bdrv_write(BlockDriverState *bs, int64_t sector_num, | |
2798 | const uint8_t *buf, int nb_sectors) | |
2799 | { | |
2800 | return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); | |
2801 | } | |
2802 | ||
2803 | int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, | |
2804 | int nb_sectors, BdrvRequestFlags flags) | |
2805 | { | |
2806 | return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, | |
2807 | BDRV_REQ_ZERO_WRITE | flags); | |
2808 | } | |
2809 | ||
2810 | /* | |
2811 | * Completely zero out a block device with the help of bdrv_write_zeroes. | |
2812 | * The operation is sped up by checking the block status and only writing | |
2813 | * zeroes to the device if they currently do not return zeroes. Optional | |
2814 | * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). | |
2815 | * | |
2816 | * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). | |
2817 | */ | |
2818 | int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) | |
2819 | { | |
2820 | int64_t target_sectors, ret, nb_sectors, sector_num = 0; | |
2821 | int n; | |
2822 | ||
2823 | target_sectors = bdrv_nb_sectors(bs); | |
2824 | if (target_sectors < 0) { | |
2825 | return target_sectors; | |
2826 | } | |
2827 | ||
2828 | for (;;) { | |
2829 | nb_sectors = target_sectors - sector_num; | |
2830 | if (nb_sectors <= 0) { | |
2831 | return 0; | |
2832 | } | |
2833 | if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { | |
2834 | nb_sectors = INT_MAX / BDRV_SECTOR_SIZE; | |
2835 | } | |
2836 | ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); | |
2837 | if (ret < 0) { | |
2838 | error_report("error getting block status at sector %" PRId64 ": %s", | |
2839 | sector_num, strerror(-ret)); | |
2840 | return ret; | |
2841 | } | |
2842 | if (ret & BDRV_BLOCK_ZERO) { | |
2843 | sector_num += n; | |
2844 | continue; | |
2845 | } | |
2846 | ret = bdrv_write_zeroes(bs, sector_num, n, flags); | |
2847 | if (ret < 0) { | |
2848 | error_report("error writing zeroes at sector %" PRId64 ": %s", | |
2849 | sector_num, strerror(-ret)); | |
2850 | return ret; | |
2851 | } | |
2852 | sector_num += n; | |
2853 | } | |
2854 | } | |
2855 | ||
2856 | int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) | |
2857 | { | |
2858 | QEMUIOVector qiov; | |
2859 | struct iovec iov = { | |
2860 | .iov_base = (void *)buf, | |
2861 | .iov_len = bytes, | |
2862 | }; | |
2863 | int ret; | |
2864 | ||
2865 | if (bytes < 0) { | |
2866 | return -EINVAL; | |
2867 | } | |
2868 | ||
2869 | qemu_iovec_init_external(&qiov, &iov, 1); | |
2870 | ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); | |
2871 | if (ret < 0) { | |
2872 | return ret; | |
2873 | } | |
2874 | ||
2875 | return bytes; | |
2876 | } | |
2877 | ||
2878 | int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) | |
2879 | { | |
2880 | int ret; | |
2881 | ||
2882 | ret = bdrv_prwv_co(bs, offset, qiov, true, 0); | |
2883 | if (ret < 0) { | |
2884 | return ret; | |
2885 | } | |
2886 | ||
2887 | return qiov->size; | |
2888 | } | |
2889 | ||
2890 | int bdrv_pwrite(BlockDriverState *bs, int64_t offset, | |
2891 | const void *buf, int bytes) | |
2892 | { | |
2893 | QEMUIOVector qiov; | |
2894 | struct iovec iov = { | |
2895 | .iov_base = (void *) buf, | |
2896 | .iov_len = bytes, | |
2897 | }; | |
2898 | ||
2899 | if (bytes < 0) { | |
2900 | return -EINVAL; | |
2901 | } | |
2902 | ||
2903 | qemu_iovec_init_external(&qiov, &iov, 1); | |
2904 | return bdrv_pwritev(bs, offset, &qiov); | |
2905 | } | |
2906 | ||
2907 | /* | |
2908 | * Writes to the file and ensures that no writes are reordered across this | |
2909 | * request (acts as a barrier) | |
2910 | * | |
2911 | * Returns 0 on success, -errno in error cases. | |
2912 | */ | |
2913 | int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, | |
2914 | const void *buf, int count) | |
2915 | { | |
2916 | int ret; | |
2917 | ||
2918 | ret = bdrv_pwrite(bs, offset, buf, count); | |
2919 | if (ret < 0) { | |
2920 | return ret; | |
2921 | } | |
2922 | ||
2923 | /* No flush needed for cache modes that already do it */ | |
2924 | if (bs->enable_write_cache) { | |
2925 | bdrv_flush(bs); | |
2926 | } | |
2927 | ||
2928 | return 0; | |
2929 | } | |
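/*
 * Illustrative sketch, an assumption rather than upstream code: a format
 * driver updating on-disk metadata through bs->file could use
 * bdrv_pwrite_sync() so the update cannot be reordered against later writes.
 * The header field and its offset are hypothetical.
 */
#if 0
static int example_update_header(BlockDriverState *bs)
{
    uint32_t magic = cpu_to_be32(0xdeadbeef);   /* hypothetical field */

    /* Writes 4 bytes at offset 0; flushes afterwards when the write cache
     * is enabled, since writethrough modes already persist each write */
    return bdrv_pwrite_sync(bs->file, 0, &magic, sizeof(magic));
}
#endif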
2930 | ||
2931 | static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, | |
2932 | int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) | |
2933 | { | |
2934 | /* Perform I/O through a temporary buffer so that users who scribble over | |
2935 | * their read buffer while the operation is in progress do not end up | |
2936 | * modifying the image file. This is critical for zero-copy guest I/O | |
2937 | * where anything might happen inside guest memory. | |
2938 | */ | |
2939 | void *bounce_buffer; | |
2940 | ||
2941 | BlockDriver *drv = bs->drv; | |
2942 | struct iovec iov; | |
2943 | QEMUIOVector bounce_qiov; | |
2944 | int64_t cluster_sector_num; | |
2945 | int cluster_nb_sectors; | |
2946 | size_t skip_bytes; | |
2947 | int ret; | |
2948 | ||
2949 | /* Cover the entire cluster so no additional backing file I/O is required when | |
2950 | * allocating the cluster in the image file. | |
2951 | */ | |
2952 | bdrv_round_to_clusters(bs, sector_num, nb_sectors, | |
2953 | &cluster_sector_num, &cluster_nb_sectors); | |
2954 | ||
2955 | trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, | |
2956 | cluster_sector_num, cluster_nb_sectors); | |
2957 | ||
2958 | iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; | |
2959 | iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); | |
2960 | if (bounce_buffer == NULL) { | |
2961 | ret = -ENOMEM; | |
2962 | goto err; | |
2963 | } | |
2964 | ||
2965 | qemu_iovec_init_external(&bounce_qiov, &iov, 1); | |
2966 | ||
2967 | ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, | |
2968 | &bounce_qiov); | |
2969 | if (ret < 0) { | |
2970 | goto err; | |
2971 | } | |
2972 | ||
2973 | if (drv->bdrv_co_write_zeroes && | |
2974 | buffer_is_zero(bounce_buffer, iov.iov_len)) { | |
2975 | ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, | |
2976 | cluster_nb_sectors, 0); | |
2977 | } else { | |
2978 | /* This does not change the data on the disk, it is not necessary | |
2979 | * to flush even in cache=writethrough mode. | |
2980 | */ | |
2981 | ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, | |
2982 | &bounce_qiov); | |
2983 | } | |
2984 | ||
2985 | if (ret < 0) { | |
2986 | /* It might be okay to ignore write errors for guest requests. If this | |
2987 | * is a deliberate copy-on-read then we don't want to ignore the error. | |
2988 | * Simply report it in all cases. | |
2989 | */ | |
2990 | goto err; | |
2991 | } | |
2992 | ||
2993 | skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; | |
2994 | qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, | |
2995 | nb_sectors * BDRV_SECTOR_SIZE); | |
2996 | ||
2997 | err: | |
2998 | qemu_vfree(bounce_buffer); | |
2999 | return ret; | |
3000 | } | |
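/*
 * Worked example (illustrative): a copy-on-read of sectors [130, 140) in a
 * 128-sector cluster reads and writes the whole cluster [128, 256) through
 * the bounce buffer; skip_bytes = (130 - 128) * 512 = 1024 then selects the
 * caller's 10 sectors out of it.
 */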
3001 | ||
3002 | /* | |
3003 | * Forwards an already correctly aligned request to the BlockDriver. This | |
3004 | * handles copy on read and zeroing after EOF; any other features must be | |
3005 | * implemented by the caller. | |
3006 | */ | |
3007 | static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, | |
3008 | BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, | |
3009 | int64_t align, QEMUIOVector *qiov, int flags) | |
3010 | { | |
3011 | BlockDriver *drv = bs->drv; | |
3012 | int ret; | |
3013 | ||
3014 | int64_t sector_num = offset >> BDRV_SECTOR_BITS; | |
3015 | unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; | |
3016 | ||
3017 | assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); | |
3018 | assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); | |
3019 | assert(!qiov || bytes == qiov->size); | |
3020 | ||
3021 | /* Handle Copy on Read and associated serialisation */ | |
3022 | if (flags & BDRV_REQ_COPY_ON_READ) { | |
3023 | /* If we touch the same cluster it counts as an overlap. This | |
3024 | * guarantees that allocating writes will be serialized and not race | |
3025 | * with each other for the same cluster. For example, in copy-on-read | |
3026 | * it ensures that the CoR read and write operations are atomic and | |
3027 | * guest writes cannot interleave between them. */ | |
3028 | mark_request_serialising(req, bdrv_get_cluster_size(bs)); | |
3029 | } | |
3030 | ||
3031 | wait_serialising_requests(req); | |
3032 | ||
3033 | if (flags & BDRV_REQ_COPY_ON_READ) { | |
3034 | int pnum; | |
3035 | ||
3036 | ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); | |
3037 | if (ret < 0) { | |
3038 | goto out; | |
3039 | } | |
3040 | ||
3041 | if (!ret || pnum != nb_sectors) { | |
3042 | ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); | |
3043 | goto out; | |
3044 | } | |
3045 | } | |
3046 | ||
3047 | /* Forward the request to the BlockDriver */ | |
3048 | if (!(bs->zero_beyond_eof && bs->growable)) { | |
3049 | ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); | |
3050 | } else { | |
3051 | /* Read zeros after EOF of growable BDSes */ | |
3052 | int64_t total_sectors, max_nb_sectors; | |
3053 | ||
3054 | total_sectors = bdrv_nb_sectors(bs); | |
3055 | if (total_sectors < 0) { | |
3056 | ret = total_sectors; | |
3057 | goto out; | |
3058 | } | |
3059 | ||
3060 | max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num), | |
3061 | align >> BDRV_SECTOR_BITS); | |
3062 | if (nb_sectors < max_nb_sectors) { | |
3063 | ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); | |
3064 | } else if (max_nb_sectors > 0) { | |
3065 | QEMUIOVector local_qiov; | |
3066 | ||
3067 | qemu_iovec_init(&local_qiov, qiov->niov); | |
3068 | qemu_iovec_concat(&local_qiov, qiov, 0, | |
3069 | max_nb_sectors * BDRV_SECTOR_SIZE); | |
3070 | ||
3071 | ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors, | |
3072 | &local_qiov); | |
3073 | ||
3074 | qemu_iovec_destroy(&local_qiov); | |
3075 | } else { | |
3076 | ret = 0; | |
3077 | } | |
3078 | ||
3079 | /* Reading beyond end of file is supposed to produce zeroes */ | |
3080 | if (ret == 0 && total_sectors < sector_num + nb_sectors) { | |
3081 | uint64_t offset = MAX(0, total_sectors - sector_num); | |
3082 | uint64_t bytes = (sector_num + nb_sectors - offset) * | |
3083 | BDRV_SECTOR_SIZE; | |
3084 | qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); | |
3085 | } | |
3086 | } | |
3087 | ||
3088 | out: | |
3089 | return ret; | |
3090 | } | |
3091 | ||
3092 | /* | |
3093 | * Handle a read request in coroutine context | |
3094 | */ | |
3095 | static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, | |
3096 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, | |
3097 | BdrvRequestFlags flags) | |
3098 | { | |
3099 | BlockDriver *drv = bs->drv; | |
3100 | BdrvTrackedRequest req; | |
3101 | ||
3102 | /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ | |
3103 | uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); | |
3104 | uint8_t *head_buf = NULL; | |
3105 | uint8_t *tail_buf = NULL; | |
3106 | QEMUIOVector local_qiov; | |
3107 | bool use_local_qiov = false; | |
3108 | int ret; | |
3109 | ||
3110 | if (!drv) { | |
3111 | return -ENOMEDIUM; | |
3112 | } | |
3113 | if (bdrv_check_byte_request(bs, offset, bytes)) { | |
3114 | return -EIO; | |
3115 | } | |
3116 | ||
3117 | if (bs->copy_on_read) { | |
3118 | flags |= BDRV_REQ_COPY_ON_READ; | |
3119 | } | |
3120 | ||
3121 | /* throttling disk I/O */ | |
3122 | if (bs->io_limits_enabled) { | |
3123 | bdrv_io_limits_intercept(bs, bytes, false); | |
3124 | } | |
3125 | ||
3126 | /* Align read if necessary by padding qiov */ | |
3127 | if (offset & (align - 1)) { | |
3128 | head_buf = qemu_blockalign(bs, align); | |
3129 | qemu_iovec_init(&local_qiov, qiov->niov + 2); | |
3130 | qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); | |
3131 | qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); | |
3132 | use_local_qiov = true; | |
3133 | ||
3134 | bytes += offset & (align - 1); | |
3135 | offset = offset & ~(align - 1); | |
3136 | } | |
3137 | ||
3138 | if ((offset + bytes) & (align - 1)) { | |
3139 | if (!use_local_qiov) { | |
3140 | qemu_iovec_init(&local_qiov, qiov->niov + 1); | |
3141 | qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); | |
3142 | use_local_qiov = true; | |
3143 | } | |
3144 | tail_buf = qemu_blockalign(bs, align); | |
3145 | qemu_iovec_add(&local_qiov, tail_buf, | |
3146 | align - ((offset + bytes) & (align - 1))); | |
3147 | ||
3148 | bytes = ROUND_UP(bytes, align); | |
3149 | } | |
3150 | ||
3151 | tracked_request_begin(&req, bs, offset, bytes, false); | |
3152 | ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, | |
3153 | use_local_qiov ? &local_qiov : qiov, | |
3154 | flags); | |
3155 | tracked_request_end(&req); | |
3156 | ||
3157 | if (use_local_qiov) { | |
3158 | qemu_iovec_destroy(&local_qiov); | |
3159 | qemu_vfree(head_buf); | |
3160 | qemu_vfree(tail_buf); | |
3161 | } | |
3162 | ||
3163 | return ret; | |
3164 | } | |
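/*
 * Worked example (illustrative): with align = 512, a read at offset = 1000
 * for bytes = 100 is padded to the aligned request [512, 1536): a 488-byte
 * head and a 436-byte tail land in head_buf/tail_buf via local_qiov, and
 * only the caller's 100 bytes end up in qiov.
 */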
3165 | ||
3166 | static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, | |
3167 | int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, | |
3168 | BdrvRequestFlags flags) | |
3169 | { | |
3170 | if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) { | |
3171 | return -EINVAL; | |
3172 | } | |
3173 | ||
3174 | return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS, | |
3175 | nb_sectors << BDRV_SECTOR_BITS, qiov, flags); | |
3176 | } | |
3177 | ||
3178 | int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, | |
3179 | int nb_sectors, QEMUIOVector *qiov) | |
3180 | { | |
3181 | trace_bdrv_co_readv(bs, sector_num, nb_sectors); | |
3182 | ||
3183 | return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); | |
3184 | } | |
3185 | ||
3186 | int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, | |
3187 | int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) | |
3188 | { | |
3189 | trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); | |
3190 | ||
3191 | return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, | |
3192 | BDRV_REQ_COPY_ON_READ); | |
3193 | } | |
3194 | ||
3195 | /* If no limit is specified in the BlockLimits, use a default | |
3196 | * of 32768 512-byte sectors (16 MiB) per request. | |
3197 | */ | |
3198 | #define MAX_WRITE_ZEROES_DEFAULT 32768 | |
3199 | ||
3200 | static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, | |
3201 | int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) | |
3202 | { | |
3203 | BlockDriver *drv = bs->drv; | |
3204 | QEMUIOVector qiov; | |
3205 | struct iovec iov = {0}; | |
3206 | int ret = 0; | |
3207 | ||
3208 | int max_write_zeroes = bs->bl.max_write_zeroes ? | |
3209 | bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT; | |
3210 | ||
3211 | while (nb_sectors > 0 && !ret) { | |
3212 | int num = nb_sectors; | |
3213 | ||
3214 | /* Align request. Block drivers can expect the "bulk" of the request | |
3215 | * to be aligned. | |
3216 | */ | |
3217 | if (bs->bl.write_zeroes_alignment | |
3218 | && num > bs->bl.write_zeroes_alignment) { | |
3219 | if (sector_num % bs->bl.write_zeroes_alignment != 0) { | |
3220 | /* Make a small request up to the first aligned sector. */ | |
3221 | num = bs->bl.write_zeroes_alignment; | |
3222 | num -= sector_num % bs->bl.write_zeroes_alignment; | |
3223 | } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { | |
3224 | /* Shorten the request to the last aligned sector. num cannot | |
3225 | * underflow because num > bs->bl.write_zeroes_alignment. | |
3226 | */ | |
3227 | num -= (sector_num + num) % bs->bl.write_zeroes_alignment; | |
3228 | } | |
3229 | } | |
3230 | ||
3231 | /* limit request size */ | |
3232 | if (num > max_write_zeroes) { | |
3233 | num = max_write_zeroes; | |
3234 | } | |
3235 | ||
3236 | ret = -ENOTSUP; | |
3237 | /* First try the efficient write zeroes operation */ | |
3238 | if (drv->bdrv_co_write_zeroes) { | |
3239 | ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); | |
3240 | } | |
3241 | ||
3242 | if (ret == -ENOTSUP) { | |
3243 | /* Fall back to bounce buffer if write zeroes is unsupported */ | |
3244 | int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, | |
3245 | MAX_WRITE_ZEROES_DEFAULT); | |
3246 | num = MIN(num, max_xfer_len); | |
3247 | iov.iov_len = num * BDRV_SECTOR_SIZE; | |
3248 | if (iov.iov_base == NULL) { | |
3249 | iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE); | |
3250 | if (iov.iov_base == NULL) { | |
3251 | ret = -ENOMEM; | |
3252 | goto fail; | |
3253 | } | |
3254 | memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); | |
3255 | } | |
3256 | qemu_iovec_init_external(&qiov, &iov, 1); | |
3257 | ||
3258 | ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); | |
3259 | ||
3260 | /* Keep the bounce buffer around if it is big enough for | |
3261 | * all future requests. | |
3262 | */ | |
3263 | if (num < max_xfer_len) { | |
3264 | qemu_vfree(iov.iov_base); | |
3265 | iov.iov_base = NULL; | |
3266 | } | |
3267 | } | |
3268 | ||
3269 | sector_num += num; | |
3270 | nb_sectors -= num; | |
3271 | } | |
3272 | ||
3273 | fail: | |
3274 | qemu_vfree(iov.iov_base); | |
3275 | return ret; | |
3276 | } | |
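/*
 * Worked example (illustrative): with write_zeroes_alignment = 8 and a
 * request for sectors [5, 27), the loop above splits the work into
 *   [5, 8)   head fragment up to the first aligned sector (num = 3),
 *   [8, 24)  aligned bulk (num = 16),
 *   [24, 27) tail fragment (num = 3).
 */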
3277 | ||
3278 | /* | |
3279 | * Forwards an already correctly aligned write request to the BlockDriver. | |
3280 | */ | |
3281 | static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, | |
3282 | BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, | |
3283 | QEMUIOVector *qiov, int flags) | |
3284 | { | |
3285 | BlockDriver *drv = bs->drv; | |
3286 | bool waited; | |
3287 | int ret; | |
3288 | ||
3289 | int64_t sector_num = offset >> BDRV_SECTOR_BITS; | |
3290 | unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; | |
3291 | ||
3292 | assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); | |
3293 | assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); | |
3294 | assert(!qiov || bytes == qiov->size); | |
3295 | ||
3296 | waited = wait_serialising_requests(req); | |
3297 | assert(!waited || !req->serialising); | |
3298 | assert(req->overlap_offset <= offset); | |
3299 | assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); | |
3300 | ||
3301 | ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); | |
3302 | ||
3303 | if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && | |
3304 | !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && | |
3305 | qemu_iovec_is_zero(qiov)) { | |
3306 | flags |= BDRV_REQ_ZERO_WRITE; | |
3307 | if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { | |
3308 | flags |= BDRV_REQ_MAY_UNMAP; | |
3309 | } | |
3310 | } | |
3311 | ||
3312 | if (ret < 0) { | |
3313 | /* Do nothing, write notifier decided to fail this request */ | |
3314 | } else if (flags & BDRV_REQ_ZERO_WRITE) { | |
3315 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); | |
3316 | ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); | |
3317 | } else { | |
3318 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV); | |
3319 | ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); | |
3320 | } | |
3321 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); | |
3322 | ||
3323 | if (ret == 0 && !bs->enable_write_cache) { | |
3324 | ret = bdrv_co_flush(bs); | |
3325 | } | |
3326 | ||
3327 | bdrv_set_dirty(bs, sector_num, nb_sectors); | |
3328 | ||
3329 | block_acct_highest_sector(&bs->stats, sector_num, nb_sectors); | |
3330 | ||
3331 | if (bs->growable && ret >= 0) { | |
3332 | bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); | |
3333 | } | |
3334 | ||
3335 | return ret; | |
3336 | } | |
3337 | ||
3338 | /* | |
3339 | * Handle a write request in coroutine context | |
3340 | */ | |
3341 | static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, | |
3342 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, | |
3343 | BdrvRequestFlags flags) | |
3344 | { | |
3345 | BdrvTrackedRequest req; | |
3346 | /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ | |
3347 | uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); | |
3348 | uint8_t *head_buf = NULL; | |
3349 | uint8_t *tail_buf = NULL; | |
3350 | QEMUIOVector local_qiov; | |
3351 | bool use_local_qiov = false; | |
3352 | int ret; | |
3353 | ||
3354 | if (!bs->drv) { | |
3355 | return -ENOMEDIUM; | |
3356 | } | |
3357 | if (bs->read_only) { | |
3358 | return -EACCES; | |
3359 | } | |
3360 | if (bdrv_check_byte_request(bs, offset, bytes)) { | |
3361 | return -EIO; | |
3362 | } | |
3363 | ||
3364 | /* throttling disk I/O */ | |
3365 | if (bs->io_limits_enabled) { | |
3366 | bdrv_io_limits_intercept(bs, bytes, true); | |
3367 | } | |
3368 | ||
3369 | /* | |
3370 | * Align write if necessary by performing a read-modify-write cycle. | |
3371 | * Pad qiov with the read parts and be sure to have a tracked request not | |
3372 | * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. | |
3373 | */ | |
3374 | tracked_request_begin(&req, bs, offset, bytes, true); | |
3375 | ||
3376 | if (offset & (align - 1)) { | |
3377 | QEMUIOVector head_qiov; | |
3378 | struct iovec head_iov; | |
3379 | ||
3380 | mark_request_serialising(&req, align); | |
3381 | wait_serialising_requests(&req); | |
3382 | ||
3383 | head_buf = qemu_blockalign(bs, align); | |
3384 | head_iov = (struct iovec) { | |
3385 | .iov_base = head_buf, | |
3386 | .iov_len = align, | |
3387 | }; | |
3388 | qemu_iovec_init_external(&head_qiov, &head_iov, 1); | |
3389 | ||
3390 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); | |
3391 | ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, | |
3392 | align, &head_qiov, 0); | |
3393 | if (ret < 0) { | |
3394 | goto fail; | |
3395 | } | |
3396 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); | |
3397 | ||
3398 | qemu_iovec_init(&local_qiov, qiov->niov + 2); | |
3399 | qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); | |
3400 | qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); | |
3401 | use_local_qiov = true; | |
3402 | ||
3403 | bytes += offset & (align - 1); | |
3404 | offset = offset & ~(align - 1); | |
3405 | } | |
3406 | ||
3407 | if ((offset + bytes) & (align - 1)) { | |
3408 | QEMUIOVector tail_qiov; | |
3409 | struct iovec tail_iov; | |
3410 | size_t tail_bytes; | |
3411 | bool waited; | |
3412 | ||
3413 | mark_request_serialising(&req, align); | |
3414 | waited = wait_serialising_requests(&req); | |
3415 | assert(!waited || !use_local_qiov); | |
3416 | ||
3417 | tail_buf = qemu_blockalign(bs, align); | |
3418 | tail_iov = (struct iovec) { | |
3419 | .iov_base = tail_buf, | |
3420 | .iov_len = align, | |
3421 | }; | |
3422 | qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); | |
3423 | ||
3424 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); | |
3425 | ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, | |
3426 | align, &tail_qiov, 0); | |
3427 | if (ret < 0) { | |
3428 | goto fail; | |
3429 | } | |
3430 | BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); | |
3431 | ||
3432 | if (!use_local_qiov) { | |
3433 | qemu_iovec_init(&local_qiov, qiov->niov + 1); | |
3434 | qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); | |
3435 | use_local_qiov = true; | |
3436 | } | |
3437 | ||
3438 | tail_bytes = (offset + bytes) & (align - 1); | |
3439 | qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); | |
3440 | ||
3441 | bytes = ROUND_UP(bytes, align); | |
3442 | } | |
3443 | ||
3444 | ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, | |
3445 | use_local_qiov ? &local_qiov : qiov, | |
3446 | flags); | |
3447 | ||
3448 | fail: | |
3449 | tracked_request_end(&req); | |
3450 | ||
3451 | if (use_local_qiov) { | |
3452 | qemu_iovec_destroy(&local_qiov); | |
3453 | } | |
3454 | qemu_vfree(head_buf); | |
3455 | qemu_vfree(tail_buf); | |
3456 | ||
3457 | return ret; | |
3458 | } | |
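/*
 * Worked example of the RMW padding above (hypothetical numbers, with
 * align = 512): a write of bytes [100, 700) first reads the head block
 * [0, 512) and the tail block [512, 1024), then builds local_qiov as
 *
 *   bytes [0, 100)    from head_buf,
 *   bytes [100, 700)  from the caller's qiov,
 *   bytes [700, 1024) from tail_buf,
 *
 * and finally submits a single aligned write covering [0, 1024).
 */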
3459 | ||
3460 | static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, | |
3461 | int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, | |
3462 | BdrvRequestFlags flags) | |
3463 | { | |
3464 | if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) { | |
3465 | return -EINVAL; | |
3466 | } | |
3467 | ||
3468 | return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, | |
3469 | nb_sectors << BDRV_SECTOR_BITS, qiov, flags); | |
3470 | } | |
3471 | ||
3472 | int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, | |
3473 | int nb_sectors, QEMUIOVector *qiov) | |
3474 | { | |
3475 | trace_bdrv_co_writev(bs, sector_num, nb_sectors); | |
3476 | ||
3477 | return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); | |
3478 | } | |
3479 | ||
3480 | int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, | |
3481 | int64_t sector_num, int nb_sectors, | |
3482 | BdrvRequestFlags flags) | |
3483 | { | |
3484 | trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); | |
3485 | ||
3486 | if (!(bs->open_flags & BDRV_O_UNMAP)) { | |
3487 | flags &= ~BDRV_REQ_MAY_UNMAP; | |
3488 | } | |
3489 | ||
3490 | return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, | |
3491 | BDRV_REQ_ZERO_WRITE | flags); | |
3492 | } | |
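/* Usage sketch (hypothetical caller, coroutine context): zero out the
 * first megabyte of an image, letting the driver unmap the range if
 * the image was opened with BDRV_O_UNMAP. */
static int coroutine_fn zero_first_mib(BlockDriverState *bs)
{
    return bdrv_co_write_zeroes(bs, 0, (1 << 20) >> BDRV_SECTOR_BITS,
                                BDRV_REQ_MAY_UNMAP);
}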
3493 | ||
3494 | /** | |
3495 | * Truncate file to 'offset' bytes (needed only for file protocols) | |
3496 | */ | |
3497 | int bdrv_truncate(BlockDriverState *bs, int64_t offset) | |
3498 | { | |
3499 | BlockDriver *drv = bs->drv; | |
3500 | int ret; | |
3501 | if (!drv) | |
3502 | return -ENOMEDIUM; | |
3503 | if (!drv->bdrv_truncate) | |
3504 | return -ENOTSUP; | |
3505 | if (bs->read_only) | |
3506 | return -EACCES; | |
3507 | ||
3508 | ret = drv->bdrv_truncate(bs, offset); | |
3509 | if (ret == 0) { | |
3510 | ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); | |
3511 | if (bs->blk) { | |
3512 | blk_dev_resize_cb(bs->blk); | |
3513 | } | |
3514 | } | |
3515 | return ret; | |
3516 | } | |
3517 | ||
3518 | /** | |
3519 | * Length of an allocated file in bytes. Sparse files are counted by their | |
3520 | * actual allocated space. Returns < 0 on error or if unknown. | |
3521 | */ | |
3522 | int64_t bdrv_get_allocated_file_size(BlockDriverState *bs) | |
3523 | { | |
3524 | BlockDriver *drv = bs->drv; | |
3525 | if (!drv) { | |
3526 | return -ENOMEDIUM; | |
3527 | } | |
3528 | if (drv->bdrv_get_allocated_file_size) { | |
3529 | return drv->bdrv_get_allocated_file_size(bs); | |
3530 | } | |
3531 | if (bs->file) { | |
3532 | return bdrv_get_allocated_file_size(bs->file); | |
3533 | } | |
3534 | return -ENOTSUP; | |
3535 | } | |
3536 | ||
3537 | /** | |
3538 | * Return number of sectors on success, -errno on error. | |
3539 | */ | |
3540 | int64_t bdrv_nb_sectors(BlockDriverState *bs) | |
3541 | { | |
3542 | BlockDriver *drv = bs->drv; | |
3543 | ||
3544 | if (!drv) | |
3545 | return -ENOMEDIUM; | |
3546 | ||
3547 | if (drv->has_variable_length) { | |
3548 | int ret = refresh_total_sectors(bs, bs->total_sectors); | |
3549 | if (ret < 0) { | |
3550 | return ret; | |
3551 | } | |
3552 | } | |
3553 | return bs->total_sectors; | |
3554 | } | |
3555 | ||
3556 | /** | |
3557 | * Return length in bytes on success, -errno on error. | |
3558 | * The length is always a multiple of BDRV_SECTOR_SIZE. | |
3559 | */ | |
3560 | int64_t bdrv_getlength(BlockDriverState *bs) | |
3561 | { | |
3562 | int64_t ret = bdrv_nb_sectors(bs); | |
3563 | ||
3564 | return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE; | |
3565 | } | |
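/* Usage sketch (hypothetical helper; relies on the stdio/inttypes
 * headers pulled in via qemu-common.h): report an image's size. */
static void print_image_size(BlockDriverState *bs)
{
    int64_t len = bdrv_getlength(bs);

    if (len < 0) {
        fprintf(stderr, "bdrv_getlength: %s\n", strerror((int)-len));
        return;
    }
    printf("%" PRId64 " bytes (%" PRId64 " sectors)\n",
           len, len >> BDRV_SECTOR_BITS);
}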
3566 | ||
3567 | /* return 0 as the number of sectors if no device is present or on error */ | |
3568 | void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) | |
3569 | { | |
3570 | int64_t nb_sectors = bdrv_nb_sectors(bs); | |
3571 | ||
3572 | *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors; | |
3573 | } | |
3574 | ||
3575 | void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, | |
3576 | BlockdevOnError on_write_error) | |
3577 | { | |
3578 | bs->on_read_error = on_read_error; | |
3579 | bs->on_write_error = on_write_error; | |
3580 | } | |
3581 | ||
3582 | BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read) | |
3583 | { | |
3584 | return is_read ? bs->on_read_error : bs->on_write_error; | |
3585 | } | |
3586 | ||
3587 | BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error) | |
3588 | { | |
3589 | BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error; | |
3590 | ||
3591 | switch (on_err) { | |
3592 | case BLOCKDEV_ON_ERROR_ENOSPC: | |
3593 | return (error == ENOSPC) ? | |
3594 | BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT; | |
3595 | case BLOCKDEV_ON_ERROR_STOP: | |
3596 | return BLOCK_ERROR_ACTION_STOP; | |
3597 | case BLOCKDEV_ON_ERROR_REPORT: | |
3598 | return BLOCK_ERROR_ACTION_REPORT; | |
3599 | case BLOCKDEV_ON_ERROR_IGNORE: | |
3600 | return BLOCK_ERROR_ACTION_IGNORE; | |
3601 | default: | |
3602 | abort(); | |
3603 | } | |
3604 | } | |
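/* Sketch of the call sequence a device model might use (hypothetical
 * handler; 'error' is a positive errno value, as bdrv_error_action()
 * below asserts): map the errno to the configured policy, then act. */
static void handle_io_error(BlockDriverState *bs, bool is_read, int error)
{
    BlockErrorAction action = bdrv_get_error_action(bs, is_read, error);

    bdrv_error_action(bs, action, is_read, error);
    /* For BLOCK_ERROR_ACTION_STOP a real device model would also queue
     * the request so it can be retried once the VM resumes. */
}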
3605 | ||
3606 | static void send_qmp_error_event(BlockDriverState *bs, | |
3607 | BlockErrorAction action, | |
3608 | bool is_read, int error) | |
3609 | { | |
3610 | IoOperationType optype; | |
3611 | ||
3612 | optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; | |
3613 | qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action, | |
3614 | bdrv_iostatus_is_enabled(bs), | |
3615 | error == ENOSPC, strerror(error), | |
3616 | &error_abort); | |
3617 | } | |
3618 | ||
3619 | /* This is done by device models because, while the block layer knows | |
3620 | * about the error, it does not know whether an operation comes from | |
3621 | * the device or the block layer (from a job, for example). | |
3622 | */ | |
3623 | void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, | |
3624 | bool is_read, int error) | |
3625 | { | |
3626 | assert(error >= 0); | |
3627 | ||
3628 | if (action == BLOCK_ERROR_ACTION_STOP) { | |
3629 | /* First set the iostatus, so that "info block" returns an iostatus | |
3630 | * that matches the events raised so far (an additional error iostatus | |
3631 | * is fine, but not a lost one). | |
3632 | */ | |
3633 | bdrv_iostatus_set_err(bs, error); | |
3634 | ||
3635 | /* Then raise the request to stop the VM and the event. | |
3636 | * qemu_system_vmstop_request_prepare has two effects. First, | |
3637 | * it ensures that the STOP event always comes after the | |
3638 | * BLOCK_IO_ERROR event. Second, it ensures that even if management | |
3639 | * can observe the STOP event and do a "cont" before the STOP | |
3640 | * event is issued, the VM will not stop. In this case, vm_start() | |
3641 | * also ensures that the STOP/RESUME pair of events is emitted. | |
3642 | */ | |
3643 | qemu_system_vmstop_request_prepare(); | |
3644 | send_qmp_error_event(bs, action, is_read, error); | |
3645 | qemu_system_vmstop_request(RUN_STATE_IO_ERROR); | |
3646 | } else { | |
3647 | send_qmp_error_event(bs, action, is_read, error); | |
3648 | } | |
3649 | } | |
3650 | ||
3651 | int bdrv_is_read_only(BlockDriverState *bs) | |
3652 | { | |
3653 | return bs->read_only; | |
3654 | } | |
3655 | ||
3656 | int bdrv_is_sg(BlockDriverState *bs) | |
3657 | { | |
3658 | return bs->sg; | |
3659 | } | |
3660 | ||
3661 | int bdrv_enable_write_cache(BlockDriverState *bs) | |
3662 | { | |
3663 | return bs->enable_write_cache; | |
3664 | } | |
3665 | ||
3666 | void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce) | |
3667 | { | |
3668 | bs->enable_write_cache = wce; | |
3669 | ||
3670 | /* so a reopen() will preserve wce */ | |
3671 | if (wce) { | |
3672 | bs->open_flags |= BDRV_O_CACHE_WB; | |
3673 | } else { | |
3674 | bs->open_flags &= ~BDRV_O_CACHE_WB; | |
3675 | } | |
3676 | } | |
3677 | ||
3678 | int bdrv_is_encrypted(BlockDriverState *bs) | |
3679 | { | |
3680 | if (bs->backing_hd && bs->backing_hd->encrypted) | |
3681 | return 1; | |
3682 | return bs->encrypted; | |
3683 | } | |
3684 | ||
3685 | int bdrv_key_required(BlockDriverState *bs) | |
3686 | { | |
3687 | BlockDriverState *backing_hd = bs->backing_hd; | |
3688 | ||
3689 | if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) | |
3690 | return 1; | |
3691 | return (bs->encrypted && !bs->valid_key); | |
3692 | } | |
3693 | ||
3694 | int bdrv_set_key(BlockDriverState *bs, const char *key) | |
3695 | { | |
3696 | int ret; | |
3697 | if (bs->backing_hd && bs->backing_hd->encrypted) { | |
3698 | ret = bdrv_set_key(bs->backing_hd, key); | |
3699 | if (ret < 0) | |
3700 | return ret; | |
3701 | if (!bs->encrypted) | |
3702 | return 0; | |
3703 | } | |
3704 | if (!bs->encrypted) { | |
3705 | return -EINVAL; | |
3706 | } else if (!bs->drv || !bs->drv->bdrv_set_key) { | |
3707 | return -ENOMEDIUM; | |
3708 | } | |
3709 | ret = bs->drv->bdrv_set_key(bs, key); | |
3710 | if (ret < 0) { | |
3711 | bs->valid_key = 0; | |
3712 | } else if (!bs->valid_key) { | |
3713 | bs->valid_key = 1; | |
3714 | if (bs->blk) { | |
3715 | /* call the change callback now, we skipped it on open */ | |
3716 | blk_dev_change_media_cb(bs->blk, true); | |
3717 | } | |
3718 | } | |
3719 | return ret; | |
3720 | } | |
3721 | ||
3722 | const char *bdrv_get_format_name(BlockDriverState *bs) | |
3723 | { | |
3724 | return bs->drv ? bs->drv->format_name : NULL; | |
3725 | } | |
3726 | ||
3727 | static int qsort_strcmp(const void *a, const void *b) | |
3728 | { | |
3729 | return strcmp(a, b); | |
3730 | } | |
3731 | ||
3732 | void bdrv_iterate_format(void (*it)(void *opaque, const char *name), | |
3733 | void *opaque) | |
3734 | { | |
3735 | BlockDriver *drv; | |
3736 | int count = 0; | |
3737 | int i; | |
3738 | const char **formats = NULL; | |
3739 | ||
3740 | QLIST_FOREACH(drv, &bdrv_drivers, list) { | |
3741 | if (drv->format_name) { | |
3742 | bool found = false; | |
3743 | int i = count; | |
3744 | while (formats && i && !found) { | |
3745 | found = !strcmp(formats[--i], drv->format_name); | |
3746 | } | |
3747 | ||
3748 | if (!found) { | |
3749 | formats = g_renew(const char *, formats, count + 1); | |
3750 | formats[count++] = drv->format_name; | |
3751 | } | |
3752 | } | |
3753 | } | |
3754 | ||
3755 | qsort(formats, count, sizeof(formats[0]), qsort_strcmp); | |
3756 | ||
3757 | for (i = 0; i < count; i++) { | |
3758 | it(opaque, formats[i]); | |
3759 | } | |
3760 | ||
3761 | g_free(formats); | |
3762 | } | |
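/* Usage sketch (hypothetical): the iterator above invokes the callback
 * once per format name, already de-duplicated and sorted. */
static void print_format(void *opaque, const char *name)
{
    printf("%s\n", name);
}

static void list_formats(void)
{
    bdrv_iterate_format(print_format, NULL);
}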
3763 | ||
3764 | /* Find the BlockDriverState of the block backend with the given name */ | |
3765 | /* TODO convert callers to blk_by_name(), then remove */ | |
3766 | BlockDriverState *bdrv_find(const char *name) | |
3767 | { | |
3768 | BlockBackend *blk = blk_by_name(name); | |
3769 | ||
3770 | return blk ? blk_bs(blk) : NULL; | |
3771 | } | |
3772 | ||
3773 | /* Find a named node in the graph of BlockDriverStates */ | |
3774 | BlockDriverState *bdrv_find_node(const char *node_name) | |
3775 | { | |
3776 | BlockDriverState *bs; | |
3777 | ||
3778 | assert(node_name); | |
3779 | ||
3780 | QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { | |
3781 | if (!strcmp(node_name, bs->node_name)) { | |
3782 | return bs; | |
3783 | } | |
3784 | } | |
3785 | return NULL; | |
3786 | } | |
3787 | ||
3788 | /* Put this QMP function here so it can access the static graph_bdrv_states. */ | |
3789 | BlockDeviceInfoList *bdrv_named_nodes_list(void) | |
3790 | { | |
3791 | BlockDeviceInfoList *list, *entry; | |
3792 | BlockDriverState *bs; | |
3793 | ||
3794 | list = NULL; | |
3795 | QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { | |
3796 | entry = g_malloc0(sizeof(*entry)); | |
3797 | entry->value = bdrv_block_device_info(bs); | |
3798 | entry->next = list; | |
3799 | list = entry; | |
3800 | } | |
3801 | ||
3802 | return list; | |
3803 | } | |
3804 | ||
3805 | BlockDriverState *bdrv_lookup_bs(const char *device, | |
3806 | const char *node_name, | |
3807 | Error **errp) | |
3808 | { | |
3809 | BlockBackend *blk; | |
3810 | BlockDriverState *bs; | |
3811 | ||
3812 | if (device) { | |
3813 | blk = blk_by_name(device); | |
3814 | ||
3815 | if (blk) { | |
3816 | return blk_bs(blk); | |
3817 | } | |
3818 | } | |
3819 | ||
3820 | if (node_name) { | |
3821 | bs = bdrv_find_node(node_name); | |
3822 | ||
3823 | if (bs) { | |
3824 | return bs; | |
3825 | } | |
3826 | } | |
3827 | ||
3828 | error_setg(errp, "Cannot find device=%s nor node_name=%s", | |
3829 | device ? device : "", | |
3830 | node_name ? node_name : ""); | |
3831 | return NULL; | |
3832 | } | |
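/* Usage sketch (hypothetical QMP helper): 'device' takes precedence
 * over 'node_name'; on failure bdrv_lookup_bs() sets the error, which
 * we discard here. */
static bool blockdev_exists(const char *device, const char *node_name)
{
    Error *local_err = NULL;
    BlockDriverState *bs = bdrv_lookup_bs(device, node_name, &local_err);

    if (!bs) {
        error_free(local_err);
        return false;
    }
    return true;
}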
3833 | ||
3834 | /* If 'base' is in the same chain as 'top', return true. Otherwise, | |
3835 | * return false. If either argument is NULL, return false. */ | |
3836 | bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) | |
3837 | { | |
3838 | while (top && top != base) { | |
3839 | top = top->backing_hd; | |
3840 | } | |
3841 | ||
3842 | return top != NULL; | |
3843 | } | |
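/* Usage sketch (hypothetical guard): operations such as commit require
 * 'base' to actually be part of 'top's backing chain. */
static bool check_chain(BlockDriverState *top, BlockDriverState *base,
                        Error **errp)
{
    if (!bdrv_chain_contains(top, base)) {
        error_setg(errp, "'base' is not in the backing chain of 'top'");
        return false;
    }
    return true;
}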
3844 | ||
3845 | BlockDriverState *bdrv_next_node(BlockDriverState *bs) | |
3846 | { | |
3847 | if (!bs) { | |
3848 | return QTAILQ_FIRST(&graph_bdrv_states); | |
3849 | } | |
3850 | return QTAILQ_NEXT(bs, node_list); | |
3851 | } | |
3852 | ||
3853 | BlockDriverState *bdrv_next(BlockDriverState *bs) | |
3854 | { | |
3855 | if (!bs) { | |
3856 | return QTAILQ_FIRST(&bdrv_states); | |
3857 | } | |
3858 | return QTAILQ_NEXT(bs, device_list); | |
3859 | } | |
3860 | ||
3861 | const char *bdrv_get_node_name(const BlockDriverState *bs) | |
3862 | { | |
3863 | return bs->node_name; | |
3864 | } | |
3865 | ||
3866 | /* TODO check what callers really want: bs->node_name or blk_name() */ | |
3867 | const char *bdrv_get_device_name(const BlockDriverState *bs) | |
3868 | { | |
3869 | return bs->blk ? blk_name(bs->blk) : ""; | |
3870 | } | |
3871 | ||
3872 | int bdrv_get_flags(BlockDriverState *bs) | |
3873 | { | |
3874 | return bs->open_flags; | |
3875 | } | |
3876 | ||
3877 | int bdrv_flush_all(void) | |
3878 | { | |
3879 | BlockDriverState *bs; | |
3880 | int result = 0; | |
3881 | ||
3882 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
3883 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
3884 | int ret; | |
3885 | ||
3886 | aio_context_acquire(aio_context); | |
3887 | ret = bdrv_flush(bs); | |
3888 | if (ret < 0 && !result) { | |
3889 | result = ret; | |
3890 | } | |
3891 | aio_context_release(aio_context); | |
3892 | } | |
3893 | ||
3894 | return result; | |
3895 | } | |
3896 | ||
3897 | int bdrv_has_zero_init_1(BlockDriverState *bs) | |
3898 | { | |
3899 | return 1; | |
3900 | } | |
3901 | ||
3902 | int bdrv_has_zero_init(BlockDriverState *bs) | |
3903 | { | |
3904 | assert(bs->drv); | |
3905 | ||
3906 | /* If BS is a copy-on-write image, it is initialized to | |
3907 | the contents of the base image, which may not be zeroes. */ | |
3908 | if (bs->backing_hd) { | |
3909 | return 0; | |
3910 | } | |
3911 | if (bs->drv->bdrv_has_zero_init) { | |
3912 | return bs->drv->bdrv_has_zero_init(bs); | |
3913 | } | |
3914 | ||
3915 | /* safe default */ | |
3916 | return 0; | |
3917 | } | |
3918 | ||
3919 | bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs) | |
3920 | { | |
3921 | BlockDriverInfo bdi; | |
3922 | ||
3923 | if (bs->backing_hd) { | |
3924 | return false; | |
3925 | } | |
3926 | ||
3927 | if (bdrv_get_info(bs, &bdi) == 0) { | |
3928 | return bdi.unallocated_blocks_are_zero; | |
3929 | } | |
3930 | ||
3931 | return false; | |
3932 | } | |
3933 | ||
3934 | bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) | |
3935 | { | |
3936 | BlockDriverInfo bdi; | |
3937 | ||
3938 | if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) { | |
3939 | return false; | |
3940 | } | |
3941 | ||
3942 | if (bdrv_get_info(bs, &bdi) == 0) { | |
3943 | return bdi.can_write_zeroes_with_unmap; | |
3944 | } | |
3945 | ||
3946 | return false; | |
3947 | } | |
3948 | ||
3949 | typedef struct BdrvCoGetBlockStatusData { | |
3950 | BlockDriverState *bs; | |
3951 | BlockDriverState *base; | |
3952 | int64_t sector_num; | |
3953 | int nb_sectors; | |
3954 | int *pnum; | |
3955 | int64_t ret; | |
3956 | bool done; | |
3957 | } BdrvCoGetBlockStatusData; | |
3958 | ||
3959 | /* | |
3960 | * Returns the allocation status of the specified sectors. | |
3961 | * Drivers not implementing the functionality are assumed to not support | |
3962 | * backing files, hence all their sectors are reported as allocated. | |
3963 | * | |
3964 | * If 'sector_num' is beyond the end of the disk image the return value is 0 | |
3965 | * and 'pnum' is set to 0. | |
3966 | * | |
3967 | * 'pnum' is set to the number of sectors (including and immediately following | |
3968 | * the specified sector) that are known to be in the same | |
3969 | * allocated/unallocated state. | |
3970 | * | |
3971 | * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes | |
3972 | * beyond the end of the disk image it will be clamped. | |
3973 | */ | |
3974 | static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, | |
3975 | int64_t sector_num, | |
3976 | int nb_sectors, int *pnum) | |
3977 | { | |
3978 | int64_t total_sectors; | |
3979 | int64_t n; | |
3980 | int64_t ret, ret2; | |
3981 | ||
3982 | total_sectors = bdrv_nb_sectors(bs); | |
3983 | if (total_sectors < 0) { | |
3984 | return total_sectors; | |
3985 | } | |
3986 | ||
3987 | if (sector_num >= total_sectors) { | |
3988 | *pnum = 0; | |
3989 | return 0; | |
3990 | } | |
3991 | ||
3992 | n = total_sectors - sector_num; | |
3993 | if (n < nb_sectors) { | |
3994 | nb_sectors = n; | |
3995 | } | |
3996 | ||
3997 | if (!bs->drv->bdrv_co_get_block_status) { | |
3998 | *pnum = nb_sectors; | |
3999 | ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; | |
4000 | if (bs->drv->protocol_name) { | |
4001 | ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); | |
4002 | } | |
4003 | return ret; | |
4004 | } | |
4005 | ||
4006 | ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); | |
4007 | if (ret < 0) { | |
4008 | *pnum = 0; | |
4009 | return ret; | |
4010 | } | |
4011 | ||
4012 | if (ret & BDRV_BLOCK_RAW) { | |
4013 | assert(ret & BDRV_BLOCK_OFFSET_VALID); | |
4014 | return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, | |
4015 | *pnum, pnum); | |
4016 | } | |
4017 | ||
4018 | if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { | |
4019 | ret |= BDRV_BLOCK_ALLOCATED; | |
4020 | } | |
4021 | ||
4022 | if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { | |
4023 | if (bdrv_unallocated_blocks_are_zero(bs)) { | |
4024 | ret |= BDRV_BLOCK_ZERO; | |
4025 | } else if (bs->backing_hd) { | |
4026 | BlockDriverState *bs2 = bs->backing_hd; | |
4027 | int64_t nb_sectors2 = bdrv_nb_sectors(bs2); | |
4028 | if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { | |
4029 | ret |= BDRV_BLOCK_ZERO; | |
4030 | } | |
4031 | } | |
4032 | } | |
4033 | ||
4034 | if (bs->file && | |
4035 | (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && | |
4036 | (ret & BDRV_BLOCK_OFFSET_VALID)) { | |
4037 | int file_pnum; | |
4038 | ||
4039 | ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, | |
4040 | *pnum, &file_pnum); | |
4041 | if (ret2 >= 0) { | |
4042 | /* Ignore errors. This is just providing extra information; it | |
4043 | * is useful but not necessary. | |
4044 | */ | |
4045 | if (!file_pnum) { | |
4046 | /* !file_pnum indicates an offset at or beyond the EOF; it is | |
4047 | * perfectly valid for the format block driver to point to such | |
4048 | * offsets, so catch it and mark everything as zero */ | |
4049 | ret |= BDRV_BLOCK_ZERO; | |
4050 | } else { | |
4051 | /* Limit request to the range reported by the protocol driver */ | |
4052 | *pnum = file_pnum; | |
4053 | ret |= (ret2 & BDRV_BLOCK_ZERO); | |
4054 | } | |
4055 | } | |
4056 | } | |
4057 | ||
4058 | return ret; | |
4059 | } | |
4060 | ||
4061 | /* Coroutine wrapper for bdrv_get_block_status() */ | |
4062 | static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) | |
4063 | { | |
4064 | BdrvCoGetBlockStatusData *data = opaque; | |
4065 | BlockDriverState *bs = data->bs; | |
4066 | ||
4067 | data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, | |
4068 | data->pnum); | |
4069 | data->done = true; | |
4070 | } | |
4071 | ||
4072 | /* | |
4073 | * Synchronous wrapper around bdrv_co_get_block_status(). | |
4074 | * | |
4075 | * See bdrv_co_get_block_status() for details. | |
4076 | */ | |
4077 | int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, | |
4078 | int nb_sectors, int *pnum) | |
4079 | { | |
4080 | Coroutine *co; | |
4081 | BdrvCoGetBlockStatusData data = { | |
4082 | .bs = bs, | |
4083 | .sector_num = sector_num, | |
4084 | .nb_sectors = nb_sectors, | |
4085 | .pnum = pnum, | |
4086 | .done = false, | |
4087 | }; | |
4088 | ||
4089 | if (qemu_in_coroutine()) { | |
4090 | /* Fast-path if already in coroutine context */ | |
4091 | bdrv_get_block_status_co_entry(&data); | |
4092 | } else { | |
4093 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
4094 | ||
4095 | co = qemu_coroutine_create(bdrv_get_block_status_co_entry); | |
4096 | qemu_coroutine_enter(co, &data); | |
4097 | while (!data.done) { | |
4098 | aio_poll(aio_context, true); | |
4099 | } | |
4100 | } | |
4101 | return data.ret; | |
4102 | } | |
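/* Usage sketch (hypothetical): classify the first 128 sectors of an
 * image; *pnum reports how many sectors share the returned status. */
static void show_first_status(BlockDriverState *bs)
{
    int pnum;
    int64_t ret = bdrv_get_block_status(bs, 0, 128, &pnum);

    if (ret < 0) {
        return;
    }
    printf("first %d sectors:%s%s%s\n", pnum,
           (ret & BDRV_BLOCK_DATA) ? " data" : "",
           (ret & BDRV_BLOCK_ZERO) ? " zero" : "",
           (ret & BDRV_BLOCK_ALLOCATED) ? " allocated" : "");
}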
4103 | ||
4104 | int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, | |
4105 | int nb_sectors, int *pnum) | |
4106 | { | |
4107 | int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); | |
4108 | if (ret < 0) { | |
4109 | return ret; | |
4110 | } | |
4111 | return !!(ret & BDRV_BLOCK_ALLOCATED); | |
4112 | } | |
4113 | ||
4114 | /* | |
4115 | * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] | |
4116 | * | |
4117 | * Return true if the given sector is allocated in any image between | |
4118 | * BASE and TOP (inclusive). BASE can be NULL to check if the given | |
4119 | * sector is allocated in any image of the chain. Return false otherwise. | |
4120 | * | |
4121 | * 'pnum' is set to the number of sectors (including and immediately following | |
4122 | * the specified sector) that are known to be in the same | |
4123 | * allocated/unallocated state. | |
4124 | * | |
4125 | */ | |
4126 | int bdrv_is_allocated_above(BlockDriverState *top, | |
4127 | BlockDriverState *base, | |
4128 | int64_t sector_num, | |
4129 | int nb_sectors, int *pnum) | |
4130 | { | |
4131 | BlockDriverState *intermediate; | |
4132 | int ret, n = nb_sectors; | |
4133 | ||
4134 | intermediate = top; | |
4135 | while (intermediate && intermediate != base) { | |
4136 | int pnum_inter; | |
4137 | ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, | |
4138 | &pnum_inter); | |
4139 | if (ret < 0) { | |
4140 | return ret; | |
4141 | } else if (ret) { | |
4142 | *pnum = pnum_inter; | |
4143 | return 1; | |
4144 | } | |
4145 | ||
4146 | /* | |
4147 | * [sector_num, nb_sectors] is unallocated on top but an intermediate | |
4148 | * image might have | |
4149 | * | |
4150 | * [sector_num+x, nb_sectors] allocated. | |
4151 | */ | |
4152 | if (n > pnum_inter && | |
4153 | (intermediate == top || | |
4154 | sector_num + pnum_inter < intermediate->total_sectors)) { | |
4155 | n = pnum_inter; | |
4156 | } | |
4157 | ||
4158 | intermediate = intermediate->backing_hd; | |
4159 | } | |
4160 | ||
4161 | *pnum = n; | |
4162 | return 0; | |
4163 | } | |
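/* Usage sketch (hypothetical): passing base == NULL scans the entire
 * backing chain, so this asks "is sector 0 backed by data anywhere?". */
static int sector0_is_backed(BlockDriverState *top)
{
    int pnum;

    return bdrv_is_allocated_above(top, NULL, 0, 1, &pnum);
}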
4164 | ||
4165 | const char *bdrv_get_encrypted_filename(BlockDriverState *bs) | |
4166 | { | |
4167 | if (bs->backing_hd && bs->backing_hd->encrypted) | |
4168 | return bs->backing_file; | |
4169 | else if (bs->encrypted) | |
4170 | return bs->filename; | |
4171 | else | |
4172 | return NULL; | |
4173 | } | |
4174 | ||
4175 | void bdrv_get_backing_filename(BlockDriverState *bs, | |
4176 | char *filename, int filename_size) | |
4177 | { | |
4178 | pstrcpy(filename, filename_size, bs->backing_file); | |
4179 | } | |
4180 | ||
4181 | int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, | |
4182 | const uint8_t *buf, int nb_sectors) | |
4183 | { | |
4184 | BlockDriver *drv = bs->drv; | |
4185 | if (!drv) | |
4186 | return -ENOMEDIUM; | |
4187 | if (!drv->bdrv_write_compressed) | |
4188 | return -ENOTSUP; | |
4189 | if (bdrv_check_request(bs, sector_num, nb_sectors)) | |
4190 | return -EIO; | |
4191 | ||
4192 | assert(QLIST_EMPTY(&bs->dirty_bitmaps)); | |
4193 | ||
4194 | return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); | |
4195 | } | |
4196 | ||
4197 | int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) | |
4198 | { | |
4199 | BlockDriver *drv = bs->drv; | |
4200 | if (!drv) | |
4201 | return -ENOMEDIUM; | |
4202 | if (!drv->bdrv_get_info) | |
4203 | return -ENOTSUP; | |
4204 | memset(bdi, 0, sizeof(*bdi)); | |
4205 | return drv->bdrv_get_info(bs, bdi); | |
4206 | } | |
4207 | ||
4208 | ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs) | |
4209 | { | |
4210 | BlockDriver *drv = bs->drv; | |
4211 | if (drv && drv->bdrv_get_specific_info) { | |
4212 | return drv->bdrv_get_specific_info(bs); | |
4213 | } | |
4214 | return NULL; | |
4215 | } | |
4216 | ||
4217 | int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, | |
4218 | int64_t pos, int size) | |
4219 | { | |
4220 | QEMUIOVector qiov; | |
4221 | struct iovec iov = { | |
4222 | .iov_base = (void *) buf, | |
4223 | .iov_len = size, | |
4224 | }; | |
4225 | ||
4226 | qemu_iovec_init_external(&qiov, &iov, 1); | |
4227 | return bdrv_writev_vmstate(bs, &qiov, pos); | |
4228 | } | |
4229 | ||
4230 | int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) | |
4231 | { | |
4232 | BlockDriver *drv = bs->drv; | |
4233 | ||
4234 | if (!drv) { | |
4235 | return -ENOMEDIUM; | |
4236 | } else if (drv->bdrv_save_vmstate) { | |
4237 | return drv->bdrv_save_vmstate(bs, qiov, pos); | |
4238 | } else if (bs->file) { | |
4239 | return bdrv_writev_vmstate(bs->file, qiov, pos); | |
4240 | } | |
4241 | ||
4242 | return -ENOTSUP; | |
4243 | } | |
4244 | ||
4245 | int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, | |
4246 | int64_t pos, int size) | |
4247 | { | |
4248 | BlockDriver *drv = bs->drv; | |
4249 | if (!drv) | |
4250 | return -ENOMEDIUM; | |
4251 | if (drv->bdrv_load_vmstate) | |
4252 | return drv->bdrv_load_vmstate(bs, buf, pos, size); | |
4253 | if (bs->file) | |
4254 | return bdrv_load_vmstate(bs->file, buf, pos, size); | |
4255 | return -ENOTSUP; | |
4256 | } | |
4257 | ||
4258 | void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) | |
4259 | { | |
4260 | if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { | |
4261 | return; | |
4262 | } | |
4263 | ||
4264 | bs->drv->bdrv_debug_event(bs, event); | |
4265 | } | |
4266 | ||
4267 | int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, | |
4268 | const char *tag) | |
4269 | { | |
4270 | while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) { | |
4271 | bs = bs->file; | |
4272 | } | |
4273 | ||
4274 | if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) { | |
4275 | return bs->drv->bdrv_debug_breakpoint(bs, event, tag); | |
4276 | } | |
4277 | ||
4278 | return -ENOTSUP; | |
4279 | } | |
4280 | ||
4281 | int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag) | |
4282 | { | |
4283 | while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) { | |
4284 | bs = bs->file; | |
4285 | } | |
4286 | ||
4287 | if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) { | |
4288 | return bs->drv->bdrv_debug_remove_breakpoint(bs, tag); | |
4289 | } | |
4290 | ||
4291 | return -ENOTSUP; | |
4292 | } | |
4293 | ||
4294 | int bdrv_debug_resume(BlockDriverState *bs, const char *tag) | |
4295 | { | |
4296 | while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { | |
4297 | bs = bs->file; | |
4298 | } | |
4299 | ||
4300 | if (bs && bs->drv && bs->drv->bdrv_debug_resume) { | |
4301 | return bs->drv->bdrv_debug_resume(bs, tag); | |
4302 | } | |
4303 | ||
4304 | return -ENOTSUP; | |
4305 | } | |
4306 | ||
4307 | bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) | |
4308 | { | |
4309 | while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { | |
4310 | bs = bs->file; | |
4311 | } | |
4312 | ||
4313 | if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) { | |
4314 | return bs->drv->bdrv_debug_is_suspended(bs, tag); | |
4315 | } | |
4316 | ||
4317 | return false; | |
4318 | } | |
4319 | ||
4320 | int bdrv_is_snapshot(BlockDriverState *bs) | |
4321 | { | |
4322 | return !!(bs->open_flags & BDRV_O_SNAPSHOT); | |
4323 | } | |
4324 | ||
4325 | /* backing_file can either be relative, or absolute, or a protocol. If it is | |
4326 | * relative, it must be relative to the chain. So, passing in bs->filename | |
4327 | * from a BDS as backing_file should not be done, as that may be relative to | |
4328 | * the CWD rather than the chain. */ | |
4329 | BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, | |
4330 | const char *backing_file) | |
4331 | { | |
4332 | char *filename_full = NULL; | |
4333 | char *backing_file_full = NULL; | |
4334 | char *filename_tmp = NULL; | |
4335 | int is_protocol = 0; | |
4336 | BlockDriverState *curr_bs = NULL; | |
4337 | BlockDriverState *retval = NULL; | |
4338 | ||
4339 | if (!bs || !bs->drv || !backing_file) { | |
4340 | return NULL; | |
4341 | } | |
4342 | ||
4343 | filename_full = g_malloc(PATH_MAX); | |
4344 | backing_file_full = g_malloc(PATH_MAX); | |
4345 | filename_tmp = g_malloc(PATH_MAX); | |
4346 | ||
4347 | is_protocol = path_has_protocol(backing_file); | |
4348 | ||
4349 | for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) { | |
4350 | ||
4351 | /* If either of the filename paths is actually a protocol, then | |
4352 | * compare unmodified paths; otherwise make paths relative */ | |
4353 | if (is_protocol || path_has_protocol(curr_bs->backing_file)) { | |
4354 | if (strcmp(backing_file, curr_bs->backing_file) == 0) { | |
4355 | retval = curr_bs->backing_hd; | |
4356 | break; | |
4357 | } | |
4358 | } else { | |
4359 | /* If not an absolute filename path, make it relative to the current | |
4360 | * image's filename path */ | |
4361 | path_combine(filename_tmp, PATH_MAX, curr_bs->filename, | |
4362 | backing_file); | |
4363 | ||
4364 | /* We are going to compare absolute pathnames */ | |
4365 | if (!realpath(filename_tmp, filename_full)) { | |
4366 | continue; | |
4367 | } | |
4368 | ||
4369 | /* We need to make sure the backing filename we are comparing against | |
4370 | * is relative to the current image filename (or absolute) */ | |
4371 | path_combine(filename_tmp, PATH_MAX, curr_bs->filename, | |
4372 | curr_bs->backing_file); | |
4373 | ||
4374 | if (!realpath(filename_tmp, backing_file_full)) { | |
4375 | continue; | |
4376 | } | |
4377 | ||
4378 | if (strcmp(backing_file_full, filename_full) == 0) { | |
4379 | retval = curr_bs->backing_hd; | |
4380 | break; | |
4381 | } | |
4382 | } | |
4383 | } | |
4384 | ||
4385 | g_free(filename_full); | |
4386 | g_free(backing_file_full); | |
4387 | g_free(filename_tmp); | |
4388 | return retval; | |
4389 | } | |
4390 | ||
4391 | int bdrv_get_backing_file_depth(BlockDriverState *bs) | |
4392 | { | |
4393 | if (!bs->drv) { | |
4394 | return 0; | |
4395 | } | |
4396 | ||
4397 | if (!bs->backing_hd) { | |
4398 | return 0; | |
4399 | } | |
4400 | ||
4401 | return 1 + bdrv_get_backing_file_depth(bs->backing_hd); | |
4402 | } | |
4403 | ||
4404 | /**************************************************************/ | |
4405 | /* async I/Os */ | |
4406 | ||
4407 | BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, | |
4408 | QEMUIOVector *qiov, int nb_sectors, | |
4409 | BlockCompletionFunc *cb, void *opaque) | |
4410 | { | |
4411 | trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); | |
4412 | ||
4413 | return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, | |
4414 | cb, opaque, false); | |
4415 | } | |
4416 | ||
4417 | BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, | |
4418 | QEMUIOVector *qiov, int nb_sectors, | |
4419 | BlockCompletionFunc *cb, void *opaque) | |
4420 | { | |
4421 | trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); | |
4422 | ||
4423 | return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, | |
4424 | cb, opaque, true); | |
4425 | } | |
4426 | ||
4427 | BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, | |
4428 | int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, | |
4429 | BlockCompletionFunc *cb, void *opaque) | |
4430 | { | |
4431 | trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); | |
4432 | ||
4433 | return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, | |
4434 | BDRV_REQ_ZERO_WRITE | flags, | |
4435 | cb, opaque, true); | |
4436 | } | |
4437 | ||
4438 | ||
4439 | typedef struct MultiwriteCB { | |
4440 | int error; | |
4441 | int num_requests; | |
4442 | int num_callbacks; | |
4443 | struct { | |
4444 | BlockCompletionFunc *cb; | |
4445 | void *opaque; | |
4446 | QEMUIOVector *free_qiov; | |
4447 | } callbacks[]; | |
4448 | } MultiwriteCB; | |
4449 | ||
4450 | static void multiwrite_user_cb(MultiwriteCB *mcb) | |
4451 | { | |
4452 | int i; | |
4453 | ||
4454 | for (i = 0; i < mcb->num_callbacks; i++) { | |
4455 | mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); | |
4456 | if (mcb->callbacks[i].free_qiov) { | |
4457 | qemu_iovec_destroy(mcb->callbacks[i].free_qiov); | |
4458 | } | |
4459 | g_free(mcb->callbacks[i].free_qiov); | |
4460 | } | |
4461 | } | |
4462 | ||
4463 | static void multiwrite_cb(void *opaque, int ret) | |
4464 | { | |
4465 | MultiwriteCB *mcb = opaque; | |
4466 | ||
4467 | trace_multiwrite_cb(mcb, ret); | |
4468 | ||
4469 | if (ret < 0 && !mcb->error) { | |
4470 | mcb->error = ret; | |
4471 | } | |
4472 | ||
4473 | mcb->num_requests--; | |
4474 | if (mcb->num_requests == 0) { | |
4475 | multiwrite_user_cb(mcb); | |
4476 | g_free(mcb); | |
4477 | } | |
4478 | } | |
4479 | ||
4480 | static int multiwrite_req_compare(const void *a, const void *b) | |
4481 | { | |
4482 | const BlockRequest *req1 = a, *req2 = b; | |
4483 | ||
4484 | /* | |
4485 | * Note that we can't simply subtract req2->sector from req1->sector | |
4486 | * here as that could overflow the return value. | |
4487 | */ | |
4488 | if (req1->sector > req2->sector) { | |
4489 | return 1; | |
4490 | } else if (req1->sector < req2->sector) { | |
4491 | return -1; | |
4492 | } else { | |
4493 | return 0; | |
4494 | } | |
4495 | } | |
4496 | ||
4497 | /* | |
4498 | * Takes a bunch of requests and tries to merge them. Returns the number of | |
4499 | * requests that remain after merging. | |
4500 | */ | |
4501 | static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, | |
4502 | int num_reqs, MultiwriteCB *mcb) | |
4503 | { | |
4504 | int i, outidx; | |
4505 | ||
4506 | // Sort requests by start sector | |
4507 | qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); | |
4508 | ||
4509 | // Check if adjacent requests touch the same clusters. If so, combine them, | |
4510 | // filling up gaps with zero sectors. | |
4511 | outidx = 0; | |
4512 | for (i = 1; i < num_reqs; i++) { | |
4513 | int merge = 0; | |
4514 | int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; | |
4515 | ||
4516 | // Handle exactly sequential writes and overlapping writes. | |
4517 | if (reqs[i].sector <= oldreq_last) { | |
4518 | merge = 1; | |
4519 | } | |
4520 | ||
4521 | if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { | |
4522 | merge = 0; | |
4523 | } | |
4524 | ||
4525 | if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors + | |
4526 | reqs[i].nb_sectors > bs->bl.max_transfer_length) { | |
4527 | merge = 0; | |
4528 | } | |
4529 | ||
4530 | if (merge) { | |
4531 | size_t size; | |
4532 | QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); | |
4533 | qemu_iovec_init(qiov, | |
4534 | reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); | |
4535 | ||
4536 | // Add the first request to the merged one. If the requests are | |
4537 | // overlapping, drop the last sectors of the first request. | |
4538 | size = (reqs[i].sector - reqs[outidx].sector) << 9; | |
4539 | qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); | |
4540 | ||
4541 | // We shouldn't need to add any zeros between the two requests | |
4542 | assert (reqs[i].sector <= oldreq_last); | |
4543 | ||
4544 | // Add the second request | |
4545 | qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); | |
4546 | ||
4547 | // Add tail of first request, if necessary | |
4548 | if (qiov->size < reqs[outidx].qiov->size) { | |
4549 | qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size, | |
4550 | reqs[outidx].qiov->size - qiov->size); | |
4551 | } | |
4552 | ||
4553 | reqs[outidx].nb_sectors = qiov->size >> 9; | |
4554 | reqs[outidx].qiov = qiov; | |
4555 | ||
4556 | mcb->callbacks[i].free_qiov = reqs[outidx].qiov; | |
4557 | } else { | |
4558 | outidx++; | |
4559 | reqs[outidx].sector = reqs[i].sector; | |
4560 | reqs[outidx].nb_sectors = reqs[i].nb_sectors; | |
4561 | reqs[outidx].qiov = reqs[i].qiov; | |
4562 | } | |
4563 | } | |
4564 | ||
4565 | return outidx + 1; | |
4566 | } | |
4567 | ||
4568 | /* | |
4569 | * Submit multiple AIO write requests at once. | |
4570 | * | |
4571 | * On success, the function returns 0 and all requests in the reqs array have | |
4572 | * been submitted. On error, this function returns -1, and any of the | |
4573 | * requests may or may not have been submitted yet. In particular, this | |
4574 | * means the callback will be called for some requests but not others. The | |
4575 | * caller must check the error field of the BlockRequest to wait for the right | |
4576 | * callbacks (if error != 0, no callback will be called). | |
4577 | * | |
4578 | * The implementation may modify the contents of the reqs array, e.g. to merge | |
4579 | * requests. However, the fields opaque and error are left unmodified as they | |
4580 | * are used to signal failure for a single request to the caller. | |
4581 | */ | |
4582 | int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) | |
4583 | { | |
4584 | MultiwriteCB *mcb; | |
4585 | int i; | |
4586 | ||
4587 | /* don't submit writes if we don't have a medium */ | |
4588 | if (bs->drv == NULL) { | |
4589 | for (i = 0; i < num_reqs; i++) { | |
4590 | reqs[i].error = -ENOMEDIUM; | |
4591 | } | |
4592 | return -1; | |
4593 | } | |
4594 | ||
4595 | if (num_reqs == 0) { | |
4596 | return 0; | |
4597 | } | |
4598 | ||
4599 | // Create MultiwriteCB structure | |
4600 | mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); | |
4601 | mcb->num_requests = 0; | |
4602 | mcb->num_callbacks = num_reqs; | |
4603 | ||
4604 | for (i = 0; i < num_reqs; i++) { | |
4605 | mcb->callbacks[i].cb = reqs[i].cb; | |
4606 | mcb->callbacks[i].opaque = reqs[i].opaque; | |
4607 | } | |
4608 | ||
4609 | // Check for mergeable requests | |
4610 | num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); | |
4611 | ||
4612 | trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); | |
4613 | ||
4614 | /* Run the aio requests. */ | |
4615 | mcb->num_requests = num_reqs; | |
4616 | for (i = 0; i < num_reqs; i++) { | |
4617 | bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, | |
4618 | reqs[i].nb_sectors, reqs[i].flags, | |
4619 | multiwrite_cb, mcb, | |
4620 | true); | |
4621 | } | |
4622 | ||
4623 | return 0; | |
4624 | } | |
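/* Caller sketch (hypothetical): submit two prepared requests as one
 * batch. Per the contract above, after a -1 return only the requests
 * whose error field is still 0 will receive a completion callback. */
static void submit_batch(BlockDriverState *bs, BlockRequest *reqs)
{
    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        /* reqs[i].error != 0 marks requests that will never complete */
    }
}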
4625 | ||
4626 | void bdrv_aio_cancel(BlockAIOCB *acb) | |
4627 | { | |
4628 | qemu_aio_ref(acb); | |
4629 | bdrv_aio_cancel_async(acb); | |
4630 | while (acb->refcnt > 1) { | |
4631 | if (acb->aiocb_info->get_aio_context) { | |
4632 | aio_poll(acb->aiocb_info->get_aio_context(acb), true); | |
4633 | } else if (acb->bs) { | |
4634 | aio_poll(bdrv_get_aio_context(acb->bs), true); | |
4635 | } else { | |
4636 | abort(); | |
4637 | } | |
4638 | } | |
4639 | qemu_aio_unref(acb); | |
4640 | } | |
4641 | ||
4642 | /* Async version of aio cancel. The caller is not blocked if the acb implements | |
4643 | * cancel_async; otherwise we do nothing and let the request complete normally. | |
4644 | * In either case the completion callback must be called. */ | |
4645 | void bdrv_aio_cancel_async(BlockAIOCB *acb) | |
4646 | { | |
4647 | if (acb->aiocb_info->cancel_async) { | |
4648 | acb->aiocb_info->cancel_async(acb); | |
4649 | } | |
4650 | } | |
4651 | ||
4652 | /**************************************************************/ | |
4653 | /* async block device emulation */ | |
4654 | ||
4655 | typedef struct BlockAIOCBSync { | |
4656 | BlockAIOCB common; | |
4657 | QEMUBH *bh; | |
4658 | int ret; | |
4659 | /* vector translation state */ | |
4660 | QEMUIOVector *qiov; | |
4661 | uint8_t *bounce; | |
4662 | int is_write; | |
4663 | } BlockAIOCBSync; | |
4664 | ||
4665 | static const AIOCBInfo bdrv_em_aiocb_info = { | |
4666 | .aiocb_size = sizeof(BlockAIOCBSync), | |
4667 | }; | |
4668 | ||
4669 | static void bdrv_aio_bh_cb(void *opaque) | |
4670 | { | |
4671 | BlockAIOCBSync *acb = opaque; | |
4672 | ||
4673 | if (!acb->is_write && acb->ret >= 0) { | |
4674 | qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); | |
4675 | } | |
4676 | qemu_vfree(acb->bounce); | |
4677 | acb->common.cb(acb->common.opaque, acb->ret); | |
4678 | qemu_bh_delete(acb->bh); | |
4679 | acb->bh = NULL; | |
4680 | qemu_aio_unref(acb); | |
4681 | } | |
4682 | ||
4683 | static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, | |
4684 | int64_t sector_num, | |
4685 | QEMUIOVector *qiov, | |
4686 | int nb_sectors, | |
4687 | BlockCompletionFunc *cb, | |
4688 | void *opaque, | |
4689 | int is_write) | |
4690 | ||
4691 | { | |
4692 | BlockAIOCBSync *acb; | |
4693 | ||
4694 | acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); | |
4695 | acb->is_write = is_write; | |
4696 | acb->qiov = qiov; | |
4697 | acb->bounce = qemu_try_blockalign(bs, qiov->size); | |
4698 | acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); | |
4699 | ||
4700 | if (acb->bounce == NULL) { | |
4701 | acb->ret = -ENOMEM; | |
4702 | } else if (is_write) { | |
4703 | qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); | |
4704 | acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); | |
4705 | } else { | |
4706 | acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); | |
4707 | } | |
4708 | ||
4709 | qemu_bh_schedule(acb->bh); | |
4710 | ||
4711 | return &acb->common; | |
4712 | } | |
4713 | ||
4714 | static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, | |
4715 | int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, | |
4716 | BlockCompletionFunc *cb, void *opaque) | |
4717 | { | |
4718 | return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); | |
4719 | } | |
4720 | ||
4721 | static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, | |
4722 | int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, | |
4723 | BlockCompletionFunc *cb, void *opaque) | |
4724 | { | |
4725 | return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); | |
4726 | } | |
4727 | ||
4728 | ||
4729 | typedef struct BlockAIOCBCoroutine { | |
4730 | BlockAIOCB common; | |
4731 | BlockRequest req; | |
4732 | bool is_write; | |
4733 | bool *done; | |
4734 | QEMUBH* bh; | |
4735 | } BlockAIOCBCoroutine; | |
4736 | ||
4737 | static const AIOCBInfo bdrv_em_co_aiocb_info = { | |
4738 | .aiocb_size = sizeof(BlockAIOCBCoroutine), | |
4739 | }; | |
4740 | ||
4741 | static void bdrv_co_em_bh(void *opaque) | |
4742 | { | |
4743 | BlockAIOCBCoroutine *acb = opaque; | |
4744 | ||
4745 | acb->common.cb(acb->common.opaque, acb->req.error); | |
4746 | ||
4747 | qemu_bh_delete(acb->bh); | |
4748 | qemu_aio_unref(acb); | |
4749 | } | |
4750 | ||
4751 | /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ | |
4752 | static void coroutine_fn bdrv_co_do_rw(void *opaque) | |
4753 | { | |
4754 | BlockAIOCBCoroutine *acb = opaque; | |
4755 | BlockDriverState *bs = acb->common.bs; | |
4756 | ||
4757 | if (!acb->is_write) { | |
4758 | acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, | |
4759 | acb->req.nb_sectors, acb->req.qiov, acb->req.flags); | |
4760 | } else { | |
4761 | acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, | |
4762 | acb->req.nb_sectors, acb->req.qiov, acb->req.flags); | |
4763 | } | |
4764 | ||
4765 | acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); | |
4766 | qemu_bh_schedule(acb->bh); | |
4767 | } | |
4768 | ||
4769 | static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, | |
4770 | int64_t sector_num, | |
4771 | QEMUIOVector *qiov, | |
4772 | int nb_sectors, | |
4773 | BdrvRequestFlags flags, | |
4774 | BlockCompletionFunc *cb, | |
4775 | void *opaque, | |
4776 | bool is_write) | |
4777 | { | |
4778 | Coroutine *co; | |
4779 | BlockAIOCBCoroutine *acb; | |
4780 | ||
4781 | acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); | |
4782 | acb->req.sector = sector_num; | |
4783 | acb->req.nb_sectors = nb_sectors; | |
4784 | acb->req.qiov = qiov; | |
4785 | acb->req.flags = flags; | |
4786 | acb->is_write = is_write; | |
4787 | ||
4788 | co = qemu_coroutine_create(bdrv_co_do_rw); | |
4789 | qemu_coroutine_enter(co, acb); | |
4790 | ||
4791 | return &acb->common; | |
4792 | } | |
4793 | ||
4794 | static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) | |
4795 | { | |
4796 | BlockAIOCBCoroutine *acb = opaque; | |
4797 | BlockDriverState *bs = acb->common.bs; | |
4798 | ||
4799 | acb->req.error = bdrv_co_flush(bs); | |
4800 | acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); | |
4801 | qemu_bh_schedule(acb->bh); | |
4802 | } | |
4803 | ||
4804 | BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, | |
4805 | BlockCompletionFunc *cb, void *opaque) | |
4806 | { | |
4807 | trace_bdrv_aio_flush(bs, opaque); | |
4808 | ||
4809 | Coroutine *co; | |
4810 | BlockAIOCBCoroutine *acb; | |
4811 | ||
4812 | acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); | |
4813 | ||
4814 | co = qemu_coroutine_create(bdrv_aio_flush_co_entry); | |
4815 | qemu_coroutine_enter(co, acb); | |
4816 | ||
4817 | return &acb->common; | |
4818 | } | |
4819 | ||
4820 | static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) | |
4821 | { | |
4822 | BlockAIOCBCoroutine *acb = opaque; | |
4823 | BlockDriverState *bs = acb->common.bs; | |
4824 | ||
4825 | acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); | |
4826 | acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); | |
4827 | qemu_bh_schedule(acb->bh); | |
4828 | } | |
4829 | ||
4830 | BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, | |
4831 | int64_t sector_num, int nb_sectors, | |
4832 | BlockCompletionFunc *cb, void *opaque) | |
4833 | { | |
4834 | Coroutine *co; | |
4835 | BlockAIOCBCoroutine *acb; | |
4836 | ||
4837 | trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); | |
4838 | ||
4839 | acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); | |
4840 | acb->req.sector = sector_num; | |
4841 | acb->req.nb_sectors = nb_sectors; | |
4842 | co = qemu_coroutine_create(bdrv_aio_discard_co_entry); | |
4843 | qemu_coroutine_enter(co, acb); | |
4844 | ||
4845 | return &acb->common; | |
4846 | } | |
4847 | ||
4848 | void bdrv_init(void) | |
4849 | { | |
4850 | module_call_init(MODULE_INIT_BLOCK); | |
4851 | } | |
4852 | ||
4853 | void bdrv_init_with_whitelist(void) | |
4854 | { | |
4855 | use_bdrv_whitelist = 1; | |
4856 | bdrv_init(); | |
4857 | } | |
4858 | ||
4859 | void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, | |
4860 | BlockCompletionFunc *cb, void *opaque) | |
4861 | { | |
4862 | BlockAIOCB *acb; | |
4863 | ||
4864 | acb = g_slice_alloc(aiocb_info->aiocb_size); | |
4865 | acb->aiocb_info = aiocb_info; | |
4866 | acb->bs = bs; | |
4867 | acb->cb = cb; | |
4868 | acb->opaque = opaque; | |
4869 | acb->refcnt = 1; | |
4870 | return acb; | |
4871 | } | |
4872 | ||
4873 | void qemu_aio_ref(void *p) | |
4874 | { | |
4875 | BlockAIOCB *acb = p; | |
4876 | acb->refcnt++; | |
4877 | } | |
4878 | ||
4879 | void qemu_aio_unref(void *p) | |
4880 | { | |
4881 | BlockAIOCB *acb = p; | |
4882 | assert(acb->refcnt > 0); | |
4883 | if (--acb->refcnt == 0) { | |
4884 | g_slice_free1(acb->aiocb_info->aiocb_size, acb); | |
4885 | } | |
4886 | } | |
4887 | ||
4888 | /**************************************************************/ | |
4889 | /* Coroutine block device emulation */ | |
4890 | ||
4891 | typedef struct CoroutineIOCompletion { | |
4892 | Coroutine *coroutine; | |
4893 | int ret; | |
4894 | } CoroutineIOCompletion; | |
4895 | ||
4896 | static void bdrv_co_io_em_complete(void *opaque, int ret) | |
4897 | { | |
4898 | CoroutineIOCompletion *co = opaque; | |
4899 | ||
4900 | co->ret = ret; | |
4901 | qemu_coroutine_enter(co->coroutine, NULL); | |
4902 | } | |
4903 | ||
4904 | static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, | |
4905 | int nb_sectors, QEMUIOVector *iov, | |
4906 | bool is_write) | |
4907 | { | |
4908 | CoroutineIOCompletion co = { | |
4909 | .coroutine = qemu_coroutine_self(), | |
4910 | }; | |
4911 | BlockAIOCB *acb; | |
4912 | ||
4913 | if (is_write) { | |
4914 | acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, | |
4915 | bdrv_co_io_em_complete, &co); | |
4916 | } else { | |
4917 | acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, | |
4918 | bdrv_co_io_em_complete, &co); | |
4919 | } | |
4920 | ||
4921 | trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); | |
4922 | if (!acb) { | |
4923 | return -EIO; | |
4924 | } | |
4925 | qemu_coroutine_yield(); | |
4926 | ||
4927 | return co.ret; | |
4928 | } | |
4929 | ||
4930 | static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, | |
4931 | int64_t sector_num, int nb_sectors, | |
4932 | QEMUIOVector *iov) | |
4933 | { | |
4934 | return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); | |
4935 | } | |
4936 | ||
4937 | static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, | |
4938 | int64_t sector_num, int nb_sectors, | |
4939 | QEMUIOVector *iov) | |
4940 | { | |
4941 | return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); | |
4942 | } | |
4943 | ||
4944 | static void coroutine_fn bdrv_flush_co_entry(void *opaque) | |
4945 | { | |
4946 | RwCo *rwco = opaque; | |
4947 | ||
4948 | rwco->ret = bdrv_co_flush(rwco->bs); | |
4949 | } | |
4950 | ||
4951 | int coroutine_fn bdrv_co_flush(BlockDriverState *bs) | |
4952 | { | |
4953 | int ret; | |
4954 | ||
4955 | if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { | |
4956 | return 0; | |
4957 | } | |
4958 | ||
4959 | /* Write back cached data to the OS even with cache=unsafe */ | |
4960 | BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); | |
4961 | if (bs->drv->bdrv_co_flush_to_os) { | |
4962 | ret = bs->drv->bdrv_co_flush_to_os(bs); | |
4963 | if (ret < 0) { | |
4964 | return ret; | |
4965 | } | |
4966 | } | |
4967 | ||
4968 | /* But don't actually force it to the disk with cache=unsafe */ | |
4969 | if (bs->open_flags & BDRV_O_NO_FLUSH) { | |
4970 | goto flush_parent; | |
4971 | } | |
4972 | ||
4973 | BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); | |
4974 | if (bs->drv->bdrv_co_flush_to_disk) { | |
4975 | ret = bs->drv->bdrv_co_flush_to_disk(bs); | |
4976 | } else if (bs->drv->bdrv_aio_flush) { | |
4977 | BlockAIOCB *acb; | |
4978 | CoroutineIOCompletion co = { | |
4979 | .coroutine = qemu_coroutine_self(), | |
4980 | }; | |
4981 | ||
4982 | acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); | |
4983 | if (acb == NULL) { | |
4984 | ret = -EIO; | |
4985 | } else { | |
4986 | qemu_coroutine_yield(); | |
4987 | ret = co.ret; | |
4988 | } | |
4989 | } else { | |
4990 | /* | |
4991 | * Some block drivers always operate in either writethrough or unsafe | |
4992 | * mode and therefore don't support bdrv_flush. Usually qemu doesn't | |
4993 | * know how the server works (because the behaviour is hardcoded or | |
4994 | * depends on server-side configuration), so we can't ensure that | |
4995 | * everything is safe on disk. Returning an error doesn't work because | |
4996 | * that would break guests even if the server operates in writethrough | |
4997 | * mode. | |
4998 | * | |
4999 | * Let's hope the user knows what they're doing. | |
5000 | */ | |
5001 | ret = 0; | |
5002 | } | |
5003 | if (ret < 0) { | |
5004 | return ret; | |
5005 | } | |
5006 | ||
5007 | /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH | |
5008 | * in the case of cache=unsafe, so there are no useless flushes. | |
5009 | */ | |
5010 | flush_parent: | |
5011 | return bdrv_co_flush(bs->file); | |
5012 | } | |
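/*
 * A format driver usually implements at most one of the flush callbacks
 * used above. A minimal sketch, assuming a hypothetical "mydrv" driver:
 *
 *     static coroutine_fn int my_flush_to_disk(BlockDriverState *bs)
 *     {
 *         (force cached metadata and data to stable storage)
 *         return 0;      (propagated as the result of bdrv_co_flush())
 *     }
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .format_name           = "mydrv",
 *         .bdrv_co_flush_to_disk = my_flush_to_disk,
 *     };
 */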
5013 | ||
5014 | void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp) | |
5015 | { | |
5016 | Error *local_err = NULL; | |
5017 | int ret; | |
5018 | ||
5019 | if (!bs->drv) { | |
5020 | return; | |
5021 | } | |
5022 | ||
5023 | if (!(bs->open_flags & BDRV_O_INCOMING)) { | |
5024 | return; | |
5025 | } | |
5026 | bs->open_flags &= ~BDRV_O_INCOMING; | |
5027 | ||
5028 | if (bs->drv->bdrv_invalidate_cache) { | |
5029 | bs->drv->bdrv_invalidate_cache(bs, &local_err); | |
5030 | } else if (bs->file) { | |
5031 | bdrv_invalidate_cache(bs->file, &local_err); | |
5032 | } | |
5033 | if (local_err) { | |
5034 | error_propagate(errp, local_err); | |
5035 | return; | |
5036 | } | |
5037 | ||
5038 | ret = refresh_total_sectors(bs, bs->total_sectors); | |
5039 | if (ret < 0) { | |
5040 | error_setg_errno(errp, -ret, "Could not refresh total sector count"); | |
5041 | return; | |
5042 | } | |
5043 | } | |
5044 | ||
5045 | void bdrv_invalidate_cache_all(Error **errp) | |
5046 | { | |
5047 | BlockDriverState *bs; | |
5048 | Error *local_err = NULL; | |
5049 | ||
5050 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
5051 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
5052 | ||
5053 | aio_context_acquire(aio_context); | |
5054 | bdrv_invalidate_cache(bs, &local_err); | |
5055 | aio_context_release(aio_context); | |
5056 | if (local_err) { | |
5057 | error_propagate(errp, local_err); | |
5058 | return; | |
5059 | } | |
5060 | } | |
5061 | } | |
5062 | ||
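/* Synchronous wrapper around bdrv_co_flush(): if we are already in
 * coroutine context, call it directly; otherwise spawn a coroutine and
 * poll the AioContext of bs until rwco.ret signals completion. */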
5063 | int bdrv_flush(BlockDriverState *bs) | |
5064 | { | |
5065 | Coroutine *co; | |
5066 | RwCo rwco = { | |
5067 | .bs = bs, | |
5068 | .ret = NOT_DONE, | |
5069 | }; | |
5070 | ||
5071 | if (qemu_in_coroutine()) { | |
5072 | /* Fast-path if already in coroutine context */ | |
5073 | bdrv_flush_co_entry(&rwco); | |
5074 | } else { | |
5075 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
5076 | ||
5077 | co = qemu_coroutine_create(bdrv_flush_co_entry); | |
5078 | qemu_coroutine_enter(co, &rwco); | |
5079 | while (rwco.ret == NOT_DONE) { | |
5080 | aio_poll(aio_context, true); | |
5081 | } | |
5082 | } | |
5083 | ||
5084 | return rwco.ret; | |
5085 | } | |
5086 | ||
5087 | typedef struct DiscardCo { | |
5088 | BlockDriverState *bs; | |
5089 | int64_t sector_num; | |
5090 | int nb_sectors; | |
5091 | int ret; | |
5092 | } DiscardCo; | |
5093 | static void coroutine_fn bdrv_discard_co_entry(void *opaque) | |
5094 | { | |
5095 | DiscardCo *rwco = opaque; | |
5096 | ||
5097 | rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); | |
5098 | } | |
5099 | ||
5100 | /* If no limit is specified in the BlockLimits, use a default | |
5101 | * of 32768 512-byte sectors (16 MiB) per request. | |
5102 | */ | |
5103 | #define MAX_DISCARD_DEFAULT 32768 | |
5104 | ||
5105 | int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, | |
5106 | int nb_sectors) | |
5107 | { | |
5108 | int max_discard; | |
5109 | ||
5110 | if (!bs->drv) { | |
5111 | return -ENOMEDIUM; | |
5112 | } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { | |
5113 | return -EIO; | |
5114 | } else if (bs->read_only) { | |
5115 | return -EROFS; | |
5116 | } | |
5117 | ||
5118 | bdrv_reset_dirty(bs, sector_num, nb_sectors); | |
5119 | ||
5120 | /* Do nothing if disabled. */ | |
5121 | if (!(bs->open_flags & BDRV_O_UNMAP)) { | |
5122 | return 0; | |
5123 | } | |
5124 | ||
5125 | if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { | |
5126 | return 0; | |
5127 | } | |
5128 | ||
5129 | max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT; | |
5130 | while (nb_sectors > 0) { | |
5131 | int ret; | |
5132 | int num = nb_sectors; | |
5133 | ||
5134 | /* align request */ | |
5135 | if (bs->bl.discard_alignment && | |
5136 | num >= bs->bl.discard_alignment && | |
5137 | sector_num % bs->bl.discard_alignment) { | |
5138 | if (num > bs->bl.discard_alignment) { | |
5139 | num = bs->bl.discard_alignment; | |
5140 | } | |
5141 | num -= sector_num % bs->bl.discard_alignment; | |
5142 | } | |
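/* Worked example: with discard_alignment == 8, sector_num == 10 and
 * nb_sectors == 100, the code above first caps num at 8 and then trims it
 * to 8 - 10 % 8 == 6, so that the next iteration starts at the aligned
 * sector 16. */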
5143 | ||
5144 | /* limit request size */ | |
5145 | if (num > max_discard) { | |
5146 | num = max_discard; | |
5147 | } | |
5148 | ||
5149 | if (bs->drv->bdrv_co_discard) { | |
5150 | ret = bs->drv->bdrv_co_discard(bs, sector_num, num); | |
5151 | } else { | |
5152 | BlockAIOCB *acb; | |
5153 | CoroutineIOCompletion co = { | |
5154 | .coroutine = qemu_coroutine_self(), | |
5155 | }; | |
5156 | ||
5157 | acb = bs->drv->bdrv_aio_discard(bs, sector_num, num, | |
5158 | bdrv_co_io_em_complete, &co); | |
5159 | if (acb == NULL) { | |
5160 | return -EIO; | |
5161 | } else { | |
5162 | qemu_coroutine_yield(); | |
5163 | ret = co.ret; | |
5164 | } | |
5165 | } | |
5166 | if (ret && ret != -ENOTSUP) { | |
5167 | return ret; | |
5168 | } | |
5169 | ||
5170 | sector_num += num; | |
5171 | nb_sectors -= num; | |
5172 | } | |
5173 | return 0; | |
5174 | } | |
5175 | ||
5176 | int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) | |
5177 | { | |
5178 | Coroutine *co; | |
5179 | DiscardCo rwco = { | |
5180 | .bs = bs, | |
5181 | .sector_num = sector_num, | |
5182 | .nb_sectors = nb_sectors, | |
5183 | .ret = NOT_DONE, | |
5184 | }; | |
5185 | ||
5186 | if (qemu_in_coroutine()) { | |
5187 | /* Fast-path if already in coroutine context */ | |
5188 | bdrv_discard_co_entry(&rwco); | |
5189 | } else { | |
5190 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
5191 | ||
5192 | co = qemu_coroutine_create(bdrv_discard_co_entry); | |
5193 | qemu_coroutine_enter(co, &rwco); | |
5194 | while (rwco.ret == NOT_DONE) { | |
5195 | aio_poll(aio_context, true); | |
5196 | } | |
5197 | } | |
5198 | ||
5199 | return rwco.ret; | |
5200 | } | |
5201 | ||
5202 | /**************************************************************/ | |
5203 | /* removable device support */ | |
5204 | ||
5205 | /** | |
5206 | * Return TRUE if the media is present | |
5207 | */ | |
5208 | int bdrv_is_inserted(BlockDriverState *bs) | |
5209 | { | |
5210 | BlockDriver *drv = bs->drv; | |
5211 | ||
5212 | if (!drv) | |
5213 | return 0; | |
5214 | if (!drv->bdrv_is_inserted) | |
5215 | return 1; | |
5216 | return drv->bdrv_is_inserted(bs); | |
5217 | } | |
5218 | ||
5219 | /** | |
5220 | * Return whether the media changed since the last call to this | |
5221 | * function, or -ENOTSUP if we don't know. Most drivers don't know. | |
5222 | */ | |
5223 | int bdrv_media_changed(BlockDriverState *bs) | |
5224 | { | |
5225 | BlockDriver *drv = bs->drv; | |
5226 | ||
5227 | if (drv && drv->bdrv_media_changed) { | |
5228 | return drv->bdrv_media_changed(bs); | |
5229 | } | |
5230 | return -ENOTSUP; | |
5231 | } | |
5232 | ||
5233 | /** | |
5234 | * If eject_flag is TRUE, eject the media. Otherwise, close the tray | |
5235 | */ | |
5236 | void bdrv_eject(BlockDriverState *bs, bool eject_flag) | |
5237 | { | |
5238 | BlockDriver *drv = bs->drv; | |
5239 | const char *device_name; | |
5240 | ||
5241 | if (drv && drv->bdrv_eject) { | |
5242 | drv->bdrv_eject(bs, eject_flag); | |
5243 | } | |
5244 | ||
5245 | device_name = bdrv_get_device_name(bs); | |
5246 | if (device_name[0] != '\0') { | |
5247 | qapi_event_send_device_tray_moved(device_name, | |
5248 | eject_flag, &error_abort); | |
5249 | } | |
5250 | } | |
5251 | ||
5252 | /** | |
5253 | * Lock or unlock the media (if it is locked, the user won't be able | |
5254 | * to eject it manually). | |
5255 | */ | |
5256 | void bdrv_lock_medium(BlockDriverState *bs, bool locked) | |
5257 | { | |
5258 | BlockDriver *drv = bs->drv; | |
5259 | ||
5260 | trace_bdrv_lock_medium(bs, locked); | |
5261 | ||
5262 | if (drv && drv->bdrv_lock_medium) { | |
5263 | drv->bdrv_lock_medium(bs, locked); | |
5264 | } | |
5265 | } | |
5266 | ||
5267 | /* needed for the generic SCSI interface */ | |
5268 | ||
5269 | int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) | |
5270 | { | |
5271 | BlockDriver *drv = bs->drv; | |
5272 | ||
5273 | if (drv && drv->bdrv_ioctl) | |
5274 | return drv->bdrv_ioctl(bs, req, buf); | |
5275 | return -ENOTSUP; | |
5276 | } | |
5277 | ||
5278 | BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, | |
5279 | unsigned long int req, void *buf, | |
5280 | BlockCompletionFunc *cb, void *opaque) | |
5281 | { | |
5282 | BlockDriver *drv = bs->drv; | |
5283 | ||
5284 | if (drv && drv->bdrv_aio_ioctl) | |
5285 | return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); | |
5286 | return NULL; | |
5287 | } | |
5288 | ||
5289 | void bdrv_set_guest_block_size(BlockDriverState *bs, int align) | |
5290 | { | |
5291 | bs->guest_block_size = align; | |
5292 | } | |
5293 | ||
5294 | void *qemu_blockalign(BlockDriverState *bs, size_t size) | |
5295 | { | |
5296 | return qemu_memalign(bdrv_opt_mem_align(bs), size); | |
5297 | } | |
5298 | ||
5299 | void *qemu_blockalign0(BlockDriverState *bs, size_t size) | |
5300 | { | |
5301 | return memset(qemu_blockalign(bs, size), 0, size); | |
5302 | } | |
5303 | ||
5304 | void *qemu_try_blockalign(BlockDriverState *bs, size_t size) | |
5305 | { | |
5306 | size_t align = bdrv_opt_mem_align(bs); | |
5307 | ||
5308 | /* Ensure that NULL is never returned on success */ | |
5309 | assert(align > 0); | |
5310 | if (size == 0) { | |
5311 | size = align; | |
5312 | } | |
5313 | ||
5314 | return qemu_try_memalign(align, size); | |
5315 | } | |
5316 | ||
5317 | void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) | |
5318 | { | |
5319 | void *mem = qemu_try_blockalign(bs, size); | |
5320 | ||
5321 | if (mem) { | |
5322 | memset(mem, 0, size); | |
5323 | } | |
5324 | ||
5325 | return mem; | |
5326 | } | |
5327 | ||
5328 | /* | |
5329 | * Check if all memory in this vector meets the preferred memory alignment. | |
5330 | */ | |
5331 | bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) | |
5332 | { | |
5333 | int i; | |
5334 | size_t alignment = bdrv_opt_mem_align(bs); | |
5335 | ||
5336 | for (i = 0; i < qiov->niov; i++) { | |
5337 | if ((uintptr_t) qiov->iov[i].iov_base % alignment) { | |
5338 | return false; | |
5339 | } | |
5340 | if (qiov->iov[i].iov_len % alignment) { | |
5341 | return false; | |
5342 | } | |
5343 | } | |
5344 | ||
5345 | return true; | |
5346 | } | |
5347 | ||
5348 | BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity, | |
5349 | Error **errp) | |
5350 | { | |
5351 | int64_t bitmap_size; | |
5352 | BdrvDirtyBitmap *bitmap; | |
5353 | ||
5354 | assert((granularity & (granularity - 1)) == 0); | |
5355 | ||
5356 | granularity >>= BDRV_SECTOR_BITS; | |
5357 | assert(granularity); | |
5358 | bitmap_size = bdrv_nb_sectors(bs); | |
5359 | if (bitmap_size < 0) { | |
5360 | error_setg_errno(errp, -bitmap_size, "could not get length of device"); | |
5361 | errno = -bitmap_size; | |
5362 | return NULL; | |
5363 | } | |
5364 | bitmap = g_new0(BdrvDirtyBitmap, 1); | |
5365 | bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1); | |
5366 | QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); | |
5367 | return bitmap; | |
5368 | } | |
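/*
 * Typical usage of the dirty bitmap API (a sketch; the granularity value
 * and the copy-out step are up to the caller, see also the helpers below):
 *
 *     Error *err = NULL;
 *     BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536, &err);
 *     ...
 *     if (bdrv_get_dirty(bs, bitmap, sector)) {
 *         (copy out the dirty data)
 *     }
 *     bdrv_release_dirty_bitmap(bs, bitmap);
 */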
5369 | ||
5370 | void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) | |
5371 | { | |
5372 | BdrvDirtyBitmap *bm, *next; | |
5373 | QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { | |
5374 | if (bm == bitmap) { | |
5375 | QLIST_REMOVE(bitmap, list); | |
5376 | hbitmap_free(bitmap->bitmap); | |
5377 | g_free(bitmap); | |
5378 | return; | |
5379 | } | |
5380 | } | |
5381 | } | |
5382 | ||
5383 | BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) | |
5384 | { | |
5385 | BdrvDirtyBitmap *bm; | |
5386 | BlockDirtyInfoList *list = NULL; | |
5387 | BlockDirtyInfoList **plist = &list; | |
5388 | ||
5389 | QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { | |
5390 | BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1); | |
5391 | BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1); | |
5392 | info->count = bdrv_get_dirty_count(bs, bm); | |
5393 | info->granularity = | |
5394 | ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); | |
5395 | entry->value = info; | |
5396 | *plist = entry; | |
5397 | plist = &entry->next; | |
5398 | } | |
5399 | ||
5400 | return list; | |
5401 | } | |
5402 | ||
5403 | int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector) | |
5404 | { | |
5405 | if (bitmap) { | |
5406 | return hbitmap_get(bitmap->bitmap, sector); | |
5407 | } else { | |
5408 | return 0; | |
5409 | } | |
5410 | } | |
5411 | ||
5412 | void bdrv_dirty_iter_init(BlockDriverState *bs, | |
5413 | BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) | |
5414 | { | |
5415 | hbitmap_iter_init(hbi, bitmap->bitmap, 0); | |
5416 | } | |
5417 | ||
5418 | void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, | |
5419 | int64_t cur_sector, int nr_sectors) | |
5420 | { | |
5421 | hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); | |
5422 | } | |
5423 | ||
5424 | void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, | |
5425 | int64_t cur_sector, int nr_sectors) | |
5426 | { | |
5427 | hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); | |
5428 | } | |
5429 | ||
5430 | static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, | |
5431 | int nr_sectors) | |
5432 | { | |
5433 | BdrvDirtyBitmap *bitmap; | |
5434 | QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { | |
5435 | hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); | |
5436 | } | |
5437 | } | |
5438 | ||
5439 | static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, | |
5440 | int nr_sectors) | |
5441 | { | |
5442 | BdrvDirtyBitmap *bitmap; | |
5443 | QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { | |
5444 | hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); | |
5445 | } | |
5446 | } | |
5447 | ||
5448 | int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) | |
5449 | { | |
5450 | return hbitmap_count(bitmap->bitmap); | |
5451 | } | |
5452 | ||
5453 | /* Get a reference to bs */ | |
5454 | void bdrv_ref(BlockDriverState *bs) | |
5455 | { | |
5456 | bs->refcnt++; | |
5457 | } | |
5458 | ||
5459 | /* Release a previously grabbed reference to bs. | |
5460 | * If, after releasing, the reference count drops to zero, the | |
5461 | * BlockDriverState is deleted. */ | |
5462 | void bdrv_unref(BlockDriverState *bs) | |
5463 | { | |
5464 | if (!bs) { | |
5465 | return; | |
5466 | } | |
5467 | assert(bs->refcnt > 0); | |
5468 | if (--bs->refcnt == 0) { | |
5469 | bdrv_delete(bs); | |
5470 | } | |
5471 | } | |
5472 | ||
5473 | struct BdrvOpBlocker { | |
5474 | Error *reason; | |
5475 | QLIST_ENTRY(BdrvOpBlocker) list; | |
5476 | }; | |
5477 | ||
5478 | bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) | |
5479 | { | |
5480 | BdrvOpBlocker *blocker; | |
5481 | assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); | |
5482 | if (!QLIST_EMPTY(&bs->op_blockers[op])) { | |
5483 | blocker = QLIST_FIRST(&bs->op_blockers[op]); | |
5484 | if (errp) { | |
5485 | error_setg(errp, "Device '%s' is busy: %s", | |
5486 | bdrv_get_device_name(bs), | |
5487 | error_get_pretty(blocker->reason)); | |
5488 | } | |
5489 | return true; | |
5490 | } | |
5491 | return false; | |
5492 | } | |
5493 | ||
5494 | void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason) | |
5495 | { | |
5496 | BdrvOpBlocker *blocker; | |
5497 | assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); | |
5498 | ||
5499 | blocker = g_new0(BdrvOpBlocker, 1); | |
5500 | blocker->reason = reason; | |
5501 | QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list); | |
5502 | } | |
5503 | ||
5504 | void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason) | |
5505 | { | |
5506 | BdrvOpBlocker *blocker, *next; | |
5507 | assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); | |
5508 | QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) { | |
5509 | if (blocker->reason == reason) { | |
5510 | QLIST_REMOVE(blocker, list); | |
5511 | g_free(blocker); | |
5512 | } | |
5513 | } | |
5514 | } | |
5515 | ||
5516 | void bdrv_op_block_all(BlockDriverState *bs, Error *reason) | |
5517 | { | |
5518 | int i; | |
5519 | for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { | |
5520 | bdrv_op_block(bs, i, reason); | |
5521 | } | |
5522 | } | |
5523 | ||
5524 | void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason) | |
5525 | { | |
5526 | int i; | |
5527 | for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { | |
5528 | bdrv_op_unblock(bs, i, reason); | |
5529 | } | |
5530 | } | |
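/*
 * Illustrative use of the op blocker API above (the reason text is
 * arbitrary; blockers are matched by the Error pointer, not the message):
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "block device is in use by a running job");
 *     bdrv_op_block_all(bs, blocker);
 *     ...
 *     bdrv_op_unblock_all(bs, blocker);
 *     error_free(blocker);
 */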
5531 | ||
5532 | bool bdrv_op_blocker_is_empty(BlockDriverState *bs) | |
5533 | { | |
5534 | int i; | |
5535 | ||
5536 | for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { | |
5537 | if (!QLIST_EMPTY(&bs->op_blockers[i])) { | |
5538 | return false; | |
5539 | } | |
5540 | } | |
5541 | return true; | |
5542 | } | |
5543 | ||
5544 | void bdrv_iostatus_enable(BlockDriverState *bs) | |
5545 | { | |
5546 | bs->iostatus_enabled = true; | |
5547 | bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; | |
5548 | } | |
5549 | ||
5550 | /* The I/O status is only enabled if the drive explicitly | |
5551 | * enables it _and_ the VM is configured to stop on errors */ | |
5552 | bool bdrv_iostatus_is_enabled(const BlockDriverState *bs) | |
5553 | { | |
5554 | return (bs->iostatus_enabled && | |
5555 | (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || | |
5556 | bs->on_write_error == BLOCKDEV_ON_ERROR_STOP || | |
5557 | bs->on_read_error == BLOCKDEV_ON_ERROR_STOP)); | |
5558 | } | |
5559 | ||
5560 | void bdrv_iostatus_disable(BlockDriverState *bs) | |
5561 | { | |
5562 | bs->iostatus_enabled = false; | |
5563 | } | |
5564 | ||
5565 | void bdrv_iostatus_reset(BlockDriverState *bs) | |
5566 | { | |
5567 | if (bdrv_iostatus_is_enabled(bs)) { | |
5568 | bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; | |
5569 | if (bs->job) { | |
5570 | block_job_iostatus_reset(bs->job); | |
5571 | } | |
5572 | } | |
5573 | } | |
5574 | ||
5575 | void bdrv_iostatus_set_err(BlockDriverState *bs, int error) | |
5576 | { | |
5577 | assert(bdrv_iostatus_is_enabled(bs)); | |
5578 | if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { | |
5579 | bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : | |
5580 | BLOCK_DEVICE_IO_STATUS_FAILED; | |
5581 | } | |
5582 | } | |
5583 | ||
5584 | void bdrv_img_create(const char *filename, const char *fmt, | |
5585 | const char *base_filename, const char *base_fmt, | |
5586 | char *options, uint64_t img_size, int flags, | |
5587 | Error **errp, bool quiet) | |
5588 | { | |
5589 | QemuOptsList *create_opts = NULL; | |
5590 | QemuOpts *opts = NULL; | |
5591 | const char *backing_fmt, *backing_file; | |
5592 | int64_t size; | |
5593 | BlockDriver *drv, *proto_drv; | |
5594 | BlockDriver *backing_drv = NULL; | |
5595 | Error *local_err = NULL; | |
5596 | int ret = 0; | |
5597 | ||
5598 | /* Find driver and parse its options */ | |
5599 | drv = bdrv_find_format(fmt); | |
5600 | if (!drv) { | |
5601 | error_setg(errp, "Unknown file format '%s'", fmt); | |
5602 | return; | |
5603 | } | |
5604 | ||
5605 | proto_drv = bdrv_find_protocol(filename, true); | |
5606 | if (!proto_drv) { | |
5607 | error_setg(errp, "Unknown protocol '%s'", filename); | |
5608 | return; | |
5609 | } | |
5610 | ||
5611 | if (!drv->create_opts) { | |
5612 | error_setg(errp, "Format driver '%s' does not support image creation", | |
5613 | drv->format_name); | |
5614 | return; | |
5615 | } | |
5616 | ||
5617 | if (!proto_drv->create_opts) { | |
5618 | error_setg(errp, "Protocol driver '%s' does not support image creation", | |
5619 | proto_drv->format_name); | |
5620 | return; | |
5621 | } | |
5622 | ||
5623 | create_opts = qemu_opts_append(create_opts, drv->create_opts); | |
5624 | create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); | |
5625 | ||
5626 | /* Create parameter list with default values */ | |
5627 | opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); | |
5628 | qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size); | |
5629 | ||
5630 | /* Parse -o options */ | |
5631 | if (options) { | |
5632 | if (qemu_opts_do_parse(opts, options, NULL) != 0) { | |
5633 | error_setg(errp, "Invalid options for file format '%s'", fmt); | |
5634 | goto out; | |
5635 | } | |
5636 | } | |
5637 | ||
5638 | if (base_filename) { | |
5639 | if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) { | |
5640 | error_setg(errp, "Backing file not supported for file format '%s'", | |
5641 | fmt); | |
5642 | goto out; | |
5643 | } | |
5644 | } | |
5645 | ||
5646 | if (base_fmt) { | |
5647 | if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) { | |
5648 | error_setg(errp, "Backing file format not supported for file " | |
5649 | "format '%s'", fmt); | |
5650 | goto out; | |
5651 | } | |
5652 | } | |
5653 | ||
5654 | backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); | |
5655 | if (backing_file) { | |
5656 | if (!strcmp(filename, backing_file)) { | |
5657 | error_setg(errp, "Error: Trying to create an image with the " | |
5658 | "same filename as the backing file"); | |
5659 | goto out; | |
5660 | } | |
5661 | } | |
5662 | ||
5663 | backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); | |
5664 | if (backing_fmt) { | |
5665 | backing_drv = bdrv_find_format(backing_fmt); | |
5666 | if (!backing_drv) { | |
5667 | error_setg(errp, "Unknown backing file format '%s'", | |
5668 | backing_fmt); | |
5669 | goto out; | |
5670 | } | |
5671 | } | |
5672 | ||
5673 | /* The size for the image must always be specified, with one exception: | |
5674 | * if we are using a backing file, we can obtain the size from there. */ | |
5675 | size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); | |
5676 | if (size == -1) { | |
5677 | if (backing_file) { | |
5678 | BlockDriverState *bs; | |
5679 | char *full_backing = g_new0(char, PATH_MAX); | |
5680 | int64_t size; | |
5681 | int back_flags; | |
5682 | ||
5683 | bdrv_get_full_backing_filename_from_filename(filename, backing_file, | |
5684 | full_backing, PATH_MAX, | |
5685 | &local_err); | |
5686 | if (local_err) { | |
5687 | g_free(full_backing); | |
5688 | goto out; | |
5689 | } | |
5690 | ||
5691 | /* backing files are always opened read-only */ | |
5692 | back_flags = | |
5693 | flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); | |
5694 | ||
5695 | bs = NULL; | |
5696 | ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags, | |
5697 | backing_drv, &local_err); | |
5698 | g_free(full_backing); | |
5699 | if (ret < 0) { | |
5700 | goto out; | |
5701 | } | |
5702 | size = bdrv_getlength(bs); | |
5703 | if (size < 0) { | |
5704 | error_setg_errno(errp, -size, "Could not get size of '%s'", | |
5705 | backing_file); | |
5706 | bdrv_unref(bs); | |
5707 | goto out; | |
5708 | } | |
5709 | ||
5710 | qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size); | |
5711 | ||
5712 | bdrv_unref(bs); | |
5713 | } else { | |
5714 | error_setg(errp, "Image creation needs a size parameter"); | |
5715 | goto out; | |
5716 | } | |
5717 | } | |
5718 | ||
5719 | if (!quiet) { | |
5720 | printf("Formatting '%s', fmt=%s", filename, fmt); | |
5721 | qemu_opts_print(opts, " "); | |
5722 | puts(""); | |
5723 | } | |
5724 | ||
5725 | ret = bdrv_create(drv, filename, opts, &local_err); | |
5726 | ||
5727 | if (ret == -EFBIG) { | |
5728 | /* This is generally a better message than whatever the driver would | |
5729 | * deliver (especially thanks to the cluster_size_hint), since the driver's | |
5730 | * message is most probably just a variant of "image too large". */ | |
5731 | const char *cluster_size_hint = ""; | |
5732 | if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) { | |
5733 | cluster_size_hint = " (try using a larger cluster size)"; | |
5734 | } | |
5735 | error_setg(errp, "The image size is too large for file format '%s'" | |
5736 | "%s", fmt, cluster_size_hint); | |
5737 | error_free(local_err); | |
5738 | local_err = NULL; | |
5739 | } | |
5740 | ||
5741 | out: | |
5742 | qemu_opts_del(opts); | |
5743 | qemu_opts_free(create_opts); | |
5744 | if (local_err) { | |
5745 | error_propagate(errp, local_err); | |
5746 | } | |
5747 | } | |
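/*
 * Illustrative call (path, format and size are made up; the caller is
 * responsible for reporting and freeing err):
 *
 *     Error *err = NULL;
 *     bdrv_img_create("/tmp/test.qcow2", "qcow2", NULL, NULL, NULL,
 *                     16 * 1024 * 1024, BDRV_O_RDWR, &err, true);
 *     if (err) {
 *         (report the error, then error_free(err))
 *     }
 */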
5748 | ||
5749 | AioContext *bdrv_get_aio_context(BlockDriverState *bs) | |
5750 | { | |
5751 | return bs->aio_context; | |
5752 | } | |
5753 | ||
5754 | void bdrv_detach_aio_context(BlockDriverState *bs) | |
5755 | { | |
5756 | BdrvAioNotifier *baf; | |
5757 | ||
5758 | if (!bs->drv) { | |
5759 | return; | |
5760 | } | |
5761 | ||
5762 | QLIST_FOREACH(baf, &bs->aio_notifiers, list) { | |
5763 | baf->detach_aio_context(baf->opaque); | |
5764 | } | |
5765 | ||
5766 | if (bs->io_limits_enabled) { | |
5767 | throttle_detach_aio_context(&bs->throttle_state); | |
5768 | } | |
5769 | if (bs->drv->bdrv_detach_aio_context) { | |
5770 | bs->drv->bdrv_detach_aio_context(bs); | |
5771 | } | |
5772 | if (bs->file) { | |
5773 | bdrv_detach_aio_context(bs->file); | |
5774 | } | |
5775 | if (bs->backing_hd) { | |
5776 | bdrv_detach_aio_context(bs->backing_hd); | |
5777 | } | |
5778 | ||
5779 | bs->aio_context = NULL; | |
5780 | } | |
5781 | ||
5782 | void bdrv_attach_aio_context(BlockDriverState *bs, | |
5783 | AioContext *new_context) | |
5784 | { | |
5785 | BdrvAioNotifier *ban; | |
5786 | ||
5787 | if (!bs->drv) { | |
5788 | return; | |
5789 | } | |
5790 | ||
5791 | bs->aio_context = new_context; | |
5792 | ||
5793 | if (bs->backing_hd) { | |
5794 | bdrv_attach_aio_context(bs->backing_hd, new_context); | |
5795 | } | |
5796 | if (bs->file) { | |
5797 | bdrv_attach_aio_context(bs->file, new_context); | |
5798 | } | |
5799 | if (bs->drv->bdrv_attach_aio_context) { | |
5800 | bs->drv->bdrv_attach_aio_context(bs, new_context); | |
5801 | } | |
5802 | if (bs->io_limits_enabled) { | |
5803 | throttle_attach_aio_context(&bs->throttle_state, new_context); | |
5804 | } | |
5805 | ||
5806 | QLIST_FOREACH(ban, &bs->aio_notifiers, list) { | |
5807 | ban->attached_aio_context(new_context, ban->opaque); | |
5808 | } | |
5809 | } | |
5810 | ||
5811 | void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) | |
5812 | { | |
5813 | bdrv_drain_all(); /* ensure there are no in-flight requests */ | |
5814 | ||
5815 | bdrv_detach_aio_context(bs); | |
5816 | ||
5817 | /* This function executes in the old AioContext so acquire the new one in | |
5818 | * case it runs in a different thread. | |
5819 | */ | |
5820 | aio_context_acquire(new_context); | |
5821 | bdrv_attach_aio_context(bs, new_context); | |
5822 | aio_context_release(new_context); | |
5823 | } | |
5824 | ||
5825 | void bdrv_add_aio_context_notifier(BlockDriverState *bs, | |
5826 | void (*attached_aio_context)(AioContext *new_context, void *opaque), | |
5827 | void (*detach_aio_context)(void *opaque), void *opaque) | |
5828 | { | |
5829 | BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1); | |
5830 | *ban = (BdrvAioNotifier){ | |
5831 | .attached_aio_context = attached_aio_context, | |
5832 | .detach_aio_context = detach_aio_context, | |
5833 | .opaque = opaque | |
5834 | }; | |
5835 | ||
5836 | QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list); | |
5837 | } | |
5838 | ||
5839 | void bdrv_remove_aio_context_notifier(BlockDriverState *bs, | |
5840 | void (*attached_aio_context)(AioContext *, | |
5841 | void *), | |
5842 | void (*detach_aio_context)(void *), | |
5843 | void *opaque) | |
5844 | { | |
5845 | BdrvAioNotifier *ban, *ban_next; | |
5846 | ||
5847 | QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { | |
5848 | if (ban->attached_aio_context == attached_aio_context && | |
5849 | ban->detach_aio_context == detach_aio_context && | |
5850 | ban->opaque == opaque) | |
5851 | { | |
5852 | QLIST_REMOVE(ban, list); | |
5853 | g_free(ban); | |
5854 | ||
5855 | return; | |
5856 | } | |
5857 | } | |
5858 | ||
5859 | abort(); | |
5860 | } | |
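/*
 * Illustrative notifier pair (my_attach, my_detach and s are hypothetical):
 *
 *     static void my_attach(AioContext *new_context, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     bdrv_add_aio_context_notifier(bs, my_attach, my_detach, s);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, my_attach, my_detach, s);
 *
 * As enforced above, removal aborts if the exact (attach, detach, opaque)
 * triple was never registered.
 */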
5861 | ||
5862 | void bdrv_add_before_write_notifier(BlockDriverState *bs, | |
5863 | NotifierWithReturn *notifier) | |
5864 | { | |
5865 | notifier_with_return_list_add(&bs->before_write_notifiers, notifier); | |
5866 | } | |
5867 | ||
5868 | int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts, | |
5869 | BlockDriverAmendStatusCB *status_cb) | |
5870 | { | |
5871 | if (!bs->drv->bdrv_amend_options) { | |
5872 | return -ENOTSUP; | |
5873 | } | |
5874 | return bs->drv->bdrv_amend_options(bs, opts, status_cb); | |
5875 | } | |
5876 | ||
5877 | /* This function will be called by the bdrv_recurse_is_first_non_filter method | |
5878 | * of block filters and by bdrv_is_first_non_filter. | |
5879 | * It is used to test if the given bs is the candidate or to recurse further | |
5880 | * down the node graph. | |
5881 | */ | |
5882 | bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs, | |
5883 | BlockDriverState *candidate) | |
5884 | { | |
5885 | /* return false if basic checks fail */ | |
5886 | if (!bs || !bs->drv) { | |
5887 | return false; | |
5888 | } | |
5889 | ||
5890 | /* the code reached a non-filter driver -> check if the bs is the same | |
5891 | * as the candidate. This is the recursion's termination condition. | |
5892 | */ | |
5893 | if (!bs->drv->is_filter) { | |
5894 | return bs == candidate; | |
5895 | } | |
5896 | /* Down this path the driver is a block filter driver */ | |
5897 | ||
5898 | /* If the block filter's recursion method is defined, use it to recurse | |
5899 | * down the node graph. | |
5900 | */ | |
5901 | if (bs->drv->bdrv_recurse_is_first_non_filter) { | |
5902 | return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate); | |
5903 | } | |
5904 | ||
5905 | /* the driver is a block filter but doesn't allow recursion -> | |
5906 | * return false */ | |
5907 | return false; | |
5908 | } | |
5909 | ||
5910 | /* This function checks if the candidate is the first non-filter bs down its | |
5911 | * bs chain. Since we don't have pointers to parents, it explores all bs | |
5912 | * chains from the top. Some filters can choose not to pass down the recursion. | |
5913 | */ | |
5914 | bool bdrv_is_first_non_filter(BlockDriverState *candidate) | |
5915 | { | |
5916 | BlockDriverState *bs; | |
5917 | ||
5918 | /* walk down the bs forest recursively */ | |
5919 | QTAILQ_FOREACH(bs, &bdrv_states, device_list) { | |
5920 | bool perm; | |
5921 | ||
5922 | /* try to recurse in this top level bs */ | |
5923 | perm = bdrv_recurse_is_first_non_filter(bs, candidate); | |
5924 | ||
5925 | /* candidate is the first non filter */ | |
5926 | if (perm) { | |
5927 | return true; | |
5928 | } | |
5929 | } | |
5930 | ||
5931 | return false; | |
5932 | } | |
5933 | ||
5934 | BlockDriverState *check_to_replace_node(const char *node_name, Error **errp) | |
5935 | { | |
5936 | BlockDriverState *to_replace_bs = bdrv_find_node(node_name); | |
5937 | AioContext *aio_context; | |
5938 | ||
5939 | if (!to_replace_bs) { | |
5940 | error_setg(errp, "Node name '%s' not found", node_name); | |
5941 | return NULL; | |
5942 | } | |
5943 | ||
5944 | aio_context = bdrv_get_aio_context(to_replace_bs); | |
5945 | aio_context_acquire(aio_context); | |
5946 | ||
5947 | if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) { | |
5948 | to_replace_bs = NULL; | |
5949 | goto out; | |
5950 | } | |
5951 | ||
5952 | /* We don't want an arbitrary node of the BDS chain to be replaced, only the | |
5953 | * topmost non-filter node, in order to prevent data corruption. | |
5954 | * Another benefit is that this test excludes backing files, which are | |
5955 | * blocked by the backing blockers. | |
5956 | */ | |
5957 | if (!bdrv_is_first_non_filter(to_replace_bs)) { | |
5958 | error_setg(errp, "Only top most non filter can be replaced"); | |
5959 | to_replace_bs = NULL; | |
5960 | goto out; | |
5961 | } | |
5962 | ||
5963 | out: | |
5964 | aio_context_release(aio_context); | |
5965 | return to_replace_bs; | |
5966 | } | |
5967 | ||
5968 | void bdrv_io_plug(BlockDriverState *bs) | |
5969 | { | |
5970 | BlockDriver *drv = bs->drv; | |
5971 | if (drv && drv->bdrv_io_plug) { | |
5972 | drv->bdrv_io_plug(bs); | |
5973 | } else if (bs->file) { | |
5974 | bdrv_io_plug(bs->file); | |
5975 | } | |
5976 | } | |
5977 | ||
5978 | void bdrv_io_unplug(BlockDriverState *bs) | |
5979 | { | |
5980 | BlockDriver *drv = bs->drv; | |
5981 | if (drv && drv->bdrv_io_unplug) { | |
5982 | drv->bdrv_io_unplug(bs); | |
5983 | } else if (bs->file) { | |
5984 | bdrv_io_unplug(bs->file); | |
5985 | } | |
5986 | } | |
5987 | ||
5988 | void bdrv_flush_io_queue(BlockDriverState *bs) | |
5989 | { | |
5990 | BlockDriver *drv = bs->drv; | |
5991 | if (drv && drv->bdrv_flush_io_queue) { | |
5992 | drv->bdrv_flush_io_queue(bs); | |
5993 | } else if (bs->file) { | |
5994 | bdrv_flush_io_queue(bs->file); | |
5995 | } | |
5996 | } | |
5997 | ||
5998 | static bool append_open_options(QDict *d, BlockDriverState *bs) | |
5999 | { | |
6000 | const QDictEntry *entry; | |
6001 | bool found_any = false; | |
6002 | ||
6003 | for (entry = qdict_first(bs->options); entry; | |
6004 | entry = qdict_next(bs->options, entry)) | |
6005 | { | |
6006 | /* Only take options for this level and exclude all non-driver-specific | |
6007 | * options */ | |
6008 | if (!strchr(qdict_entry_key(entry), '.') && | |
6009 | strcmp(qdict_entry_key(entry), "node-name")) | |
6010 | { | |
6011 | qobject_incref(qdict_entry_value(entry)); | |
6012 | qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry)); | |
6013 | found_any = true; | |
6014 | } | |
6015 | } | |
6016 | ||
6017 | return found_any; | |
6018 | } | |
6019 | ||
6020 | /* Updates the following BDS fields: | |
6021 | * - exact_filename: A filename which may be used for opening a block device | |
6022 | * which (mostly) equals the given BDS (even without any | |
6023 | * other options; so reading and writing must return the same | |
6024 | * results, but caching etc. may be different) | |
6025 | * - full_open_options: Options which, when given when opening a block device | |
6026 | * (without a filename), result in a BDS (mostly) | |
6027 | * equalling the given one | |
6028 | * - filename: If exact_filename is set, it is copied here. Otherwise, | |
6029 | * full_open_options is converted to a JSON object, prefixed with | |
6030 | * "json:" (for use through the JSON pseudo protocol) and put here. | |
6031 | */ | |
6032 | void bdrv_refresh_filename(BlockDriverState *bs) | |
6033 | { | |
6034 | BlockDriver *drv = bs->drv; | |
6035 | QDict *opts; | |
6036 | ||
6037 | if (!drv) { | |
6038 | return; | |
6039 | } | |
6040 | ||
6041 | /* This BDS's file name will most probably depend on its file's name, so | |
6042 | * refresh that first */ | |
6043 | if (bs->file) { | |
6044 | bdrv_refresh_filename(bs->file); | |
6045 | } | |
6046 | ||
6047 | if (drv->bdrv_refresh_filename) { | |
6048 | /* Obsolete information is of no use here, so drop the old file name | |
6049 | * information before refreshing it */ | |
6050 | bs->exact_filename[0] = '\0'; | |
6051 | if (bs->full_open_options) { | |
6052 | QDECREF(bs->full_open_options); | |
6053 | bs->full_open_options = NULL; | |
6054 | } | |
6055 | ||
6056 | drv->bdrv_refresh_filename(bs); | |
6057 | } else if (bs->file) { | |
6058 | /* Try to reconstruct valid information from the underlying file */ | |
6059 | bool has_open_options; | |
6060 | ||
6061 | bs->exact_filename[0] = '\0'; | |
6062 | if (bs->full_open_options) { | |
6063 | QDECREF(bs->full_open_options); | |
6064 | bs->full_open_options = NULL; | |
6065 | } | |
6066 | ||
6067 | opts = qdict_new(); | |
6068 | has_open_options = append_open_options(opts, bs); | |
6069 | ||
6070 | /* If no specific options have been given for this BDS, the filename of | |
6071 | * the underlying file should suffice for this one as well */ | |
6072 | if (bs->file->exact_filename[0] && !has_open_options) { | |
6073 | strcpy(bs->exact_filename, bs->file->exact_filename); | |
6074 | } | |
6075 | /* Reconstructing the full options QDict is simple for most format block | |
6076 | * drivers, as long as the full options are known for the underlying | |
6077 | * file BDS. The full options QDict of that file BDS should somehow | |
6078 | * contain a representation of the filename, therefore the following | |
6079 | * suffices without querying the (exact_)filename of this BDS. */ | |
6080 | if (bs->file->full_open_options) { | |
6081 | qdict_put_obj(opts, "driver", | |
6082 | QOBJECT(qstring_from_str(drv->format_name))); | |
6083 | QINCREF(bs->file->full_open_options); | |
6084 | qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options)); | |
6085 | ||
6086 | bs->full_open_options = opts; | |
6087 | } else { | |
6088 | QDECREF(opts); | |
6089 | } | |
6090 | } else if (!bs->full_open_options && qdict_size(bs->options)) { | |
6091 | /* There is no underlying file BDS (at least referenced by BDS.file), | |
6092 | * so the full options QDict should be equal to the options given | |
6093 | * specifically for this block device when it was opened (plus the | |
6094 | * driver specification). | |
6095 | * Because those options don't change, there is no need to update | |
6096 | * full_open_options when it's already set. */ | |
6097 | ||
6098 | opts = qdict_new(); | |
6099 | append_open_options(opts, bs); | |
6100 | qdict_put_obj(opts, "driver", | |
6101 | QOBJECT(qstring_from_str(drv->format_name))); | |
6102 | ||
6103 | if (bs->exact_filename[0]) { | |
6104 | /* This may not work for all block protocol drivers (some may | |
6105 | * require this filename to be parsed), but we have to find some | |
6106 | * default solution here, so just include it. If some block driver | |
6107 | * does not support pure options without any filename at all or | |
6108 | * needs some special format of the options QDict, it needs to | |
6109 | * implement the driver-specific bdrv_refresh_filename() function. | |
6110 | */ | |
6111 | qdict_put_obj(opts, "filename", | |
6112 | QOBJECT(qstring_from_str(bs->exact_filename))); | |
6113 | } | |
6114 | ||
6115 | bs->full_open_options = opts; | |
6116 | } | |
6117 | ||
6118 | if (bs->exact_filename[0]) { | |
6119 | pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename); | |
6120 | } else if (bs->full_open_options) { | |
6121 | QString *json = qobject_to_json(QOBJECT(bs->full_open_options)); | |
6122 | snprintf(bs->filename, sizeof(bs->filename), "json:%s", | |
6123 | qstring_get_str(json)); | |
6124 | QDECREF(json); | |
6125 | } | |
6126 | } | |
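/*
 * For example, a format BDS whose options cannot be reduced to a plain
 * file name ends up with a pseudo-protocol filename along these lines
 * (schematic; the concrete keys depend on the drivers involved):
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *                                       "filename": "/path/to/image.qcow2"}}
 */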
6127 | ||
6128 | /* The purpose of this accessor function is to allow device models to access | |
6129 | * the BlockAcctStats structure embedded inside a BlockDriverState without | |
6130 | * being aware of the BlockDriverState structure layout. | |
6131 | * It will go away once the BlockAcctStats structure is moved inside the | |
6132 | * device models. | |
6133 | */ | |
6134 | BlockAcctStats *bdrv_get_stats(BlockDriverState *bs) | |
6135 | { | |
6136 | return &bs->stats; | |
6137 | } |