2 * Block protocol for I/O error injection
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu-common.h"
26 #include "qemu/config-file.h"
27 #include "block/block_int.h"
28 #include "qemu/module.h"
/*
 * Per-open state of the blkdebug driver, stored in bs->opaque.
 * NOTE(review): this listing is elided — the closing brace and scalar
 * members (e.g. the state/new_state fields read by process_rule and
 * blkdebug_debug_event below) are not visible here.
 */
30 typedef struct BDRVBlkdebugState {
/* One rule list per debug event (indexed by BlkDebugEvent). */
34 QLIST_HEAD(, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
/* Inject-error rules armed for the next I/O; consumed by aio_readv/writev. */
35 QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
/* Coroutine requests currently parked by suspend rules. */
36 QLIST_HEAD(, BlkdebugSuspendedReq) suspended_reqs;
/*
 * AIO control block used to complete an injected-error request via
 * error_callback_bh().  NOTE(review): remaining members (the bh and ret
 * fields used by error_callback_bh below) are elided from this listing.
 */
39 typedef struct BlkdebugAIOCB {
40 BlockDriverAIOCB common;
/*
 * A request parked by an ACTION_SUSPEND rule; lives on the stack of the
 * suspended coroutine (see suspend_request).  NOTE(review): the co and
 * tag members referenced below are elided from this listing.
 */
45 typedef struct BlkdebugSuspendedReq {
48 QLIST_ENTRY(BlkdebugSuspendedReq) next;
49 } BlkdebugSuspendedReq;
51 static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);
/* AIOCB class descriptor for injected-error requests. */
53 static const AIOCBInfo blkdebug_aiocb_info = {
54 .aiocb_size = sizeof(BlkdebugAIOCB),
55 .cancel = blkdebug_aio_cancel,
/*
 * One configured debug rule.  Linked on the per-event list (next) and,
 * when armed by process_rule(), on active_rules (active_next).
 * NOTE(review): the event/action/state members and the options union
 * (inject/set_state/suspend, used throughout below) are elided here.
 */
64 typedef struct BlkdebugRule {
82 QLIST_ENTRY(BlkdebugRule) next;
83 QSIMPLEQ_ENTRY(BlkdebugRule) active_next;
/*
 * Config-file group "[inject-error]".  The option names for the elided
 * entries can be inferred from add_rule(): event, errno, sector, once,
 * immediately (plus state).  NOTE(review): most .name lines and the
 * .desc array wrapper are missing from this listing.
 */
86 static QemuOptsList inject_error_opts = {
87 .name = "inject-error",
88 .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
92 .type = QEMU_OPT_STRING,
96 .type = QEMU_OPT_NUMBER,
100 .type = QEMU_OPT_NUMBER,
104 .type = QEMU_OPT_NUMBER,
108 .type = QEMU_OPT_BOOL,
111 .name = "immediately",
112 .type = QEMU_OPT_BOOL,
114 { /* end of list */ }
/*
 * Config-file group "[set-state]".  add_rule() reads event, state and
 * new_state from this group.  NOTE(review): the .name lines and .desc
 * wrapper are elided from this listing.
 */
118 static QemuOptsList set_state_opts = {
120 .head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
124 .type = QEMU_OPT_STRING,
128 .type = QEMU_OPT_NUMBER,
132 .type = QEMU_OPT_NUMBER,
134 { /* end of list */ }
/* Option groups accepted by qemu_config_parse() in read_config().
 * NOTE(review): the array entries and terminator are elided. */
138 static QemuOptsList *config_groups[] = {
/*
 * Human-readable names for each BlkDebugEvent, used by
 * get_event_by_name() to translate config-file / QMP event strings.
 * Indexed by event enum value; dotted names mirror qcow2 sub-events.
 */
144 static const char *event_names[BLKDBG_EVENT_MAX] = {
145 [BLKDBG_L1_UPDATE] = "l1_update",
146 [BLKDBG_L1_GROW_ALLOC_TABLE] = "l1_grow.alloc_table",
147 [BLKDBG_L1_GROW_WRITE_TABLE] = "l1_grow.write_table",
148 [BLKDBG_L1_GROW_ACTIVATE_TABLE] = "l1_grow.activate_table",
150 [BLKDBG_L2_LOAD] = "l2_load",
151 [BLKDBG_L2_UPDATE] = "l2_update",
152 [BLKDBG_L2_UPDATE_COMPRESSED] = "l2_update_compressed",
153 [BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read",
154 [BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write",
156 [BLKDBG_READ_AIO] = "read_aio",
157 [BLKDBG_READ_BACKING_AIO] = "read_backing_aio",
158 [BLKDBG_READ_COMPRESSED] = "read_compressed",
160 [BLKDBG_WRITE_AIO] = "write_aio",
161 [BLKDBG_WRITE_COMPRESSED] = "write_compressed",
163 [BLKDBG_VMSTATE_LOAD] = "vmstate_load",
164 [BLKDBG_VMSTATE_SAVE] = "vmstate_save",
166 [BLKDBG_COW_READ] = "cow_read",
167 [BLKDBG_COW_WRITE] = "cow_write",
169 [BLKDBG_REFTABLE_LOAD] = "reftable_load",
170 [BLKDBG_REFTABLE_GROW] = "reftable_grow",
171 [BLKDBG_REFTABLE_UPDATE] = "reftable_update",
173 [BLKDBG_REFBLOCK_LOAD] = "refblock_load",
174 [BLKDBG_REFBLOCK_UPDATE] = "refblock_update",
175 [BLKDBG_REFBLOCK_UPDATE_PART] = "refblock_update_part",
176 [BLKDBG_REFBLOCK_ALLOC] = "refblock_alloc",
177 [BLKDBG_REFBLOCK_ALLOC_HOOKUP] = "refblock_alloc.hookup",
178 [BLKDBG_REFBLOCK_ALLOC_WRITE] = "refblock_alloc.write",
179 [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS] = "refblock_alloc.write_blocks",
180 [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE] = "refblock_alloc.write_table",
181 [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE] = "refblock_alloc.switch_table",
183 [BLKDBG_CLUSTER_ALLOC] = "cluster_alloc",
184 [BLKDBG_CLUSTER_ALLOC_BYTES] = "cluster_alloc_bytes",
185 [BLKDBG_CLUSTER_FREE] = "cluster_free",
187 [BLKDBG_FLUSH_TO_OS] = "flush_to_os",
188 [BLKDBG_FLUSH_TO_DISK] = "flush_to_disk",
/*
 * Look up @name in event_names[] and store the matching event index in
 * *event.  Callers treat a negative return as "unknown event".
 * NOTE(review): the return statements and closing braces are elided.
 */
191 static int get_event_by_name(const char *name, BlkDebugEvent *event)
195 for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
196 if (!strcmp(event_names[i], name)) {
/*
 * Context bundle passed through qemu_opts_foreach() into add_rule().
 * NOTE(review): the action member set by read_config() is elided here.
 */
205 struct add_rule_data {
206 BDRVBlkdebugState *s;
/*
 * qemu_opts_foreach() callback: build one BlkdebugRule from a parsed
 * option group and insert it on the per-event list of d->s.
 * NOTE(review): several lines are elided from this listing (error
 * return, switch header, break statements, closing braces).
 */
210 static int add_rule(QemuOpts *opts, void *opaque)
212 struct add_rule_data *d = opaque;
213 BDRVBlkdebugState *s = d->s;
214 const char* event_name;
216 struct BlkdebugRule *rule;
218 /* Find the right event for the rule */
219 event_name = qemu_opt_get(opts, "event");
220 if (!event_name || get_event_by_name(event_name, &event) < 0) {
224 /* Set attributes common for all actions */
225 rule = g_malloc0(sizeof(*rule));
226 *rule = (struct BlkdebugRule) {
/* state == 0 means "any state" — see the filter in process_rule(). */
229 .state = qemu_opt_get_number(opts, "state", 0),
232 /* Parse action-specific options */
234 case ACTION_INJECT_ERROR:
/* Default to EIO when no errno is given. */
235 rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
236 rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
237 rule->options.inject.immediately =
238 qemu_opt_get_bool(opts, "immediately", 0);
/* sector == -1 means "match any sector" — see aio_readv/writev. */
239 rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1);
242 case ACTION_SET_STATE:
243 rule->options.set_state.new_state =
244 qemu_opt_get_number(opts, "new_state", 0);
/* ACTION_SUSPEND: tag is heap-copied; freed in remove_rule(). */
248 rule->options.suspend.tag =
249 g_strdup(qemu_opt_get(opts, "tag"));
254 QLIST_INSERT_HEAD(&s->rules[event], rule, next);
/*
 * Unlink @rule from its per-event list and free action-owned memory;
 * only ACTION_SUSPEND owns a heap string (the tag).
 * NOTE(review): the g_free(rule) call and closing braces are elided.
 */
259 static void remove_rule(BlkdebugRule *rule)
261 switch (rule->action) {
262 case ACTION_INJECT_ERROR:
263 case ACTION_SET_STATE:
266 g_free(rule->options.suspend.tag);
270 QLIST_REMOVE(rule, next);
/*
 * Parse the blkdebug config file @filename and install its
 * inject-error and set-state rules into @s.
 * NOTE(review): the fopen/parse error paths, fclose, and the return are
 * elided from this listing.
 */
274 static int read_config(BDRVBlkdebugState *s, const char *filename)
278 struct add_rule_data d;
280 f = fopen(filename, "r");
285 ret = qemu_config_parse(f, config_groups, filename);
/* Install each parsed group as a rule of the matching action type. */
291 d.action = ACTION_INJECT_ERROR;
292 qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);
294 d.action = ACTION_SET_STATE;
295 qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);
/* Drop the parsed options now that the rules hold their own copies. */
299 qemu_opts_reset(&inject_error_opts);
300 qemu_opts_reset(&set_state_opts);
305 /* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
/*
 * Split a legacy "blkdebug:config:image" filename into the "config" and
 * "x-image" entries of @options.  Sets @errp and returns early on a
 * malformed name.  NOTE(review): early returns and the c/config_path
 * declarations are elided from this listing.
 */
306 static void blkdebug_parse_filename(const char *filename, QDict *options,
311 /* Parse the blkdebug: prefix */
312 if (!strstart(filename, "blkdebug:", &filename)) {
313 error_setg(errp, "File name string must start with 'blkdebug:'");
317 /* Parse config file path */
318 c = strchr(filename, ':');
320 error_setg(errp, "blkdebug requires both config file and image path");
325 QString *config_path;
/* qstring_from_substr() takes an inclusive end index here, hence -1. */
326 config_path = qstring_from_substr(filename, 0, c - filename - 1);
327 qdict_put(options, "config", config_path);
330 /* TODO Allow multi-level nesting and set file.filename here */
332 qdict_put(options, "x-image", qstring_from_str(filename));
/*
 * Runtime options consumed by blkdebug_open(): "config" and "x-image".
 * NOTE(review): the list .name and the option .name lines are elided.
 */
335 static QemuOptsList runtime_opts = {
337 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
341 .type = QEMU_OPT_STRING,
342 .help = "Path to the configuration file",
346 .type = QEMU_OPT_STRING,
347 .help = "[internal use only, will be removed]",
349 { /* end of list */ }
/*
 * Open the blkdebug protocol: absorb runtime options, read the optional
 * rule config file, then open the wrapped image as bs->file.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the option-free/cleanup paths, the initial-state
 * assignment, and the final return are elided from this listing.
 */
353 static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
356 BDRVBlkdebugState *s = bs->opaque;
358 Error *local_err = NULL;
359 const char *filename, *config;
362 opts = qemu_opts_create_nofail(&runtime_opts);
363 qemu_opts_absorb_qdict(opts, options, &local_err);
364 if (error_is_set(&local_err)) {
365 qerror_report_err(local_err);
366 error_free(local_err);
371 /* Read rules from config file */
372 config = qemu_opt_get(opts, "config");
374 ret = read_config(s, config);
380 /* Set initial state */
383 /* Open the backing file */
384 filename = qemu_opt_get(opts, "x-image");
385 if (filename == NULL) {
390 ret = bdrv_file_open(&bs->file, filename, NULL, flags, &local_err);
392 qerror_report_err(local_err);
393 error_free(local_err);
/*
 * Bottom half that completes an injected-error request: delete the BH,
 * invoke the caller's completion callback with the stored result, then
 * release the AIOCB.
 */
403 static void error_callback_bh(void *opaque)
405 struct BlkdebugAIOCB *acb = opaque;
406 qemu_bh_delete(acb->bh);
407 acb->common.cb(acb->common.opaque, acb->ret);
408 qemu_aio_release(acb);
/*
 * Cancel an injected-error request by releasing its AIOCB.
 * NOTE(review): the pending bottom half is not visibly deleted here —
 * confirm against the full source whether that is handled elsewhere.
 */
411 static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
413 BlkdebugAIOCB *acb = container_of(blockacb, BlkdebugAIOCB, common);
414 qemu_aio_release(acb);
/*
 * Fail a request with the rule's configured errno.  "once" rules disarm
 * all active rules after firing; "immediately" rules complete the
 * request synchronously (elided path), otherwise completion is deferred
 * to a bottom half so the callback runs outside the caller's stack.
 * NOTE(review): the immediate-completion body, acb->ret assignment and
 * return are elided from this listing.
 */
417 static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
418 BlockDriverCompletionFunc *cb, void *opaque, BlkdebugRule *rule)
420 BDRVBlkdebugState *s = bs->opaque;
421 int error = rule->options.inject.error;
422 struct BlkdebugAIOCB *acb;
425 if (rule->options.inject.once) {
/* Disarm: re-initialising the queue empties active_rules. */
426 QSIMPLEQ_INIT(&s->active_rules);
429 if (rule->options.inject.immediately) {
433 acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
436 bh = qemu_bh_new(error_callback_bh, acb);
438 qemu_bh_schedule(bh);
/*
 * Read entry point: if an armed inject-error rule matches this sector
 * range (or matches any sector via -1), fail the request; otherwise
 * forward the read to the wrapped file.
 * NOTE(review): the loop's break and closing braces are elided.
 */
443 static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
444 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
445 BlockDriverCompletionFunc *cb, void *opaque)
447 BDRVBlkdebugState *s = bs->opaque;
448 BlkdebugRule *rule = NULL;
450 QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
451 if (rule->options.inject.sector == -1 ||
452 (rule->options.inject.sector >= sector_num &&
453 rule->options.inject.sector < sector_num + nb_sectors)) {
458 if (rule && rule->options.inject.error) {
459 return inject_error(bs, cb, opaque, rule);
462 return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
/*
 * Write entry point: mirror of blkdebug_aio_readv() — inject an error
 * when an armed rule matches the sector range, else forward the write
 * to the wrapped file.
 * NOTE(review): the loop's break and closing braces are elided.
 */
465 static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
466 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
467 BlockDriverCompletionFunc *cb, void *opaque)
469 BDRVBlkdebugState *s = bs->opaque;
470 BlkdebugRule *rule = NULL;
472 QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
473 if (rule->options.inject.sector == -1 ||
474 (rule->options.inject.sector >= sector_num &&
475 rule->options.inject.sector < sector_num + nb_sectors)) {
480 if (rule && rule->options.inject.error) {
481 return inject_error(bs, cb, opaque, rule);
484 return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
/*
 * Close hook: tear down every configured rule across all event lists.
 * NOTE(review): the remove_rule() call inside the loop is elided from
 * this listing.
 */
488 static void blkdebug_close(BlockDriverState *bs)
490 BDRVBlkdebugState *s = bs->opaque;
491 BlkdebugRule *rule, *next;
494 for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
495 QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
/*
 * Park the current coroutine under the rule's tag until
 * blkdebug_debug_resume() re-enters it.  @r lives on this coroutine's
 * stack, which is safe because the coroutine does not return past the
 * yield until the request is removed from suspended_reqs again.
 * NOTE(review): the g_free of r.tag after removal is elided.
 */
501 static void suspend_request(BlockDriverState *bs, BlkdebugRule *rule)
503 BDRVBlkdebugState *s = bs->opaque;
504 BlkdebugSuspendedReq r;
506 r = (BlkdebugSuspendedReq) {
507 .co = qemu_coroutine_self(),
508 .tag = g_strdup(rule->options.suspend.tag),
512 QLIST_INSERT_HEAD(&s->suspended_reqs, &r, next);
514 printf("blkdebug: Suspended request '%s'\n", r.tag);
/* Control returns here once blkdebug_debug_resume() enters r.co. */
515 qemu_coroutine_yield();
516 printf("blkdebug: Resuming request '%s'\n", r.tag);
518 QLIST_REMOVE(&r, next);
/*
 * Apply one rule for the event being fired: arm an inject-error rule,
 * record a pending state transition, or suspend the request.  Rules
 * with state == 0 fire in any state; others only in the matching state.
 * NOTE(review): the injected-flag handling around QSIMPLEQ_INIT, the
 * breaks, and the return value are elided from this listing.
 */
522 static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
525 BDRVBlkdebugState *s = bs->opaque;
527 /* Only process rules for the current state */
528 if (rule->state && rule->state != s->state) {
532 /* Take the action */
533 switch (rule->action) {
534 case ACTION_INJECT_ERROR:
536 QSIMPLEQ_INIT(&s->active_rules);
539 QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next);
542 case ACTION_SET_STATE:
/* Deferred: blkdebug_debug_event() commits new_state after all rules ran. */
543 s->new_state = rule->options.set_state.new_state;
547 suspend_request(bs, rule);
/*
 * Debug-event hook: run every rule registered for @event.  State
 * changes are staged in new_state and committed only after the whole
 * list has been processed, so all rules see the pre-event state.
 * NOTE(review): the injected-flag initialisation is elided.
 */
553 static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
555 BDRVBlkdebugState *s = bs->opaque;
556 struct BlkdebugRule *rule, *next;
559 assert((int)event >= 0 && event < BLKDBG_EVENT_MAX);
562 s->new_state = s->state;
/* _SAFE: a suspend rule may yield and the list could change under us. */
563 QLIST_FOREACH_SAFE(rule, &s->rules[event], next, next) {
564 injected = process_rule(bs, rule, injected);
566 s->state = s->new_state;
/*
 * Install a suspend rule (a "breakpoint") for the named event, tagged
 * with @tag so blkdebug_debug_resume() can find it later.
 * NOTE(review): the error return for unknown events, the .state
 * initialiser, and the final return are elided from this listing.
 */
569 static int blkdebug_debug_breakpoint(BlockDriverState *bs, const char *event,
572 BDRVBlkdebugState *s = bs->opaque;
573 struct BlkdebugRule *rule;
574 BlkDebugEvent blkdebug_event;
576 if (get_event_by_name(event, &blkdebug_event) < 0) {
581 rule = g_malloc(sizeof(*rule));
582 *rule = (struct BlkdebugRule) {
583 .event = blkdebug_event,
584 .action = ACTION_SUSPEND,
586 .options.suspend.tag = g_strdup(tag),
589 QLIST_INSERT_HEAD(&s->rules[blkdebug_event], rule, next);
/*
 * Resume the first suspended request whose tag matches @tag by
 * re-entering its coroutine.  NOTE(review): the success/not-found
 * return values are elided — presumably 0 on match, negative otherwise;
 * confirm against the full source.
 */
594 static int blkdebug_debug_resume(BlockDriverState *bs, const char *tag)
596 BDRVBlkdebugState *s = bs->opaque;
597 BlkdebugSuspendedReq *r;
599 QLIST_FOREACH(r, &s->suspended_reqs, next) {
600 if (!strcmp(r->tag, tag)) {
601 qemu_coroutine_enter(r->co, NULL);
/*
 * Report whether any request is currently suspended under @tag.
 * NOTE(review): the true/false returns are elided from this listing.
 */
609 static bool blkdebug_debug_is_suspended(BlockDriverState *bs, const char *tag)
611 BDRVBlkdebugState *s = bs->opaque;
612 BlkdebugSuspendedReq *r;
614 QLIST_FOREACH(r, &s->suspended_reqs, next) {
615 if (!strcmp(r->tag, tag)) {
/* Length passthrough: report the size of the wrapped image file. */
622 static int64_t blkdebug_getlength(BlockDriverState *bs)
624 return bdrv_getlength(bs->file);
/*
 * Driver descriptor for the "blkdebug" protocol: wraps another image
 * file and injects errors / fires breakpoints according to its rules.
 */
627 static BlockDriver bdrv_blkdebug = {
628 .format_name = "blkdebug",
629 .protocol_name = "blkdebug",
630 .instance_size = sizeof(BDRVBlkdebugState),
632 .bdrv_parse_filename = blkdebug_parse_filename,
633 .bdrv_file_open = blkdebug_open,
634 .bdrv_close = blkdebug_close,
635 .bdrv_getlength = blkdebug_getlength,
637 .bdrv_aio_readv = blkdebug_aio_readv,
638 .bdrv_aio_writev = blkdebug_aio_writev,
640 .bdrv_debug_event = blkdebug_debug_event,
641 .bdrv_debug_breakpoint = blkdebug_debug_breakpoint,
642 .bdrv_debug_resume = blkdebug_debug_resume,
643 .bdrv_debug_is_suspended = blkdebug_debug_is_suspended,
/* Module constructor: register the blkdebug driver with the block layer. */
646 static void bdrv_blkdebug_init(void)
648 bdrv_register(&bdrv_blkdebug);
651 block_init(bdrv_blkdebug_init);