]> Git Repo - qemu.git/blame - blockjob.c
job: Move cancelled to Job
[qemu.git] / blockjob.c
CommitLineData
2f0c9fe6
PB
1/*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2011 IBM Corp.
5 * Copyright (c) 2012 Red Hat, Inc.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
d38ea87a 26#include "qemu/osdep.h"
2f0c9fe6 27#include "qemu-common.h"
737e150e 28#include "block/block.h"
c87621ea 29#include "block/blockjob_int.h"
737e150e 30#include "block/block_int.h"
c9de4050 31#include "block/trace.h"
373340b2 32#include "sysemu/block-backend.h"
e688df6b 33#include "qapi/error.h"
9af23989 34#include "qapi/qapi-events-block-core.h"
cc7a8ea7 35#include "qapi/qmp/qerror.h"
10817bf0 36#include "qemu/coroutine.h"
1de7afc9 37#include "qemu/timer.h"
2f0c9fe6 38
fc24908e
PB
39/* Right now, this mutex is only needed to synchronize accesses to job->busy
40 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
41 * block_job_enter. */
42static QemuMutex block_job_mutex;
43
44static void block_job_lock(void)
45{
46 qemu_mutex_lock(&block_job_mutex);
47}
48
49static void block_job_unlock(void)
50{
51 qemu_mutex_unlock(&block_job_mutex);
52}
53
54static void __attribute__((__constructor__)) block_job_init(void)
55{
56 qemu_mutex_init(&block_job_mutex);
57}
58
8254b6d9
JS
59static void block_job_event_cancelled(BlockJob *job);
60static void block_job_event_completed(BlockJob *job, const char *msg);
5f241594 61static int block_job_event_pending(BlockJob *job);
aa9ef2e6 62static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
8254b6d9 63
c55a832f
FZ
64/* Transactional group of block jobs */
65struct BlockJobTxn {
66
67 /* Is this txn being cancelled? */
68 bool aborting;
69
70 /* List of jobs */
71 QLIST_HEAD(, BlockJob) jobs;
72
73 /* Reference count */
74 int refcnt;
75};
76
88691b37
PB
77/*
78 * The block job API is composed of two categories of functions.
79 *
80 * The first includes functions used by the monitor. The monitor is
81 * peculiar in that it accesses the block job list with block_job_get, and
82 * therefore needs consistency across block_job_get and the actual operation
83 * (e.g. block_job_set_speed). The consistency is achieved with
84 * aio_context_acquire/release. These functions are declared in blockjob.h.
85 *
86 * The second includes functions used by the block job drivers and sometimes
87 * by the core block layer. These do not care about locking, because the
88 * whole coroutine runs under the AioContext lock, and are declared in
89 * blockjob_int.h.
90 */
91
e7c1d78b 92static bool is_block_job(Job *job)
a7112795 93{
e7c1d78b
KW
94 return job_type(job) == JOB_TYPE_BACKUP ||
95 job_type(job) == JOB_TYPE_COMMIT ||
96 job_type(job) == JOB_TYPE_MIRROR ||
97 job_type(job) == JOB_TYPE_STREAM;
98}
99
100BlockJob *block_job_next(BlockJob *bjob)
101{
102 Job *job = bjob ? &bjob->job : NULL;
103
104 do {
105 job = job_next(job);
106 } while (job && !is_block_job(job));
107
108 return job ? container_of(job, BlockJob, job) : NULL;
a7112795
AG
109}
110
ffb1f10c
AG
111BlockJob *block_job_get(const char *id)
112{
e7c1d78b 113 Job *job = job_get(id);
ffb1f10c 114
e7c1d78b
KW
115 if (job && is_block_job(job)) {
116 return container_of(job, BlockJob, job);
117 } else {
118 return NULL;
ffb1f10c 119 }
ffb1f10c
AG
120}
121
c8ab5c2d
PB
122BlockJobTxn *block_job_txn_new(void)
123{
124 BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
125 QLIST_INIT(&txn->jobs);
126 txn->refcnt = 1;
127 return txn;
128}
129
130static void block_job_txn_ref(BlockJobTxn *txn)
131{
132 txn->refcnt++;
133}
134
135void block_job_txn_unref(BlockJobTxn *txn)
136{
137 if (txn && --txn->refcnt == 0) {
138 g_free(txn);
139 }
140}
141
142void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
143{
144 if (!txn) {
145 return;
146 }
147
148 assert(!job->txn);
149 job->txn = txn;
150
151 QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
152 block_job_txn_ref(txn);
153}
154
a865cebb
MAL
155static void block_job_txn_del_job(BlockJob *job)
156{
157 if (job->txn) {
158 QLIST_REMOVE(job, txn_list);
159 block_job_txn_unref(job->txn);
160 job->txn = NULL;
161 }
162}
163
4c7e813c
SH
164/* Assumes the block_job_mutex is held */
165static bool block_job_timer_pending(BlockJob *job)
166{
167 return timer_pending(&job->sleep_timer);
168}
169
170/* Assumes the block_job_mutex is held */
171static bool block_job_timer_not_pending(BlockJob *job)
172{
173 return !block_job_timer_pending(job);
174}
175
f321dcb5
PB
176static void block_job_pause(BlockJob *job)
177{
178 job->pause_count++;
179}
180
181static void block_job_resume(BlockJob *job)
182{
183 assert(job->pause_count > 0);
184 job->pause_count--;
185 if (job->pause_count) {
186 return;
187 }
4c7e813c
SH
188
189 /* kick only if no timer is pending */
190 block_job_enter_cond(job, block_job_timer_not_pending);
f321dcb5
PB
191}
192
05b0d8e3
PB
193static void block_job_attached_aio_context(AioContext *new_context,
194 void *opaque);
195static void block_job_detach_aio_context(void *opaque);
196
80fa2c75 197void block_job_free(Job *job)
05b0d8e3 198{
80fa2c75
KW
199 BlockJob *bjob = container_of(job, BlockJob, job);
200 BlockDriverState *bs = blk_bs(bjob->blk);
201
202 assert(!bjob->txn);
203
204 bs->job = NULL;
205 block_job_remove_all_bdrv(bjob);
206 blk_remove_aio_context_notifier(bjob->blk,
207 block_job_attached_aio_context,
208 block_job_detach_aio_context, bjob);
209 blk_unref(bjob->blk);
210 error_free(bjob->blocker);
211 assert(!timer_pending(&bjob->sleep_timer));
05b0d8e3
PB
212}
213
463e0be1
SH
214static void block_job_attached_aio_context(AioContext *new_context,
215 void *opaque)
216{
217 BlockJob *job = opaque;
218
219 if (job->driver->attached_aio_context) {
220 job->driver->attached_aio_context(job, new_context);
221 }
222
223 block_job_resume(job);
224}
225
bae8196d
PB
226static void block_job_drain(BlockJob *job)
227{
228 /* If job is !job->busy this kicks it into the next pause point. */
229 block_job_enter(job);
230
231 blk_drain(job->blk);
232 if (job->driver->drain) {
233 job->driver->drain(job);
234 }
235}
236
463e0be1
SH
237static void block_job_detach_aio_context(void *opaque)
238{
239 BlockJob *job = opaque;
240
241 /* In case the job terminates during aio_poll()... */
80fa2c75 242 job_ref(&job->job);
463e0be1
SH
243
244 block_job_pause(job);
245
463e0be1 246 while (!job->paused && !job->completed) {
bae8196d 247 block_job_drain(job);
463e0be1
SH
248 }
249
80fa2c75 250 job_unref(&job->job);
463e0be1
SH
251}
252
f321dcb5
PB
253static char *child_job_get_parent_desc(BdrvChild *c)
254{
255 BlockJob *job = c->opaque;
252291ea 256 return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
f321dcb5
PB
257}
258
ad90feba 259static void child_job_drained_begin(BdrvChild *c)
f321dcb5 260{
ad90feba 261 BlockJob *job = c->opaque;
f321dcb5
PB
262 block_job_pause(job);
263}
264
ad90feba 265static void child_job_drained_end(BdrvChild *c)
f321dcb5 266{
ad90feba 267 BlockJob *job = c->opaque;
f321dcb5
PB
268 block_job_resume(job);
269}
270
ad90feba
KW
271static const BdrvChildRole child_job = {
272 .get_parent_desc = child_job_get_parent_desc,
273 .drained_begin = child_job_drained_begin,
274 .drained_end = child_job_drained_end,
275 .stay_at_node = true,
f321dcb5
PB
276};
277
bbc02b90
KW
278void block_job_remove_all_bdrv(BlockJob *job)
279{
280 GSList *l;
281 for (l = job->nodes; l; l = l->next) {
282 BdrvChild *c = l->data;
283 bdrv_op_unblock_all(c->bs, job->blocker);
284 bdrv_root_unref_child(c);
285 }
286 g_slist_free(job->nodes);
287 job->nodes = NULL;
288}
289
76d554e2
KW
290int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
291 uint64_t perm, uint64_t shared_perm, Error **errp)
23d402d4 292{
76d554e2
KW
293 BdrvChild *c;
294
295 c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
296 job, errp);
297 if (c == NULL) {
298 return -EPERM;
299 }
300
301 job->nodes = g_slist_prepend(job->nodes, c);
23d402d4
AG
302 bdrv_ref(bs);
303 bdrv_op_block_all(bs, job->blocker);
76d554e2
KW
304
305 return 0;
23d402d4
AG
306}
307
559b935f
JS
308bool block_job_is_internal(BlockJob *job)
309{
33e9e9bd 310 return (job->job.id == NULL);
559b935f
JS
311}
312
5ccac6f1
JS
313static bool block_job_started(BlockJob *job)
314{
315 return job->co;
316}
317
bd21935b
KW
318const BlockJobDriver *block_job_driver(BlockJob *job)
319{
320 return job->driver;
321}
322
e3796a24
JS
323/**
324 * All jobs must allow a pause point before entering their job proper. This
325 * ensures that jobs can be paused prior to being started, then resumed later.
326 */
327static void coroutine_fn block_job_co_entry(void *opaque)
328{
329 BlockJob *job = opaque;
330
331 assert(job && job->driver && job->driver->start);
332 block_job_pause_point(job);
333 job->driver->start(job);
334}
335
fc24908e
PB
/* Timer callback: wake the coroutine sleeping in block_job_sleep_ns(). */
static void block_job_sleep_timer_cb(void *opaque)
{
    block_job_enter(opaque);
}
342
5ccac6f1
JS
343void block_job_start(BlockJob *job)
344{
345 assert(job && !block_job_started(job) && job->paused &&
e3796a24
JS
346 job->driver && job->driver->start);
347 job->co = qemu_coroutine_create(block_job_co_entry, job);
348 job->pause_count--;
349 job->busy = true;
350 job->paused = false;
a50c2ab8 351 job_state_transition(&job->job, JOB_STATUS_RUNNING);
aef4278c 352 bdrv_coroutine_enter(blk_bs(job->blk), job->co);
5ccac6f1
JS
353}
354
3925cd3b
JS
355static void block_job_decommission(BlockJob *job)
356{
357 assert(job);
358 job->completed = true;
359 job->busy = false;
360 job->paused = false;
361 job->deferred_to_main_loop = true;
a865cebb 362 block_job_txn_del_job(job);
a50c2ab8 363 job_state_transition(&job->job, JOB_STATUS_NULL);
80fa2c75 364 job_unref(&job->job);
3925cd3b
JS
365}
366
75f71059
JS
367static void block_job_do_dismiss(BlockJob *job)
368{
369 block_job_decommission(job);
370}
371
e0cf0364
JS
372static void block_job_conclude(BlockJob *job)
373{
a50c2ab8 374 job_state_transition(&job->job, JOB_STATUS_CONCLUDED);
75f71059
JS
375 if (job->auto_dismiss || !block_job_started(job)) {
376 block_job_do_dismiss(job);
377 }
e0cf0364
JS
378}
379
35d6b368
JS
380static void block_job_update_rc(BlockJob *job)
381{
daa7f2f9 382 if (!job->ret && job_is_cancelled(&job->job)) {
35d6b368
JS
383 job->ret = -ECANCELED;
384 }
385 if (job->ret) {
a50c2ab8 386 job_state_transition(&job->job, JOB_STATUS_ABORTING);
35d6b368
JS
387 }
388}
389
2da4617a
JS
390static int block_job_prepare(BlockJob *job)
391{
392 if (job->ret == 0 && job->driver->prepare) {
393 job->ret = job->driver->prepare(job);
394 }
395 return job->ret;
396}
397
43628d93
JS
398static void block_job_commit(BlockJob *job)
399{
400 assert(!job->ret);
401 if (job->driver->commit) {
402 job->driver->commit(job);
403 }
404}
405
406static void block_job_abort(BlockJob *job)
407{
408 assert(job->ret);
409 if (job->driver->abort) {
410 job->driver->abort(job);
411 }
412}
413
414static void block_job_clean(BlockJob *job)
415{
416 if (job->driver->clean) {
417 job->driver->clean(job);
418 }
419}
420
11b61fbc 421static int block_job_finalize_single(BlockJob *job)
c55a832f 422{
4fb588e9
PB
423 assert(job->completed);
424
35d6b368
JS
425 /* Ensure abort is called for late-transactional failures */
426 block_job_update_rc(job);
10a3fbb0 427
c55a832f 428 if (!job->ret) {
43628d93 429 block_job_commit(job);
c55a832f 430 } else {
43628d93 431 block_job_abort(job);
e8a40bf7 432 }
43628d93 433 block_job_clean(job);
8254b6d9
JS
434
435 if (job->cb) {
436 job->cb(job->opaque, job->ret);
437 }
5ccac6f1
JS
438
439 /* Emit events only if we actually started */
440 if (block_job_started(job)) {
daa7f2f9 441 if (job_is_cancelled(&job->job)) {
5ccac6f1
JS
442 block_job_event_cancelled(job);
443 } else {
444 const char *msg = NULL;
445 if (job->ret < 0) {
446 msg = strerror(-job->ret);
447 }
448 block_job_event_completed(job, msg);
8254b6d9 449 }
8254b6d9
JS
450 }
451
a865cebb 452 block_job_txn_del_job(job);
e0cf0364 453 block_job_conclude(job);
2da4617a 454 return 0;
c55a832f
FZ
455}
456
b76e4458 457static void block_job_cancel_async(BlockJob *job, bool force)
4c241cf5
PB
458{
459 if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
460 block_job_iostatus_reset(job);
461 }
462 if (job->user_paused) {
463 /* Do not call block_job_enter here, the caller will handle it. */
464 job->user_paused = false;
465 job->pause_count--;
466 }
daa7f2f9 467 job->job.cancelled = true;
b76e4458
LL
468 /* To prevent 'force == false' overriding a previous 'force == true' */
469 job->force |= force;
4c241cf5
PB
470}
471
5f241594 472static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock)
efe4d4b7
JS
473{
474 AioContext *ctx;
475 BlockJob *job, *next;
2da4617a 476 int rc = 0;
efe4d4b7
JS
477
478 QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
5f241594
JS
479 if (lock) {
480 ctx = blk_get_aio_context(job->blk);
481 aio_context_acquire(ctx);
482 }
2da4617a 483 rc = fn(job);
5f241594
JS
484 if (lock) {
485 aio_context_release(ctx);
486 }
2da4617a
JS
487 if (rc) {
488 break;
489 }
efe4d4b7 490 }
2da4617a 491 return rc;
efe4d4b7
JS
492}
493
c8ab5c2d
PB
494static int block_job_finish_sync(BlockJob *job,
495 void (*finish)(BlockJob *, Error **errp),
496 Error **errp)
497{
498 Error *local_err = NULL;
499 int ret;
500
501 assert(blk_bs(job->blk)->job == job);
502
80fa2c75 503 job_ref(&job->job);
c8ab5c2d 504
4fb588e9
PB
505 if (finish) {
506 finish(job, &local_err);
507 }
c8ab5c2d
PB
508 if (local_err) {
509 error_propagate(errp, local_err);
80fa2c75 510 job_unref(&job->job);
c8ab5c2d
PB
511 return -EBUSY;
512 }
513 /* block_job_drain calls block_job_enter, and it should be enough to
514 * induce progress until the job completes or moves to the main thread.
515 */
516 while (!job->deferred_to_main_loop && !job->completed) {
517 block_job_drain(job);
518 }
519 while (!job->completed) {
520 aio_poll(qemu_get_aio_context(), true);
521 }
daa7f2f9
KW
522 ret = (job_is_cancelled(&job->job) && job->ret == 0)
523 ? -ECANCELED : job->ret;
80fa2c75 524 job_unref(&job->job);
c8ab5c2d
PB
525 return ret;
526}
527
c55a832f
FZ
528static void block_job_completed_txn_abort(BlockJob *job)
529{
530 AioContext *ctx;
531 BlockJobTxn *txn = job->txn;
4fb588e9 532 BlockJob *other_job;
c55a832f
FZ
533
534 if (txn->aborting) {
535 /*
536 * We are cancelled by another job, which will handle everything.
537 */
538 return;
539 }
540 txn->aborting = true;
4fb588e9
PB
541 block_job_txn_ref(txn);
542
c55a832f
FZ
543 /* We are the first failed job. Cancel other jobs. */
544 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
b6d2e599 545 ctx = blk_get_aio_context(other_job->blk);
c55a832f
FZ
546 aio_context_acquire(ctx);
547 }
4fb588e9
PB
548
549 /* Other jobs are effectively cancelled by us, set the status for
550 * them; this job, however, may or may not be cancelled, depending
551 * on the caller, so leave it. */
c55a832f 552 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
4fb588e9 553 if (other_job != job) {
b76e4458 554 block_job_cancel_async(other_job, false);
c55a832f 555 }
c55a832f 556 }
4fb588e9
PB
557 while (!QLIST_EMPTY(&txn->jobs)) {
558 other_job = QLIST_FIRST(&txn->jobs);
b6d2e599 559 ctx = blk_get_aio_context(other_job->blk);
4fb588e9 560 if (!other_job->completed) {
daa7f2f9 561 assert(job_is_cancelled(&other_job->job));
4fb588e9
PB
562 block_job_finish_sync(other_job, NULL, NULL);
563 }
11b61fbc 564 block_job_finalize_single(other_job);
c55a832f
FZ
565 aio_context_release(ctx);
566 }
4fb588e9
PB
567
568 block_job_txn_unref(txn);
c55a832f
FZ
569}
570
11b61fbc
JS
571static int block_job_needs_finalize(BlockJob *job)
572{
573 return !job->auto_finalize;
574}
575
576static void block_job_do_finalize(BlockJob *job)
577{
578 int rc;
579 assert(job && job->txn);
580
581 /* prepare the transaction to complete */
582 rc = block_job_txn_apply(job->txn, block_job_prepare, true);
583 if (rc) {
584 block_job_completed_txn_abort(job);
585 } else {
586 block_job_txn_apply(job->txn, block_job_finalize_single, true);
587 }
588}
589
c55a832f
FZ
590static void block_job_completed_txn_success(BlockJob *job)
591{
c55a832f 592 BlockJobTxn *txn = job->txn;
efe4d4b7 593 BlockJob *other_job;
2da4617a 594
a50c2ab8 595 job_state_transition(&job->job, JOB_STATUS_WAITING);
e8af5686 596
c55a832f
FZ
597 /*
598 * Successful completion, see if there are other running jobs in this
599 * txn.
600 */
601 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
602 if (!other_job->completed) {
603 return;
604 }
c55a832f 605 assert(other_job->ret == 0);
c55a832f 606 }
2da4617a 607
5f241594 608 block_job_txn_apply(txn, block_job_event_pending, false);
11b61fbc
JS
609
610 /* If no jobs need manual finalization, automatically do so */
611 if (block_job_txn_apply(txn, block_job_needs_finalize, false) == 0) {
612 block_job_do_finalize(job);
613 }
c55a832f
FZ
614}
615
2f0c9fe6
PB
616void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
617{
aa9ef2e6 618 int64_t old_speed = job->speed;
2f0c9fe6 619
a50c2ab8 620 if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
0ec4dfb8
JS
621 return;
622 }
18bb6928
KW
623 if (speed < 0) {
624 error_setg(errp, QERR_INVALID_PARAMETER, "speed");
2f0c9fe6
PB
625 return;
626 }
627
18bb6928
KW
628 ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);
629
2f0c9fe6 630 job->speed = speed;
d4fce188 631 if (speed && speed <= old_speed) {
aa9ef2e6
JS
632 return;
633 }
634
635 /* kick only if a timer is pending */
636 block_job_enter_cond(job, block_job_timer_pending);
2f0c9fe6
PB
637}
638
dee81d51
KW
639int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
640{
641 if (!job->speed) {
642 return 0;
643 }
644
645 return ratelimit_calculate_delay(&job->limit, n);
646}
647
aeae883b
PB
648void block_job_complete(BlockJob *job, Error **errp)
649{
559b935f 650 /* Should not be reachable via external interface for internal jobs */
33e9e9bd 651 assert(job->job.id);
a50c2ab8 652 if (job_apply_verb(&job->job, JOB_VERB_COMPLETE, errp)) {
0ec4dfb8
JS
653 return;
654 }
daa7f2f9
KW
655 if (job->pause_count || job_is_cancelled(&job->job) ||
656 !job->driver->complete)
657 {
9df229c3 658 error_setg(errp, "The active block job '%s' cannot be completed",
33e9e9bd 659 job->job.id);
aeae883b
PB
660 return;
661 }
662
3fc4b10a 663 job->driver->complete(job, errp);
aeae883b
PB
664}
665
11b61fbc
JS
666void block_job_finalize(BlockJob *job, Error **errp)
667{
33e9e9bd 668 assert(job && job->job.id);
a50c2ab8 669 if (job_apply_verb(&job->job, JOB_VERB_FINALIZE, errp)) {
11b61fbc
JS
670 return;
671 }
672 block_job_do_finalize(job);
673}
674
75f71059
JS
675void block_job_dismiss(BlockJob **jobptr, Error **errp)
676{
677 BlockJob *job = *jobptr;
678 /* similarly to _complete, this is QMP-interface only. */
33e9e9bd 679 assert(job->job.id);
a50c2ab8 680 if (job_apply_verb(&job->job, JOB_VERB_DISMISS, errp)) {
75f71059
JS
681 return;
682 }
683
684 block_job_do_dismiss(job);
685 *jobptr = NULL;
686}
687
0ec4dfb8 688void block_job_user_pause(BlockJob *job, Error **errp)
0df4ba58 689{
a50c2ab8 690 if (job_apply_verb(&job->job, JOB_VERB_PAUSE, errp)) {
0ec4dfb8
JS
691 return;
692 }
693 if (job->user_paused) {
694 error_setg(errp, "Job is already paused");
695 return;
696 }
0df4ba58
JS
697 job->user_paused = true;
698 block_job_pause(job);
699}
700
0df4ba58
JS
701bool block_job_user_paused(BlockJob *job)
702{
6573d9c6 703 return job->user_paused;
0df4ba58
JS
704}
705
0ec4dfb8 706void block_job_user_resume(BlockJob *job, Error **errp)
0df4ba58 707{
0ec4dfb8
JS
708 assert(job);
709 if (!job->user_paused || job->pause_count <= 0) {
710 error_setg(errp, "Can't resume a job that was not paused");
711 return;
0df4ba58 712 }
a50c2ab8 713 if (job_apply_verb(&job->job, JOB_VERB_RESUME, errp)) {
0ec4dfb8
JS
714 return;
715 }
716 block_job_iostatus_reset(job);
717 job->user_paused = false;
718 block_job_resume(job);
0df4ba58
JS
719}
720
b76e4458 721void block_job_cancel(BlockJob *job, bool force)
8acc72a4 722{
a50c2ab8 723 if (job->job.status == JOB_STATUS_CONCLUDED) {
75f71059 724 block_job_do_dismiss(job);
11b61fbc
JS
725 return;
726 }
b76e4458 727 block_job_cancel_async(job, force);
11b61fbc 728 if (!block_job_started(job)) {
5ccac6f1 729 block_job_completed(job, -ECANCELED);
11b61fbc
JS
730 } else if (job->deferred_to_main_loop) {
731 block_job_completed_txn_abort(job);
732 } else {
733 block_job_enter(job);
5ccac6f1 734 }
8acc72a4
PB
735}
736
b76e4458 737void block_job_user_cancel(BlockJob *job, bool force, Error **errp)
0ec4dfb8 738{
a50c2ab8 739 if (job_apply_verb(&job->job, JOB_VERB_CANCEL, errp)) {
0ec4dfb8
JS
740 return;
741 }
b76e4458 742 block_job_cancel(job, force);
0ec4dfb8
JS
743}
744
345f9e1b
HR
745/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
746 * used with block_job_finish_sync() without the need for (rather nasty)
747 * function pointer casts there. */
748static void block_job_cancel_err(BlockJob *job, Error **errp)
749{
b76e4458 750 block_job_cancel(job, false);
345f9e1b
HR
751}
752
753int block_job_cancel_sync(BlockJob *job)
754{
755 return block_job_finish_sync(job, &block_job_cancel_err, NULL);
756}
757
a1a2af07
KW
758void block_job_cancel_sync_all(void)
759{
760 BlockJob *job;
761 AioContext *aio_context;
762
e7c1d78b 763 while ((job = block_job_next(NULL))) {
b6d2e599 764 aio_context = blk_get_aio_context(job->blk);
a1a2af07
KW
765 aio_context_acquire(aio_context);
766 block_job_cancel_sync(job);
767 aio_context_release(aio_context);
768 }
769}
770
345f9e1b
HR
771int block_job_complete_sync(BlockJob *job, Error **errp)
772{
773 return block_job_finish_sync(job, &block_job_complete, errp);
774}
775
05df8a6a
KW
776void block_job_progress_update(BlockJob *job, uint64_t done)
777{
778 job->offset += done;
779}
780
781void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining)
782{
783 job->len = job->offset + remaining;
784}
785
559b935f 786BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
30e628b7 787{
559b935f
JS
788 BlockJobInfo *info;
789
790 if (block_job_is_internal(job)) {
791 error_setg(errp, "Cannot query QEMU internal jobs");
792 return NULL;
793 }
794 info = g_new0(BlockJobInfo, 1);
252291ea 795 info->type = g_strdup(job_type_str(&job->job));
33e9e9bd 796 info->device = g_strdup(job->job.id);
32c81a4a 797 info->len = job->len;
fc24908e 798 info->busy = atomic_read(&job->busy);
751ebd76 799 info->paused = job->pause_count > 0;
32c81a4a
PB
800 info->offset = job->offset;
801 info->speed = job->speed;
802 info->io_status = job->iostatus;
ef6dbf1e 803 info->ready = job->ready;
a50c2ab8 804 info->status = job->job.status;
b40dacdc
JS
805 info->auto_finalize = job->auto_finalize;
806 info->auto_dismiss = job->auto_dismiss;
ab9ba614
JS
807 info->has_error = job->ret != 0;
808 info->error = job->ret ? g_strdup(strerror(-job->ret)) : NULL;
30e628b7
PB
809 return info;
810}
32c81a4a
PB
811
812static void block_job_iostatus_set_err(BlockJob *job, int error)
813{
814 if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
815 job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
816 BLOCK_DEVICE_IO_STATUS_FAILED;
817 }
818}
819
8254b6d9 820static void block_job_event_cancelled(BlockJob *job)
bcada37b 821{
559b935f
JS
822 if (block_job_is_internal(job)) {
823 return;
824 }
825
252291ea 826 qapi_event_send_block_job_cancelled(job_type(&job->job),
33e9e9bd 827 job->job.id,
bcada37b
WX
828 job->len,
829 job->offset,
830 job->speed,
831 &error_abort);
832}
32c81a4a 833
8254b6d9 834static void block_job_event_completed(BlockJob *job, const char *msg)
a66a2a36 835{
559b935f
JS
836 if (block_job_is_internal(job)) {
837 return;
838 }
839
252291ea 840 qapi_event_send_block_job_completed(job_type(&job->job),
33e9e9bd 841 job->job.id,
bcada37b
WX
842 job->len,
843 job->offset,
844 job->speed,
845 !!msg,
846 msg,
847 &error_abort);
a66a2a36
PB
848}
849
5f241594
JS
850static int block_job_event_pending(BlockJob *job)
851{
a50c2ab8 852 job_state_transition(&job->job, JOB_STATUS_PENDING);
5f241594 853 if (!job->auto_finalize && !block_job_is_internal(job)) {
252291ea 854 qapi_event_send_block_job_pending(job_type(&job->job),
33e9e9bd 855 job->job.id,
5f241594
JS
856 &error_abort);
857 }
858 return 0;
859}
860
88691b37
PB
861/*
862 * API for block job drivers and the block layer. These functions are
863 * declared in blockjob_int.h.
864 */
865
866void *block_job_create(const char *job_id, const BlockJobDriver *driver,
75859b94 867 BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
88691b37
PB
868 uint64_t shared_perm, int64_t speed, int flags,
869 BlockCompletionFunc *cb, void *opaque, Error **errp)
870{
871 BlockBackend *blk;
872 BlockJob *job;
873 int ret;
874
875 if (bs->job) {
876 error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
877 return NULL;
878 }
879
880 if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
881 job_id = bdrv_get_device_name(bs);
882 if (!*job_id) {
883 error_setg(errp, "An explicit job ID is required for this node");
884 return NULL;
885 }
886 }
887
888 if (job_id) {
889 if (flags & BLOCK_JOB_INTERNAL) {
890 error_setg(errp, "Cannot specify job ID for internal block job");
891 return NULL;
892 }
88691b37
PB
893 }
894
895 blk = blk_new(perm, shared_perm);
896 ret = blk_insert_bs(blk, bs, errp);
897 if (ret < 0) {
898 blk_unref(blk);
899 return NULL;
900 }
901
33e9e9bd
KW
902 job = job_create(job_id, &driver->job_driver, errp);
903 if (job == NULL) {
904 blk_unref(blk);
905 return NULL;
906 }
907
e7c1d78b 908 assert(is_block_job(&job->job));
80fa2c75 909 assert(job->job.driver->free == &block_job_free);
e7c1d78b 910
88691b37 911 job->driver = driver;
88691b37
PB
912 job->blk = blk;
913 job->cb = cb;
914 job->opaque = opaque;
915 job->busy = false;
916 job->paused = true;
917 job->pause_count = 1;
5f241594 918 job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE);
75f71059 919 job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS);
fc24908e
PB
920 aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
921 QEMU_CLOCK_REALTIME, SCALE_NS,
922 block_job_sleep_timer_cb, job);
88691b37
PB
923
924 error_setg(&job->blocker, "block device is in use by block job: %s",
252291ea 925 job_type_str(&job->job));
88691b37
PB
926 block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
927 bs->job = job;
928
88691b37
PB
929 bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
930
88691b37
PB
931 blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
932 block_job_detach_aio_context, job);
933
934 /* Only set speed when necessary to avoid NotSupported error */
935 if (speed != 0) {
936 Error *local_err = NULL;
937
938 block_job_set_speed(job, speed, &local_err);
939 if (local_err) {
3925cd3b 940 block_job_early_fail(job);
88691b37
PB
941 error_propagate(errp, local_err);
942 return NULL;
943 }
944 }
75859b94
JS
945
946 /* Single jobs are modeled as single-job transactions for sake of
947 * consolidating the job management logic */
948 if (!txn) {
949 txn = block_job_txn_new();
950 block_job_txn_add_job(txn, job);
951 block_job_txn_unref(txn);
952 } else {
953 block_job_txn_add_job(txn, job);
954 }
955
88691b37
PB
956 return job;
957}
958
88691b37
PB
959void block_job_early_fail(BlockJob *job)
960{
a50c2ab8 961 assert(job->job.status == JOB_STATUS_CREATED);
3925cd3b 962 block_job_decommission(job);
88691b37
PB
963}
964
965void block_job_completed(BlockJob *job, int ret)
966{
75859b94 967 assert(job && job->txn && !job->completed);
88691b37 968 assert(blk_bs(job->blk)->job == job);
88691b37
PB
969 job->completed = true;
970 job->ret = ret;
35d6b368
JS
971 block_job_update_rc(job);
972 trace_block_job_completed(job, ret, job->ret);
973 if (job->ret) {
88691b37
PB
974 block_job_completed_txn_abort(job);
975 } else {
976 block_job_completed_txn_success(job);
977 }
978}
979
980static bool block_job_should_pause(BlockJob *job)
981{
982 return job->pause_count > 0;
983}
984
fc24908e
PB
985/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
986 * Reentering the job coroutine with block_job_enter() before the timer has
987 * expired is allowed and cancels the timer.
988 *
989 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
990 * called explicitly. */
991static void block_job_do_yield(BlockJob *job, uint64_t ns)
356f59b8 992{
fc24908e
PB
993 block_job_lock();
994 if (ns != -1) {
995 timer_mod(&job->sleep_timer, ns);
996 }
356f59b8 997 job->busy = false;
fc24908e 998 block_job_unlock();
356f59b8
PB
999 qemu_coroutine_yield();
1000
1001 /* Set by block_job_enter before re-entering the coroutine. */
1002 assert(job->busy);
1003}
1004
88691b37
PB
1005void coroutine_fn block_job_pause_point(BlockJob *job)
1006{
1007 assert(job && block_job_started(job));
1008
1009 if (!block_job_should_pause(job)) {
1010 return;
1011 }
daa7f2f9 1012 if (job_is_cancelled(&job->job)) {
88691b37
PB
1013 return;
1014 }
1015
1016 if (job->driver->pause) {
1017 job->driver->pause(job);
1018 }
1019
daa7f2f9 1020 if (block_job_should_pause(job) && !job_is_cancelled(&job->job)) {
a50c2ab8
KW
1021 JobStatus status = job->job.status;
1022 job_state_transition(&job->job, status == JOB_STATUS_READY
1023 ? JOB_STATUS_STANDBY
1024 : JOB_STATUS_PAUSED);
88691b37 1025 job->paused = true;
fc24908e 1026 block_job_do_yield(job, -1);
88691b37 1027 job->paused = false;
a50c2ab8 1028 job_state_transition(&job->job, status);
88691b37
PB
1029 }
1030
1031 if (job->driver->resume) {
1032 job->driver->resume(job);
1033 }
1034}
1035
aa9ef2e6
JS
1036/*
1037 * Conditionally enter a block_job pending a call to fn() while
1038 * under the block_job_lock critical section.
1039 */
1040static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
88691b37 1041{
eb05e011
PB
1042 if (!block_job_started(job)) {
1043 return;
1044 }
1045 if (job->deferred_to_main_loop) {
1046 return;
1047 }
1048
fc24908e 1049 block_job_lock();
356f59b8 1050 if (job->busy) {
fc24908e 1051 block_job_unlock();
356f59b8 1052 return;
88691b37 1053 }
356f59b8 1054
aa9ef2e6
JS
1055 if (fn && !fn(job)) {
1056 block_job_unlock();
1057 return;
1058 }
1059
fc24908e
PB
1060 assert(!job->deferred_to_main_loop);
1061 timer_del(&job->sleep_timer);
356f59b8 1062 job->busy = true;
fc24908e 1063 block_job_unlock();
356f59b8 1064 aio_co_wake(job->co);
88691b37
PB
1065}
1066
aa9ef2e6
JS
1067void block_job_enter(BlockJob *job)
1068{
1069 block_job_enter_cond(job, NULL);
1070}
1071
5bf1d5a7 1072void block_job_sleep_ns(BlockJob *job, int64_t ns)
88691b37
PB
1073{
1074 assert(job->busy);
1075
1076 /* Check cancellation *before* setting busy = false, too! */
daa7f2f9 1077 if (job_is_cancelled(&job->job)) {
88691b37
PB
1078 return;
1079 }
1080
88691b37 1081 if (!block_job_should_pause(job)) {
fc24908e 1082 block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
88691b37 1083 }
88691b37
PB
1084
1085 block_job_pause_point(job);
1086}
1087
1088void block_job_yield(BlockJob *job)
1089{
1090 assert(job->busy);
1091
1092 /* Check cancellation *before* setting busy = false, too! */
daa7f2f9 1093 if (job_is_cancelled(&job->job)) {
88691b37
PB
1094 return;
1095 }
1096
88691b37 1097 if (!block_job_should_pause(job)) {
fc24908e 1098 block_job_do_yield(job, -1);
88691b37 1099 }
88691b37
PB
1100
1101 block_job_pause_point(job);
1102}
1103
2caf63a9
PB
1104void block_job_iostatus_reset(BlockJob *job)
1105{
4c241cf5
PB
1106 if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1107 return;
1108 }
1109 assert(job->user_paused && job->pause_count > 0);
2caf63a9
PB
1110 job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1111}
1112
bcada37b 1113void block_job_event_ready(BlockJob *job)
a66a2a36 1114{
a50c2ab8 1115 job_state_transition(&job->job, JOB_STATUS_READY);
ef6dbf1e
HR
1116 job->ready = true;
1117
559b935f
JS
1118 if (block_job_is_internal(job)) {
1119 return;
1120 }
1121
252291ea 1122 qapi_event_send_block_job_ready(job_type(&job->job),
33e9e9bd 1123 job->job.id,
518848a2
MA
1124 job->len,
1125 job->offset,
1126 job->speed, &error_abort);
a66a2a36
PB
1127}
1128
81e254dc 1129BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
32c81a4a
PB
1130 int is_read, int error)
1131{
1132 BlockErrorAction action;
1133
1134 switch (on_err) {
1135 case BLOCKDEV_ON_ERROR_ENOSPC:
8c398252 1136 case BLOCKDEV_ON_ERROR_AUTO:
a589569f
WX
1137 action = (error == ENOSPC) ?
1138 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
32c81a4a
PB
1139 break;
1140 case BLOCKDEV_ON_ERROR_STOP:
a589569f 1141 action = BLOCK_ERROR_ACTION_STOP;
32c81a4a
PB
1142 break;
1143 case BLOCKDEV_ON_ERROR_REPORT:
a589569f 1144 action = BLOCK_ERROR_ACTION_REPORT;
32c81a4a
PB
1145 break;
1146 case BLOCKDEV_ON_ERROR_IGNORE:
a589569f 1147 action = BLOCK_ERROR_ACTION_IGNORE;
32c81a4a
PB
1148 break;
1149 default:
1150 abort();
1151 }
559b935f 1152 if (!block_job_is_internal(job)) {
33e9e9bd 1153 qapi_event_send_block_job_error(job->job.id,
559b935f
JS
1154 is_read ? IO_OPERATION_TYPE_READ :
1155 IO_OPERATION_TYPE_WRITE,
1156 action, &error_abort);
1157 }
a589569f 1158 if (action == BLOCK_ERROR_ACTION_STOP) {
0ec4dfb8 1159 block_job_pause(job);
751ebd76 1160 /* make the pause user visible, which will be resumed from QMP. */
0ec4dfb8 1161 job->user_paused = true;
32c81a4a 1162 block_job_iostatus_set_err(job, error);
32c81a4a
PB
1163 }
1164 return action;
1165}
dec7d421
SH
1166
1167typedef struct {
1168 BlockJob *job;
dec7d421
SH
1169 AioContext *aio_context;
1170 BlockJobDeferToMainLoopFn *fn;
1171 void *opaque;
1172} BlockJobDeferToMainLoopData;
1173
1174static void block_job_defer_to_main_loop_bh(void *opaque)
1175{
1176 BlockJobDeferToMainLoopData *data = opaque;
1177 AioContext *aio_context;
1178
dec7d421
SH
1179 /* Prevent race with block_job_defer_to_main_loop() */
1180 aio_context_acquire(data->aio_context);
1181
1182 /* Fetch BDS AioContext again, in case it has changed */
b6d2e599 1183 aio_context = blk_get_aio_context(data->job->blk);
d79df2a2
PB
1184 if (aio_context != data->aio_context) {
1185 aio_context_acquire(aio_context);
1186 }
dec7d421
SH
1187
1188 data->fn(data->job, data->opaque);
1189
d79df2a2
PB
1190 if (aio_context != data->aio_context) {
1191 aio_context_release(aio_context);
1192 }
dec7d421
SH
1193
1194 aio_context_release(data->aio_context);
1195
1196 g_free(data);
1197}
1198
1199void block_job_defer_to_main_loop(BlockJob *job,
1200 BlockJobDeferToMainLoopFn *fn,
1201 void *opaque)
1202{
1203 BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
1204 data->job = job;
b6d2e599 1205 data->aio_context = blk_get_aio_context(job->blk);
dec7d421
SH
1206 data->fn = fn;
1207 data->opaque = opaque;
794f0141 1208 job->deferred_to_main_loop = true;
dec7d421 1209
fffb6e12
PB
1210 aio_bh_schedule_oneshot(qemu_get_aio_context(),
1211 block_job_defer_to_main_loop_bh, data);
dec7d421 1212}
This page took 0.453675 seconds and 4 git commands to generate.