/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor. The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed). The consistency is achieved with
 * aio_context_acquire/release. These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer. These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

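/*
 * For illustration, a monitor-side caller is expected to bracket the lookup
 * and the operation with the AioContext lock, along these lines (the job id
 * and speed value are made up):
 *
 *     BlockJob *job = block_job_get("job0");
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, 1024 * 1024, &local_err);
 *         aio_context_release(ctx);
 *     }
 */
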
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

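/*
 * Pause requests are reference-counted in job->pause_count;
 * block_job_resume() only re-enters the job coroutine once the last
 * outstanding pause request has been dropped.
 */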
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .stay_at_node = true,
};

static void block_job_drained_begin(void *opaque)
{
    BlockJob *job = opaque;
    block_job_pause(job);
}

static void block_job_drained_end(void *opaque)
{
    BlockJob *job = opaque;
    block_job_resume(job);
}

static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end = block_job_drained_end,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

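/*
 * block_job_create() leaves the job with pause_count == 1; starting the job
 * drops that initial pause, marks the coroutine busy and running, and enters
 * it for the first time.
 */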
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

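/*
 * Mark the job as cancelled and undo a user pause, but do not re-enter the
 * coroutine; kicking the job is left to the caller.
 */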
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

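/*
 * Run @finish on @job, then drain and poll until the job has completed.
 * Returns -EBUSY if @finish set an error, -ECANCELED if the job was cancelled
 * without an error of its own, and the job's return value otherwise.
 */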
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

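/*
 * Called when one job in a transaction fails or is cancelled: acquire the
 * AioContext of every member job, cancel the remaining jobs, and complete
 * each member before dropping the transaction reference taken here.
 */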
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device = g_strdup(job->id);
    info->len = job->len;
    info->busy = atomic_read(&job->busy);
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer. These functions are
 * declared in blockjob_int.h.
 */

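/*
 * For illustration, a driver typically defines a BlockJobDriver and brings a
 * job to life roughly as follows; ExampleBlockJob, example_job_driver and
 * example_job_start are hypothetical names:
 *
 *     static const BlockJobDriver example_job_driver = {
 *         .instance_size = sizeof(ExampleBlockJob),
 *         .job_type      = BLOCK_JOB_TYPE_STREAM,
 *         .start         = example_job_start,
 *     };
 *
 *     ExampleBlockJob *s = block_job_create(job_id, &example_job_driver, bs,
 *                                           0, BLK_PERM_ALL, speed,
 *                                           BLOCK_JOB_DEFAULT, cb, opaque,
 *                                           errp);
 *     if (s) {
 *         block_job_start(&s->common);
 *     }
 *
 * block_job_create() returns NULL and sets *errp on failure.
 */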
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver = driver;
    job->id = g_strdup(job_id);
    job->blk = blk;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->refcnt = 1;
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

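/*
 * Drivers call this exactly once when they are done. A standalone job
 * completes immediately; a job that is part of a transaction either aborts
 * the whole transaction (on error or cancellation) or waits for the remaining
 * members to finish successfully.
 */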
void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine. */
    assert(job->busy);
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

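/*
 * For illustration, a driver's start function usually loops over its work,
 * yielding through block_job_sleep_ns() and honouring cancellation; the
 * helpers and delay below are hypothetical:
 *
 *     while (!block_job_is_cancelled(job) && work_remaining(s)) {
 *         do_one_chunk(s);
 *         block_job_sleep_ns(job, delay_ns);
 *     }
 *
 * The sleep doubles as a pause point, since block_job_sleep_ns() ends by
 * calling block_job_pause_point().
 */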
void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

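/*
 * Map an on-error policy and an errno value to a BlockErrorAction. For
 * BLOCK_ERROR_ACTION_STOP the job is put into a user pause and its iostatus
 * records the error, so that a later block-job-resume can restart it.
 */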
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

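/*
 * For illustration, a job coroutine normally finishes by deferring its
 * completion callback to the main loop; example_job_exit and
 * example_compute_ret are hypothetical names:
 *
 *     static void example_job_exit(BlockJob *job, void *opaque)
 *     {
 *         block_job_completed(job, example_compute_ret(opaque));
 *     }
 *
 *     block_job_defer_to_main_loop(&s->common, example_job_exit, data);
 *
 * Once job->deferred_to_main_loop is set, block_job_enter_cond() above
 * refuses to re-enter the coroutine.
 */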
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}