/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
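
/*
 * For example, under the rules above ("wb" denotes a writeback cache):
 *
 *	incoming request	device capability	resulting sequence
 *	FLUSH, no data		no wb cache		completed immediately
 *	WRITE+FLUSH+FUA		wb cache, FUA		PREFLUSH + DATA(FUA)
 *	WRITE+FLUSH+FUA		wb cache, no FUA	PREFLUSH + DATA + POSTFLUSH
 *	WRITE+FUA		wb cache, no FUA	DATA + POSTFLUSH
 */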

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

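/*
 * blk_flush_policy() computes which REQ_FSEQ_* steps a request needs
 * from its own flags and the queue's flush capability flags.  For
 * example, a WRITE carrying REQ_FLUSH|REQ_FUA on a queue that advertises
 * REQ_FLUSH but not REQ_FUA resolves to PREFLUSH + DATA + POSTFLUSH,
 * the post flush standing in for the unsupported FUA.
 */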
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

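/*
 * rq->flush.seq accumulates the REQ_FSEQ_* bits that have already
 * completed (steps the request doesn't need are pre-marked as done by
 * blk_insert_flush()), so the lowest clear bit identifies the step to
 * execute next.
 */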
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		list_add(&rq->queuelist, &q->queue_head);
		queued = true;
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	return blk_kick_flush(q) | queued;
}
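
/*
 * Lifecycle sketch: a request whose policy is PREFLUSH|DATA is seeded by
 * blk_insert_flush() with the POSTFLUSH bit pre-set, so its sequence runs
 * PREFLUSH -> DATA -> DONE.  Every completed step funnels back into
 * blk_flush_complete_seq() above, which parks the request on a flush
 * queue, dispatches its data, or ends it.
 */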

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running = &q->flush_queue[q->flush_running_idx];
	bool queued = false;
	struct request *rq, *n;

	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;
	elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request
	 *    finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed)
		blk_run_queue_async(q);
	q->flush_queue_delayed = 0;
}
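
/*
 * Double-buffering sketch: blk_kick_flush() below toggles
 * q->flush_pending_idx when it issues q->flush_rq, so new PRE/POSTFLUSH
 * requests queue on the other flush_queue[] list while the flush is in
 * flight; flush_end_io() above toggles q->flush_running_idx to match and
 * advances everything that was riding on the just-completed flush.
 */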

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.  Seeding the
	 * sequence with the steps @rq does NOT need makes
	 * blk_flush_cur_seq() return the first step it does need.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector (may be %NULL)
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  The flush is submitted and waited upon before this
 *    function returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion_io(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
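
/*
 * Example usage - a minimal sketch of a hypothetical caller that does
 * not need the error sector:
 *
 *	if (blkdev_issue_flush(bdev, GFP_KERNEL, NULL))
 *		pr_warn("cache flush failed\n");
 */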