// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
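 * As a concrete example of the mapping above (a sketch, not additional
 * behaviour), a data-carrying REQ_PREFLUSH | REQ_FUA write becomes:
 *
 *	no writeback cache:		DATA
 *	writeback cache, FUA:		PREFLUSH + DATA (REQ_FUA retained)
 *	writeback cache, no FUA:	PREFLUSH + DATA + POSTFLUSH
 *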
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
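 * For instance (an illustrative timeline): while the REQ_OP_FLUSH for
 * flush_queue[0] is in flight, newly arriving requests queue on
 * flush_queue[1]; when the running flush completes, a single flush issued
 * for flush_queue[1] can serve all of them at once.
 *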
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
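 * As an example of C3 (illustrative): a workload issuing a continuous
 * stream of FUA writes to a non-FUA device keeps flush_data_in_flight
 * non-zero almost all the time, so without the timeout the queued
 * POSTFLUSHes could be deferred indefinitely.
 *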
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

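/*
 * For example (illustrative, not extra behaviour): on a device with a
 * writeback cache but no FUA support (QUEUE_FLAG_WC set, QUEUE_FLAG_FUA
 * clear), a data-carrying REQ_PREFLUSH | REQ_FUA write yields
 *
 *	policy == REQ_FSEQ_DATA | REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH
 *
 * i.e. the full three-step sequence described at the top of this file.
 */
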
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

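/*
 * Example (illustrative): once PREFLUSH has completed, rq->flush.seq ==
 * REQ_FSEQ_PREFLUSH (0x1); ffz() returns the position of the first zero
 * bit (1), so blk_flush_cur_seq() yields REQ_FSEQ_DATA (0x2), the next
 * pending step.
 */
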
static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;
        if (rq->bio)
                rq->__sector = rq->bio->bi_iter.bi_sector;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->q->disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      blk_time_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        blk_opf_t cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->queuelist, pending);
                break;

        case REQ_FSEQ_DATA:
                fq->flush_data_in_flight++;
                spin_lock(&q->requeue_lock);
                list_move(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                list_del_init(&rq->queuelist);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

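/*
 * Illustrative walkthrough (no behaviour beyond the cases above): a request
 * whose policy is PREFLUSH + DATA + POSTFLUSH passes through here four
 * times.  It enters with flush.seq == 0 and parks on the pending queue
 * (PREFLUSH), is requeued for normal execution when that flush completes
 * (DATA), parks on the pending queue again (POSTFLUSH), and is finally
 * restored and ended (DONE).
 */
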
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
                                       blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!req_ref_put_and_test(flush_rq)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return RQ_END_IO_NONE;
        }

        blk_account_io_flush(flush_rq);
        /*
         * The flush request has to be marked as IDLE when it is really ended
         * because its .end_io() is called from the timeout code path too,
         * to avoid a use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK) {
                error = fq->rq_status;
                fq->rq_status = BLK_STS_OK;
        }

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, queuelist) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
        return RQ_END_IO_NONE;
}

bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           blk_opf_t flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, queuelist);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (fq->flush_data_in_flight &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * In case of the none scheduler, borrow the tag from the first
         * request since they can't be in flight at the same time, and
         * acquire the tag's ownership for the flush req.
         *
         * In case of an IO scheduler, the flush rq needs to borrow the
         * scheduler tag just for cheating put/get driver tag.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator)
                flush_rq->tag = first_rq->tag;
        else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->end_io = flush_end_io;
        /*
         * Order WRITE ->end_io and WRITE rq->ref; the pairing barrier is the
         * one implied in refcount_inc_not_zero() called from
         * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
         * and READ flush_rq->end_io.
         */
        smp_wmb();
        req_ref_set(flush_rq, 1);

        spin_lock(&q->requeue_lock);
        list_add_tail(&flush_rq->queuelist, &q->flush_list);
        spin_unlock(&q->requeue_lock);

        blk_mq_kick_requeue_list(q);
}

static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
                                               blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        fq->flush_data_in_flight--;
        /*
         * rq->queuelist may have been corrupted by rq->rq_next reuse; we
         * need to re-initialize rq->queuelist before reusing it here.
         */
        INIT_LIST_HEAD(&rq->queuelist);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
        return RQ_END_IO_NONE;
}

static void blk_rq_init_flush(struct request *rq)
{
        rq->flush.seq = 0;
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /* FLUSH/FUA request must never be merged */
        WARN_ON_ONCE(rq->bio != rq->biotail);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        switch (policy) {
        case 0:
                /*
                 * An empty flush handed down from a stacking driver may
                 * translate into nothing if the underlying device does not
                 * advertise a write-back cache.  In this case, simply
                 * complete the request.
                 */
                blk_mq_end_request(rq, 0);
                return true;
        case REQ_FSEQ_DATA:
                /*
                 * If there's data, but no flush is necessary, the request can
                 * be processed directly without going through the flush
                 * machinery.  Queue for normal execution.
                 */
                return false;
        case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
                /*
                 * Initialize the flush fields and completion handler to
                 * trigger the post flush, and then just pass the command on.
                 */
                blk_rq_init_flush(rq);
                rq->flush.seq |= REQ_FSEQ_PREFLUSH;
                spin_lock_irq(&fq->mq_flush_lock);
                fq->flush_data_in_flight++;
                spin_unlock_irq(&fq->mq_flush_lock);
                return false;
        default:
                /*
                 * Mark the request as part of a flush sequence and submit it
                 * for further processing to the flush state machine.
                 */
                blk_rq_init_flush(rq);
                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return true;
        }
}

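/*
 * Example of the default-case seeding above (illustrative): a data-carrying
 * PREFLUSH-only request has policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA, so
 * REQ_FSEQ_ACTIONS & ~policy == REQ_FSEQ_POSTFLUSH is recorded as already
 * complete and the state machine only executes PREFLUSH and then DATA.
 */
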
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

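/*
 * Typical use (a sketch; the caller decides how to handle errors): a
 * filesystem that must make previously completed writes durable can do
 *
 *	int err = blkdev_issue_flush(bdev);
 *	if (err)
 *		return err;
 *
 * submit_bio_wait() makes this synchronous; it returns only after the
 * device has acknowledged the cache flush.
 */
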
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio based request queues have no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to
 * avoid lockdep complaints.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  Such drivers need a different lock class
 * assigned to their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it would introduce a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  More than
 * half an hour has been observed to elapse during SCSI MQ probe with a
 * per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);