/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>

struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128
typedef void (rq_end_io_fn)(struct request *, blk_status_t);

typedef __u32 __bitwise req_flags_t;
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV			((__force req_flags_t)(1 << 22))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags;		/* op and common flags */

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct list_head queuelist;
	struct request *rq_next;

	struct gendisk *rq_disk;
	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by completion.
	 */
	unsigned short stats_sectors;
	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_ksm_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;

	unsigned long deadline;
	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	struct hlist_node hash;		/* merge hash */
	struct llist_node ipi_list;

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	struct rb_node rb_node;		/* sort/lookup */
	struct bio_vec special_vec;
	void *completion_data;
	int error_count;		/* for legacy drivers, don't use */
	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	struct list_head list;
	rq_end_io_fn *saved_end_io;

	struct __call_single_data csd;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
};
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(req_op(rq));
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
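
/*
 * Illustrative sketch (not part of this header): a driver's dispatch path
 * can use the helpers above to decode the operation and pick a DMA
 * direction. The mydrv_* names and struct mydrv_cmd are hypothetical.
 *
 *	static blk_status_t mydrv_prep_cmd(struct request *rq,
 *					   struct mydrv_cmd *cmd)
 *	{
 *		switch (req_op(rq)) {
 *		case REQ_OP_READ:
 *		case REQ_OP_WRITE:
 *			cmd->dma_dir = rq_dma_dir(rq);
 *			cmd->is_write = (rq_data_dir(rq) == WRITE);
 *			break;
 *		case REQ_OP_FLUSH:
 *			cmd->dma_dir = DMA_NONE;
 *			break;
 *		default:
 *			return BLK_STS_NOTSUPP;
 *		}
 *		return BLK_STS_OK;
 *	}
 */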
enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
	BLK_EH_RESET_TIMER,	/* reset timer and try again */
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 *	block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first, for a fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;
	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how much work is left in the batch
	 * before changing to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;
	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;
	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long queued;
	/** @run: Number of dispatched requests. */
	unsigned long run;
	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: List to store requests when a CPU is going to die. */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif
	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
	 * blk_mq_hw_ctx_size().
	 */
	struct srcu_struct srcu[];
};
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map map[HCTX_MAX_TYPES];
	unsigned int nr_maps;
	const struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
	unsigned int reserved_tags;
	unsigned int cmd_size;
	int numa_node;
	unsigned int timeout;
	unsigned int flags;
	void *driver_data;

	struct blk_mq_tags **tags;

	struct blk_mq_tags *shared_tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
};
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
 *	behavior.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);
	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @get_budget: Reserve a budget before queueing a request; once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also be
	 * handled, to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);
	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);
	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver-specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);
	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver-private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue is currently busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
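
/*
 * Illustrative sketch (not part of this header): a minimal wiring of these
 * callbacks for a hypothetical "mydrv" driver. Only ->queue_rq is mandatory;
 * it starts the request and issues it to hardware, and the request is later
 * finished via blk_mq_complete_request() from the driver's interrupt path.
 * mydrv_submit_to_hw(), mydrv_complete_rq() and mydrv_timeout_rq() are
 * hypothetical helpers.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (mydrv_submit_to_hw(hctx->driver_data, rq))
 *			return BLK_STS_RESOURCE;	// blk-mq will retry later
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *		.complete	= mydrv_complete_rq,
 *		.timeout	= mydrv_timeout_rq,
 *	};
 */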
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * queuing requests.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)			\
({								\
	static struct lock_class_key __key;			\
								\
	__blk_mq_alloc_disk(set, queuedata, &__key);		\
})
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
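
/*
 * Illustrative sketch (not part of this header): typical probe-time setup of
 * a tag set followed by gendisk allocation. The mydrv_* identifiers are
 * hypothetical and error unwinding is abbreviated; single-queue drivers can
 * use blk_mq_alloc_sq_tag_set() as a shortcut for the tag set part.
 *
 *	static int mydrv_probe(struct mydrv_dev *dev)
 *	{
 *		struct gendisk *disk;
 *		int ret;
 *
 *		memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *		dev->tag_set.ops = &mydrv_mq_ops;
 *		dev->tag_set.nr_hw_queues = 1;
 *		dev->tag_set.queue_depth = 64;
 *		dev->tag_set.numa_node = NUMA_NO_NODE;
 *		dev->tag_set.cmd_size = sizeof(struct mydrv_cmd);
 *		dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *		dev->tag_set.driver_data = dev;
 *
 *		ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *		if (ret)
 *			return ret;
 *
 *		disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *		if (IS_ERR(disk)) {
 *			blk_mq_free_tag_set(&dev->tag_set);
 *			return PTR_ERR(disk);
 *		}
 *		dev->disk = disk;
 *		return 0;
 *	}
 */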
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
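
/*
 * Illustrative sketch (not part of this header): a driver that encodes
 * blk_mq_unique_tag() into its hardware command can recover the request in
 * the completion interrupt by splitting the value back into a hardware queue
 * index and a per-queue tag. mydrv_* names are hypothetical.
 *
 *	static void mydrv_handle_completion(struct mydrv_dev *dev, u32 unique)
 *	{
 *		u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *		u16 tag = blk_mq_unique_tag_to_tag(unique);
 *		struct request *rq;
 *
 *		rq = blk_mq_tag_to_rq(dev->tag_set.tags[hwq], tag);
 *		if (rq)
 *			blk_mq_complete_request(rq);
 *	}
 */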
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);
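
/*
 * Illustrative sketch (not part of this header): the usual split between the
 * interrupt path, which only marks the request as complete, and the
 * ->complete callback, which finishes it. mydrv_pop_completed() is
 * hypothetical.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct request *rq = mydrv_pop_completed(data);
 *
 *		if (rq)
 *			blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void mydrv_complete_rq(struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_end_request(rq, cmd->status);
 *	}
 */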
/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or are using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}
/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
		return false;
	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}
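
/*
 * Illustrative sketch (not part of this header): a ->poll() implementation
 * that batches completions through the caller-supplied io_comp_batch, falling
 * back to blk_mq_complete_request() when batching is not possible. The
 * mydrv_reap_completed() and mydrv_finish_cmd() helpers are hypothetical.
 *
 *	static void mydrv_complete_batch(struct io_comp_batch *iob)
 *	{
 *		struct request *rq;
 *
 *		rq_list_for_each(&iob->req_list, rq)
 *			mydrv_finish_cmd(rq);
 *		blk_mq_end_request_batch(iob);
 *	}
 *
 *	static int mydrv_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 *	{
 *		struct mydrv_queue *mq = hctx->driver_data;
 *		struct request *rq;
 *		int found = 0;
 *
 *		while ((rq = mydrv_reap_completed(mq)) != NULL) {
 *			found++;
 *			if (!blk_mq_add_to_batch(rq, iob, 0, mydrv_complete_batch))
 *				blk_mq_complete_request(rq);
 *		}
 *		return found;
 *	}
 */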
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
		unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
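
/*
 * Illustrative sketch (not part of this header): a ->map_queues()
 * implementation for a driver with separate default and poll queues. Each
 * map gets its own slice of hardware queues via @queue_offset, and
 * blk_mq_map_queues() does the generic CPU spreading. This assumes
 * set->nr_maps was set to HCTX_MAX_TYPES at tag set allocation time;
 * mydrv_poll_queues is a hypothetical driver parameter.
 *
 *	static int mydrv_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
 *		struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];
 *
 *		def->nr_queues = set->nr_hw_queues - mydrv_poll_queues;
 *		def->queue_offset = 0;
 *		blk_mq_map_queues(def);
 *
 *		poll->nr_queues = mydrv_poll_queues;
 *		poll->queue_offset = def->nr_queues;
 *		blk_mq_map_queues(poll);
 *		return 0;
 *	}
 */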
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}
/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract the
 * request size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add the request
 * size to get the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
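
/*
 * Illustrative sketch (not part of this header): with @cmd_size set in the
 * tag set, per-request driver data lives right behind the request, and the
 * two helpers above convert between the two. struct mydrv_cmd is
 * hypothetical.
 *
 *	struct mydrv_cmd {
 *		blk_status_t status;
 *		u32 hw_slot;
 *	};
 *
 *	// at tag set init: set->cmd_size = sizeof(struct mydrv_cmd);
 *
 *	static void mydrv_hw_done(struct mydrv_cmd *cmd, blk_status_t status)
 *	{
 *		struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *		cmd->status = status;
 *		blk_mq_complete_request(rq);
 *	}
 */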
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
void blk_put_request(struct request *rq);
struct request *blk_get_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
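
/*
 * Illustrative sketch (not part of this header): allocating a driver-private
 * request on a queue and executing it synchronously, the usual pattern for
 * passthrough/administrative commands. mydrv_init_internal_cmd() is
 * hypothetical.
 *
 *	static blk_status_t mydrv_send_internal_cmd(struct request_queue *q,
 *						    struct gendisk *disk)
 *	{
 *		struct request *rq;
 *		blk_status_t status;
 *
 *		rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 *		if (IS_ERR(rq))
 *			return BLK_STS_RESOURCE;
 *
 *		mydrv_init_internal_cmd(blk_mq_rq_to_pdu(rq));
 *		status = blk_execute_rq(disk, rq, false);
 *		blk_put_request(rq);
 *		return status;
 *	}
 */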
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request_queue *q,
		struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};
int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct gendisk *, struct request *, int,
		rq_end_io_fn *);
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
		int at_head);
struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
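
/*
 * Illustrative sketch (not part of this header): walking the data of a
 * request segment by segment, e.g. for a driver that copies into a
 * memory-backed area. Assumes the pages are in the kernel direct map
 * (highmem handling omitted); mydrv_copy_request is hypothetical.
 *
 *	static void mydrv_copy_request(struct request *rq, void *dst)
 *	{
 *		struct req_iterator iter;
 *		struct bio_vec bvec;
 *
 *		rq_for_each_segment(bvec, rq, iter) {
 *			void *src = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *			memcpy(dst, src, bvec.bv_len);
 *			dst += bvec.bv_len;
 *		}
 *	}
 */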
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}
/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check
 * that there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}
static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);
/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes);
void blk_abort_request(struct request *);
/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}
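
/*
 * Illustrative sketch (not part of this header): building one hardware
 * discard descriptor per merged discard bio, which is what
 * blk_rq_nr_discard_segments() counts. struct mydrv_range is hypothetical
 * and assumed to have been sized for blk_rq_nr_discard_segments(rq) entries.
 *
 *	static void mydrv_setup_discard(struct request *rq,
 *					struct mydrv_range *range)
 *	{
 *		unsigned short n = 0;
 *		struct bio *bio;
 *
 *		__rq_for_each_bio(bio, rq) {
 *			range[n].lba = bio->bi_iter.bi_sector;
 *			range[n].nr_sectors = bio_sectors(bio);
 *			n++;
 *		}
 *		WARN_ON_ONCE(n > blk_rq_nr_discard_segments(rq));
 *	}
 */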
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
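
/*
 * Illustrative sketch (not part of this header): mapping a request into a
 * driver-owned scatterlist and then onto the device, using rq_dma_dir() for
 * the direction. Assumes the scatterlist was initialised with sg_init_table()
 * and sized for queue_max_segments(); mydrv_* names are hypothetical.
 *
 *	static int mydrv_map_data(struct mydrv_dev *dev, struct request *rq,
 *				  struct scatterlist *sgl)
 *	{
 *		int nents = blk_rq_map_sg(rq->q, rq, sgl);
 *
 *		if (!nents)
 *			return 0;
 *		return dma_map_sg(dev->dma_dev, sgl, nents, rq_dma_dir(rq));
 *	}
 */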
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);
static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */

#endif /* BLK_MQ_H */