block/blk-mq.h (linux.git, Linux 6.14-rc3)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH	(8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
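
/*
 * Illustrative examples (a sketch, not part of the original source) of how
 * blk_mq_get_hctx_type() classifies an operation, assuming the queue has
 * separate poll and read maps configured:
 *
 *	blk_mq_get_hctx_type(REQ_OP_READ)		-> HCTX_TYPE_READ
 *	blk_mq_get_hctx_type(REQ_OP_WRITE)		-> HCTX_TYPE_DEFAULT
 *	blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED)	-> HCTX_TYPE_POLL
 *
 * For types without a dedicated map, the setup code in blk-mq.c points
 * ctx->hctxs[] of that type at the default mapping, so the lookup in
 * blk_mq_map_queue() still resolves to a valid hardware queue.
 */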

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
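
/*
 * A minimal sketch (not part of the original source) of how the two lookups
 * above typically compose on the submission path.  example_bio_to_hctx() is
 * a hypothetical name; because blk_mq_get_ctx() uses raw_smp_processor_id(),
 * the returned ctx may not match the CPU the caller keeps running on, which
 * is fine since the ctx's are persistent.
 *
 *	static struct blk_mq_hw_ctx *example_bio_to_hctx(struct request_queue *q,
 *							 struct bio *bio)
 *	{
 *		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *		return blk_mq_map_queue(q, bio->bi_opf, ctx);
 *	}
 */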

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct rq_list *cached_rqs;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, unsigned int flags, int node);
void blk_mq_free_tags(struct blk_mq_tags *tags);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

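/*
 * With an I/O scheduler attached, a request is allocated from
 * hctx->sched_tags (the allocation carries RQF_SCHED_TAGS) and only picks
 * up a driver tag from hctx->tags when it is actually dispatched.
 */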
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	/* Fast path: hardware queue is not stopped most of the time. */
	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return false;

	/*
	 * This barrier orders adding a request to the dispatch list against
	 * the test of BLK_MQ_S_STOPPED below. It pairs with the memory
	 * barrier in blk_mq_start_stopped_hw_queue() so that the dispatch
	 * code either sees BLK_MQ_S_STOPPED cleared or sees a non-empty
	 * dispatch list, and cannot miss a request that needs dispatching.
	 */
	smp_mb();

	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
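
/*
 * A minimal sketch of the pairing described above, assuming the usual
 * stop/start flow (blk_mq_start_stopped_hw_queue() is the real kernel
 * function; the two-column layout is illustrative only):
 *
 *	submit/dispatch side			start side
 *	------------------------		------------------------------
 *	add request to dispatch list		clear BLK_MQ_S_STOPPED
 *	smp_mb()				smp_mb() in
 *	test BLK_MQ_S_STOPPED			 blk_mq_start_stopped_hw_queue()
 *						run the hardware queue
 *
 * Either the submitter sees the stopped bit already cleared and dispatches
 * itself, or the starter sees the newly added request when it reruns the
 * queue; the request cannot fall through the crack.
 */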

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
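
/*
 * A minimal usage sketch of the budget helpers above, assuming a simplified
 * dispatch step.  example_dispatch_one() and example_issue_to_driver() are
 * hypothetical names; the real users live in blk-mq.c and blk-mq-sched.c.
 *
 *	static bool example_dispatch_one(struct blk_mq_hw_ctx *hctx,
 *					 struct request *rq)
 *	{
 *		struct request_queue *q = hctx->queue;
 *		int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *		if (budget_token < 0)
 *			return false;			// no budget, retry later
 *		blk_mq_set_rq_budget_token(rq, budget_token);
 *		if (!example_issue_to_driver(hctx, rq)) {
 *			blk_mq_put_dispatch_budget(q, budget_token);
 *			return false;
 *		}
 *		return true;
 *	}
 */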

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}
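
/*
 * Driver tag lifecycle in brief: a request allocated from scheduler tags
 * starts out with rq->tag == BLK_MQ_NO_TAG and acquires a driver tag via
 * blk_mq_get_driver_tag() when dispatch actually hands it to the driver.
 * blk_mq_put_driver_tag() releases the tag again (typically on completion
 * or requeue) and decrements the active-request accounting, as seen in
 * __blk_mq_put_driver_tag() above.
 */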

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}
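
/*
 * A minimal usage sketch (hypothetical caller; the real users are error
 * paths in blk-mq.c): collect requests that could not be issued on a local
 * list and release them in one go.
 *
 *	LIST_HEAD(failed);
 *
 *	// ... move requests that cannot be issued onto &failed ...
 *	blk_mq_free_requests(&failed);
 */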

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
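
/*
 * Worked example of the fair-share computation above (illustrative numbers
 * only): with a shared bitmap depth of 256 and 8 active users,
 * depth = max(DIV_ROUND_UP(256, 8), 4) = 32, so each queue may keep up to
 * 32 requests in flight.  With 256 tags and 128 users the per-queue share
 * would round up to 2 but is clamped to the minimum of 4.
 */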

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set;	\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock(__tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\

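/*
 * A minimal usage sketch, assuming a helper that must run under the
 * rcu/srcu protection described above (example_queue_rqs() is a
 * hypothetical name; real callers in blk-mq.c wrap their dispatch the
 * same way):
 *
 *	blk_mq_run_dispatch_ops(q,
 *			ret = example_queue_rqs(q, &rq_list));
 *
 * For a BLK_MQ_F_BLOCKING tag set the block runs inside an SRCU read-side
 * section (and may sleep); otherwise it runs under rcu_read_lock().
 */
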
static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}

#endif