/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
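
/*
 * Illustrative sketch (not part of this header): the insert path in
 * blk-mq.c queues a request onto a software queue roughly like this,
 * with rq_lists indexed by the hardware queue type:
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 *	spin_unlock(&ctx->lock);
 */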

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
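
/*
 * Illustrative pairing of the helpers above (a sketch of how blk-mq.c
 * sets up and tears down one hardware queue's request map; error
 * handling omitted, local names are only for illustration):
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
 *	blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */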

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	if ((flags & REQ_HIPRI) &&
	    q->tag_set->nr_maps > HCTX_TYPE_POLL &&
	    q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
	    test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		type = HCTX_TYPE_POLL;
	else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
		 q->tag_set->nr_maps > HCTX_TYPE_READ &&
		 q->tag_set->map[HCTX_TYPE_READ].nr_queues)
		type = HCTX_TYPE_READ;

	return blk_mq_map_queue_type(q, type, cpu);
}
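
/*
 * Typical use (a sketch of the request allocation path in blk-mq.c):
 * the submitting CPU's software queue and the command flags together
 * select the hardware queue, e.g.
 *
 *	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx->cpu);
 */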

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
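
/*
 * blk_mq_get_ctx() pins the caller to the current CPU via get_cpu(), so
 * every call must be paired with blk_mq_put_ctx() once the ctx is no
 * longer needed. A minimal sketch:
 *
 *	ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */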

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
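
/*
 * A sketch of how a submission-path caller might fill this in before
 * allocating a request (details vary by caller; fields not listed are
 * left for the allocator to set):
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= 0,
 *		.cmd_flags	= bio->bi_opf,
 *	};
 */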

/*
 * Pick the tags to allocate from: scheduler ("internal") tags when the
 * request is allocated through an I/O scheduler, driver tags otherwise.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

/* A hardware queue counts as mapped once it has software queues and tags. */
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);
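
/*
 * Dispatch budget: ->get_budget/->put_budget are optional blk_mq_ops hooks
 * that let a driver (e.g. SCSI) bound how many requests enter dispatch.
 * A budget taken for a request must be returned if that request is not
 * handed to the driver. Roughly how the dispatch path uses them (a
 * simplified sketch):
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */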
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
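
/*
 * Release a request's driver tag back to hctx->tags, dropping
 * hctx->nr_active for requests marked RQF_MQ_INFLIGHT. The put helpers
 * below are no-ops unless the request holds both a driver tag and a
 * scheduler tag.
 */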
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
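
/*
 * Reset a CPU -> hardware queue map so that every possible CPU points at
 * hardware queue 0, giving a driver's ->map_queues() a clean slate to
 * build on.
 */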
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif