// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * Recalculate wakeup batch when tag is shared by hctx.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
				     unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * already reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
			return true;
		}
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
			return true;
		}
	}

	users = atomic_inc_return(&hctx->tags->active_queues);

	blk_mq_update_wake_batch(hctx->tags, users);

	return true;
}

/*
 * Wake up anyone potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_dec_return(&tags->active_queues);

	blk_mq_update_wake_batch(tags, users);

	blk_mq_tag_wakeup_all(tags, false);
}

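/*
 * Attempt a single, non-blocking tag allocation from @bt. For unreserved
 * driver-tag allocations without an elevator, hctx_may_queue() enforces the
 * fair share between active users of a shared tag map before a bit is taken
 * from the sbitmap.
 */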
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

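/*
 * Batched tag allocation for plugged submissions. Returns a bitmask of the
 * tags handed out (zero if batching is not possible, e.g. for shallow,
 * reserved or shared-tag allocations); *offset is set to the first allocated
 * tag, adjusted past the reserved range.
 */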
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}

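/*
 * Allocate a single tag for @data. Unless BLK_MQ_REQ_NOWAIT is set, this may
 * sleep until a tag becomes available, re-running the hardware queue and
 * potentially migrating to a different hardware queue while waiting.
 */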
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wake up on
		 * the previous queue to compensate for the missed wake up, so
		 * other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

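/*
 * Return a tag to its bitmap: reserved tags go back to breserved_tags, all
 * others are offset past the reserved range and cleared in bitmap_tags.
 */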
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

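/*
 * Look up the request currently occupying @bitnr and take a reference on it.
 * tags->lock serialises against the request being freed or the tag being
 * reassigned, so a non-NULL return is safe to use until blk_mq_put_rq_ref()
 * is called.
 */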
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	bool reserved = iter_data->reserved;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

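/*
 * Update the depth of a tag map. Growing beyond the originally allocated
 * depth requires @can_grow and allocates a fresh map (bounded by
 * MAX_SCHED_RQ); otherwise only the existing sbitmap is resized. Reserved
 * tags always remain static.
 */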
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
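
/*
 * Illustrative sketch (not part of this file): a driver that stashes the
 * unique tag in its hardware completion descriptor can recover the hardware
 * queue index and the per-queue tag with the helpers from <linux/blk-mq.h>:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */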