// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when a tag map is shared between hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
                unsigned int users)
{
        if (!users)
                return;

        sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
                        users);
        sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
                        users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        unsigned int users;
        unsigned long flags;
        struct blk_mq_tags *tags = hctx->tags;

        /*
         * Calling test_bit() prior to test_and_set_bit() is intentional;
         * it avoids dirtying the cacheline if the queue is already active.
         */
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
                    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return;
        } else {
                if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
                    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irqsave(&tags->lock, flags);
        users = tags->active_queues + 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irqrestore(&tags->lock, flags);
}

/*
 * Wake up all waiters that are potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        unsigned int users;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irq(&tags->lock);
        users = tags->active_queues - 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irq(&tags->lock);

        blk_mq_tag_wakeup_all(tags, false);
}

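/*
 * Single allocation attempt from @bt, never sleeping.  For regular
 * (non-reserved) driver tags with no elevator attached, hctx_may_queue()
 * enforces this hctx's fair share of a shared tag map; a non-zero
 * shallow_depth further limits how deep into the bitmap the allocation may
 * go.  Returns BLK_MQ_NO_TAG on failure.
 */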
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
            !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;

        if (data->shallow_depth)
                return sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

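/*
 * Batched tag allocation.  Only the simple case is handled here: no
 * shallow-depth limit, no reserved tags and no shared-tag fairness
 * accounting.  Anything else returns 0 so the caller falls back to
 * allocating tags one at a time.
 */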
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                              unsigned int *offset)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt = &tags->bitmap_tags;
        unsigned long ret;

        if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
            data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                return 0;
        ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
        *offset += tags->nr_reserved_tags;
        return ret;
}

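/*
 * Allocate a single tag, sleeping if necessary unless BLK_MQ_REQ_NOWAIT is
 * set.  While waiting, the hardware queue is kicked so in-flight requests
 * can complete and free up tags; after sleeping, the submission context may
 * have moved to a different hardware queue, so the ctx/hctx mapping and the
 * wait queue are re-evaluated on every iteration.
 */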
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != BLK_MQ_NO_TAG)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_NO_TAG;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If destination hw queue is changed, fake wake up on
                 * previous queue for compensating the wake up miss, so
                 * other allocations on previous queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev, 1);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        /*
         * Give up this allocation if the hctx is inactive. The caller will
         * retry on an active hctx.
         */
        if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
                blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
                return BLK_MQ_NO_TAG;
        }
        return tag + tag_offset;
}

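/*
 * Release a tag previously returned by blk_mq_get_tag().  @tag is the
 * offset-adjusted value: reserved tags live in [0, nr_reserved_tags) and
 * regular tags start at nr_reserved_tags.
 */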
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

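/*
 * Release a batch of regular (non-reserved) tags in one go.
 */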
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
        sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
                                  tag_array, nr_tags);
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

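/*
 * Look up the request that currently owns tag @bitnr and take a reference
 * on it.  tags->lock keeps the ->rqs[] entry stable while the reference is
 * acquired; NULL is returned if no request owns the tag or its reference
 * count has already dropped to zero.
 */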
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}

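/*
 * sbitmap_for_each_set() callback for bt_for_each().  Maps the set bit back
 * to a tag, looks up the owning request and, if it belongs to the queue
 * (and hardware queue, when one was given) being iterated, hands it to the
 * caller's function.
 */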
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct request_queue *q = iter_data->q;
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_tags *tags;
        struct request *rq;
        bool ret = true;

        if (blk_mq_is_shared_tags(set->flags))
                tags = set->shared_tags;
        else
                tags = hctx->tags;

        if (!iter_data->reserved)
                bitnr += tags->nr_reserved_tags;
        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
                ret = iter_data->fn(rq, iter_data->data);
        blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @q:          Request queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(rq, @data) where rq is a
 *              pointer to a request. Return true to continue iterating
 *              tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
                        struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
                        void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
                .q = q,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        unsigned int flags;
};

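/*
 * Iteration flags for bt_tags_iter():
 *  RESERVED   - @bt is the reserved-tag bitmap, so no tag offset is applied
 *  STARTED    - only invoke @fn for requests that have been started
 *  STATIC_RQS - walk tags->static_rqs[] (all allocated requests) instead of
 *               the in-flight ->rqs[] table
 */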
#define BT_TAG_ITER_RESERVED            (1 << 0)
#define BT_TAG_ITER_STARTED             (1 << 1)
#define BT_TAG_ITER_STATIC_RQS          (1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        struct request *rq;
        bool ret = true;
        bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

        if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (iter_static_rqs)
                rq = tags->static_rqs[bitnr];
        else
                rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
            blk_mq_request_started(rq))
                ret = iter_data->fn(rq, iter_data->data);
        if (!iter_static_rqs)
                blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @flags:      BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .flags = flags,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each
 *              request. @fn will be called as follows: @fn(rq, @priv) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
{
        __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        unsigned int flags = tagset->flags;
        int i, nr_tags;

        nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

        for (i = 0; i < nr_tags; i++) {
                if (tagset->tags && tagset->tags[i])
                        __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
                                              BT_TAG_ITER_STARTED);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:     Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shutdown.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(rq, @priv) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv)
{
        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                struct blk_mq_tags *tags = q->tag_set->shared_tags;
                struct sbitmap_queue *bresv = &tags->breserved_tags;
                struct sbitmap_queue *btags = &tags->bitmap_tags;

                if (tags->nr_reserved_tags)
                        bt_for_each(NULL, q, bresv, fn, priv, true);
                bt_for_each(NULL, q, btags, fn, priv, false);
        } else {
                struct blk_mq_hw_ctx *hctx;
                unsigned long i;

                queue_for_each_hw_ctx(q, hctx, i) {
                        struct blk_mq_tags *tags = hctx->tags;
                        struct sbitmap_queue *bresv = &tags->breserved_tags;
                        struct sbitmap_queue *btags = &tags->bitmap_tags;

                        /*
                         * If no software queues are currently mapped to this
                         * hardware queue, there's nothing to check.
                         */
                        if (!blk_mq_hw_queue_mapped(hctx))
                                continue;

                        if (tags->nr_reserved_tags)
                                bt_for_each(hctx, q, bresv, fn, priv, true);
                        bt_for_each(hctx, q, btags, fn, priv, false);
                }
        }
        blk_queue_exit(q);
}

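/*
 * Thin wrapper around sbitmap_queue_init_node(); the negative shift value
 * lets the sbitmap code pick a suitable default for the given depth.
 */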
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

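/*
 * Allocate a tag map with @total_tags entries, @reserved_tags of which are
 * set aside in a separate reserved bitmap.  Returns NULL on allocation
 * failure or if the requested depth exceeds BLK_MQ_TAG_MAX.
 */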
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                unsigned int reserved_tags, unsigned int flags, int node)
{
        unsigned int depth = total_tags - reserved_tags;
        bool round_robin = flags & BLK_MQ_F_TAG_RR;
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        spin_lock_init(&tags->lock);
        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto out_free_tags;
        if (bt_alloc(&tags->breserved_tags, reserved_tags, round_robin, node))
                goto out_free_bitmap_tags;

        return tags;

out_free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
out_free_tags:
        kfree(tags);
        return NULL;
}

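/*
 * Free a tag map previously allocated with blk_mq_init_tags().
 */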
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

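/*
 * Resize the tag map of a hardware queue to @tdepth tags.  Shrinking, or
 * growing within the originally allocated size, only resizes the sbitmap;
 * growing past the original size requires @can_grow and allocates a new
 * request map before freeing the old one.  Reserved tags are never resized.
 */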
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > MAX_SCHED_RQ)
                        return -EINVAL;

                /*
                 * Only the sbitmap needs resizing since we allocated the max
                 * initially.
                 */
                if (blk_mq_is_shared_tags(set->flags))
                        return 0;

                new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
                if (!new)
                        return -ENOMEM;

                blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

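/*
 * Resize the shared (per-tag-set) tag bitmap so that @size total tags,
 * minus the reserved ones, are available.
 */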
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
        struct blk_mq_tags *tags = set->shared_tags;

        sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

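/*
 * Keep the scheduler's shared tag bitmap in sync with q->nr_requests.
 */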
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
        sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
                             q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);