/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
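	/* e.g. 256 tags shared by 3 active queues: max(ceil(256/3), 4) = 86 */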
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
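	/*
	 * A non-zero ->shallow_depth (typically set by an I/O scheduler)
	 * limits how deep into the bitmap this allocation may reach,
	 * throttling that class of requests below the full tag depth.
	 */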
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

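		/*
		 * Add ourselves to the waitqueue before one final retry:
		 * a tag freed after this point will find us on the
		 * waitqueue, and the retry below catches a tag freed
		 * just before it, so no wakeup can be missed.
		 */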
		prepare_to_wait_exclusive(&ws->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		bt_prev = bt;
		io_schedule();

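		/*
		 * The task may have migrated to a different CPU while
		 * asleep, so re-resolve the software and hardware queue
		 * contexts before retrying the allocation.
		 */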
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wake up on the previous queue to compensate for the
		 * missed wakeup, so other allocations on the previous
		 * queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

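/*
 * Release a tag back to its map. Tag values are queue-wide: reserved
 * tags occupy [0, nr_reserved_tags) and regular tags follow, so the
 * reserved offset is subtracted before clearing a bit in bitmap_tags.
 */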
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

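	/*
	 * The normal and reserved tags live in separate bitmaps, but
	 * tags->rqs[] is indexed by the queue-wide tag value, so bits
	 * from the normal bitmap are offset past the reserved range.
	 */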
	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

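/*
 * Unlike the busy iterators above, which only walk tags that are
 * currently allocated, this visits every request preallocated in
 * tags->static_rqs[] for each hardware queue in the set.
 */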
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
			 int (fn)(void *, struct request *))
{
	int i, j, ret = 0;

	if (WARN_ON_ONCE(!fn))
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = fn(data, tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

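/*
 * Passing -1 for the shift lets sbitmap pick a default bits-per-word
 * for the requested depth; round_robin requests strict round-robin
 * tag reuse (BLK_TAG_ALLOC_RR) instead of favouring per-CPU hints.
 */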
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

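/*
 * Update the queue depth for a hardware queue. @tdepth includes the
 * reserved tags; shrinking (or staying within the original size) only
 * resizes the normal bitmap, while growing past the original size
 * requires @can_grow and allocates a whole new request map first.
 */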
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);