[linux.git] / block / blk-mq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/blk-integrity.h>
14 #include <linux/kmemleak.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/smp.h>
20 #include <linux/interrupt.h>
21 #include <linux/llist.h>
22 #include <linux/cpu.h>
23 #include <linux/cache.h>
24 #include <linux/sched/sysctl.h>
25 #include <linux/sched/topology.h>
26 #include <linux/sched/signal.h>
27 #include <linux/delay.h>
28 #include <linux/crash_dump.h>
29 #include <linux/prefetch.h>
30 #include <linux/blk-crypto.h>
31 #include <linux/part_stat.h>
32
33 #include <trace/events/block.h>
34
35 #include <linux/t10-pi.h>
36 #include "blk.h"
37 #include "blk-mq.h"
38 #include "blk-mq-debugfs.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43
44 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
45 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
46
47 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
48 static void blk_mq_request_bypass_insert(struct request *rq,
49                 blk_insert_t flags);
50 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
51                 struct list_head *list);
52 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
53                          struct io_comp_batch *iob, unsigned int flags);
54
55 /*
56  * Check if any of the ctx, dispatch list or elevator
57  * have pending work in this hardware queue.
58  */
59 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
60 {
61         return !list_empty_careful(&hctx->dispatch) ||
62                 sbitmap_any_bit_set(&hctx->ctx_map) ||
63                         blk_mq_sched_has_work(hctx);
64 }
65
66 /*
67  * Mark this ctx as having pending work in this hardware queue
68  */
69 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
70                                      struct blk_mq_ctx *ctx)
71 {
72         const int bit = ctx->index_hw[hctx->type];
73
74         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
75                 sbitmap_set_bit(&hctx->ctx_map, bit);
76 }
77
78 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
79                                       struct blk_mq_ctx *ctx)
80 {
81         const int bit = ctx->index_hw[hctx->type];
82
83         sbitmap_clear_bit(&hctx->ctx_map, bit);
84 }
85
86 struct mq_inflight {
87         struct block_device *part;
88         unsigned int inflight[2];
89 };
90
91 static bool blk_mq_check_inflight(struct request *rq, void *priv)
92 {
93         struct mq_inflight *mi = priv;
94
95         if (rq->part && blk_do_io_stat(rq) &&
96             (!mi->part->bd_partno || rq->part == mi->part) &&
97             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
98                 mi->inflight[rq_data_dir(rq)]++;
99
100         return true;
101 }
102
103 unsigned int blk_mq_in_flight(struct request_queue *q,
104                 struct block_device *part)
105 {
106         struct mq_inflight mi = { .part = part };
107
108         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
109
110         return mi.inflight[0] + mi.inflight[1];
111 }
112
113 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
114                 unsigned int inflight[2])
115 {
116         struct mq_inflight mi = { .part = part };
117
118         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119         inflight[0] = mi.inflight[0];
120         inflight[1] = mi.inflight[1];
121 }
122
123 void blk_freeze_queue_start(struct request_queue *q)
124 {
125         mutex_lock(&q->mq_freeze_lock);
126         if (++q->mq_freeze_depth == 1) {
127                 percpu_ref_kill(&q->q_usage_counter);
128                 mutex_unlock(&q->mq_freeze_lock);
129                 if (queue_is_mq(q))
130                         blk_mq_run_hw_queues(q, false);
131         } else {
132                 mutex_unlock(&q->mq_freeze_lock);
133         }
134 }
135 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
136
137 void blk_mq_freeze_queue_wait(struct request_queue *q)
138 {
139         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
140 }
141 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
142
143 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
144                                      unsigned long timeout)
145 {
146         return wait_event_timeout(q->mq_freeze_wq,
147                                         percpu_ref_is_zero(&q->q_usage_counter),
148                                         timeout);
149 }
150 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
151
152 /*
153  * Guarantee no request is in use, so we can change any data structure of
154  * the queue afterward.
155  */
156 void blk_freeze_queue(struct request_queue *q)
157 {
158         /*
159          * In the !blk_mq case we are only calling this to kill the
160          * q_usage_counter, otherwise this increases the freeze depth
161          * and waits for it to return to zero.  For this reason there is
162          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
163          * exported to drivers as the only user for unfreeze is blk_mq.
164          */
165         blk_freeze_queue_start(q);
166         blk_mq_freeze_queue_wait(q);
167 }
168
169 void blk_mq_freeze_queue(struct request_queue *q)
170 {
171         /*
172          * ...just an alias to keep freeze and unfreeze actions balanced
173          * in the blk_mq_* namespace
174          */
175         blk_freeze_queue(q);
176 }
177 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
178
179 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
180 {
181         mutex_lock(&q->mq_freeze_lock);
182         if (force_atomic)
183                 q->q_usage_counter.data->force_atomic = true;
184         q->mq_freeze_depth--;
185         WARN_ON_ONCE(q->mq_freeze_depth < 0);
186         if (!q->mq_freeze_depth) {
187                 percpu_ref_resurrect(&q->q_usage_counter);
188                 wake_up_all(&q->mq_freeze_wq);
189         }
190         mutex_unlock(&q->mq_freeze_lock);
191 }
192
193 void blk_mq_unfreeze_queue(struct request_queue *q)
194 {
195         __blk_mq_unfreeze_queue(q, false);
196 }
197 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
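/*
 * Usage sketch (editorial addition, not part of blk-mq.c): queue
 * reconfiguration is typically bracketed by a freeze/unfreeze pair so that
 * no request is in use while per-queue data structures change; "q" stands
 * for the caller's struct request_queue.
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue limits or other per-queue state ...
 *	blk_mq_unfreeze_queue(q);
 */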
198
199 /*
200  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
201  * mpt3sas driver such that this function can be removed.
202  */
203 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
204 {
205         unsigned long flags;
206
207         spin_lock_irqsave(&q->queue_lock, flags);
208         if (!q->quiesce_depth++)
209                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
210         spin_unlock_irqrestore(&q->queue_lock, flags);
211 }
212 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
213
214 /**
215  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
216  * @set: tag_set to wait on
217  *
218  * Note: it is the driver's responsibility to make sure that quiesce has
219  * been started on one or more of the request_queues of the tag_set.  This
220  * function only waits for the quiesce to complete on those request_queues
221  * that had the quiesce flag set using blk_mq_quiesce_queue_nowait.
222  */
223 void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
224 {
225         if (set->flags & BLK_MQ_F_BLOCKING)
226                 synchronize_srcu(set->srcu);
227         else
228                 synchronize_rcu();
229 }
230 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
231
232 /**
233  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
234  * @q: request queue.
235  *
236  * Note: this function does not prevent the struct request end_io()
237  * callback from being invoked. Once this function returns, no dispatch
238  * can happen until the queue is unquiesced via
239  * blk_mq_unquiesce_queue().
240  */
241 void blk_mq_quiesce_queue(struct request_queue *q)
242 {
243         blk_mq_quiesce_queue_nowait(q);
244         /* nothing to wait for on non-mq queues */
245         if (queue_is_mq(q))
246                 blk_mq_wait_quiesce_done(q->tag_set);
247 }
248 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
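/*
 * Usage sketch (editorial addition, not part of blk-mq.c): quiescing stops
 * future dispatches but, unlike a freeze, does not wait for outstanding
 * requests to complete.  A driver error handler might do:
 *
 *	blk_mq_quiesce_queue(q);	// no ->queue_rq() once this returns
 *	... reset or reconfigure the hardware ...
 *	blk_mq_unquiesce_queue(q);	// dispatching resumes
 */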
249
250 /*
251  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
252  * @q: request queue.
253  *
254  * This function restores the queue to the state it was in before it was
255  * quiesced by blk_mq_quiesce_queue().
256  */
257 void blk_mq_unquiesce_queue(struct request_queue *q)
258 {
259         unsigned long flags;
260         bool run_queue = false;
261
262         spin_lock_irqsave(&q->queue_lock, flags);
263         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
264                 ;
265         } else if (!--q->quiesce_depth) {
266                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
267                 run_queue = true;
268         }
269         spin_unlock_irqrestore(&q->queue_lock, flags);
270
271         /* dispatch requests which are inserted during quiescing */
272         if (run_queue)
273                 blk_mq_run_hw_queues(q, true);
274 }
275 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
276
277 void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
278 {
279         struct request_queue *q;
280
281         mutex_lock(&set->tag_list_lock);
282         list_for_each_entry(q, &set->tag_list, tag_set_list) {
283                 if (!blk_queue_skip_tagset_quiesce(q))
284                         blk_mq_quiesce_queue_nowait(q);
285         }
286         blk_mq_wait_quiesce_done(set);
287         mutex_unlock(&set->tag_list_lock);
288 }
289 EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
290
291 void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
292 {
293         struct request_queue *q;
294
295         mutex_lock(&set->tag_list_lock);
296         list_for_each_entry(q, &set->tag_list, tag_set_list) {
297                 if (!blk_queue_skip_tagset_quiesce(q))
298                         blk_mq_unquiesce_queue(q);
299         }
300         mutex_unlock(&set->tag_list_lock);
301 }
302 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
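/*
 * Usage sketch (editorial addition, not part of blk-mq.c): a multi-queue
 * driver can quiesce every request_queue sharing one tag_set in a single
 * call, e.g. around a controller teardown/reset:
 *
 *	blk_mq_quiesce_tagset(set);	// all queues of "set" stop dispatching
 *	... tear down and re-establish controller state ...
 *	blk_mq_unquiesce_tagset(set);
 */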
303
304 void blk_mq_wake_waiters(struct request_queue *q)
305 {
306         struct blk_mq_hw_ctx *hctx;
307         unsigned long i;
308
309         queue_for_each_hw_ctx(q, hctx, i)
310                 if (blk_mq_hw_queue_mapped(hctx))
311                         blk_mq_tag_wakeup_all(hctx->tags, true);
312 }
313
314 void blk_rq_init(struct request_queue *q, struct request *rq)
315 {
316         memset(rq, 0, sizeof(*rq));
317
318         INIT_LIST_HEAD(&rq->queuelist);
319         rq->q = q;
320         rq->__sector = (sector_t) -1;
321         INIT_HLIST_NODE(&rq->hash);
322         RB_CLEAR_NODE(&rq->rb_node);
323         rq->tag = BLK_MQ_NO_TAG;
324         rq->internal_tag = BLK_MQ_NO_TAG;
325         rq->start_time_ns = ktime_get_ns();
326         rq->part = NULL;
327         blk_crypto_rq_set_defaults(rq);
328 }
329 EXPORT_SYMBOL(blk_rq_init);
330
331 /* Set start and alloc time when the allocated request is actually used */
332 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
333 {
334         if (blk_mq_need_time_stamp(rq))
335                 rq->start_time_ns = ktime_get_ns();
336         else
337                 rq->start_time_ns = 0;
338
339 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
340         if (blk_queue_rq_alloc_time(rq->q))
341                 rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
342         else
343                 rq->alloc_time_ns = 0;
344 #endif
345 }
346
347 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
348                 struct blk_mq_tags *tags, unsigned int tag)
349 {
350         struct blk_mq_ctx *ctx = data->ctx;
351         struct blk_mq_hw_ctx *hctx = data->hctx;
352         struct request_queue *q = data->q;
353         struct request *rq = tags->static_rqs[tag];
354
355         rq->q = q;
356         rq->mq_ctx = ctx;
357         rq->mq_hctx = hctx;
358         rq->cmd_flags = data->cmd_flags;
359
360         if (data->flags & BLK_MQ_REQ_PM)
361                 data->rq_flags |= RQF_PM;
362         if (blk_queue_io_stat(q))
363                 data->rq_flags |= RQF_IO_STAT;
364         rq->rq_flags = data->rq_flags;
365
366         if (data->rq_flags & RQF_SCHED_TAGS) {
367                 rq->tag = BLK_MQ_NO_TAG;
368                 rq->internal_tag = tag;
369         } else {
370                 rq->tag = tag;
371                 rq->internal_tag = BLK_MQ_NO_TAG;
372         }
373         rq->timeout = 0;
374
375         rq->part = NULL;
376         rq->io_start_time_ns = 0;
377         rq->stats_sectors = 0;
378         rq->nr_phys_segments = 0;
379 #if defined(CONFIG_BLK_DEV_INTEGRITY)
380         rq->nr_integrity_segments = 0;
381 #endif
382         rq->end_io = NULL;
383         rq->end_io_data = NULL;
384
385         blk_crypto_rq_set_defaults(rq);
386         INIT_LIST_HEAD(&rq->queuelist);
387         /* tag was already set */
388         WRITE_ONCE(rq->deadline, 0);
389         req_ref_set(rq, 1);
390
391         if (rq->rq_flags & RQF_USE_SCHED) {
392                 struct elevator_queue *e = data->q->elevator;
393
394                 INIT_HLIST_NODE(&rq->hash);
395                 RB_CLEAR_NODE(&rq->rb_node);
396
397                 if (e->type->ops.prepare_request)
398                         e->type->ops.prepare_request(rq);
399         }
400
401         return rq;
402 }
403
404 static inline struct request *
405 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
406 {
407         unsigned int tag, tag_offset;
408         struct blk_mq_tags *tags;
409         struct request *rq;
410         unsigned long tag_mask;
411         int i, nr = 0;
412
413         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
414         if (unlikely(!tag_mask))
415                 return NULL;
416
417         tags = blk_mq_tags_from_data(data);
418         for (i = 0; tag_mask; i++) {
419                 if (!(tag_mask & (1UL << i)))
420                         continue;
421                 tag = tag_offset + i;
422                 prefetch(tags->static_rqs[tag]);
423                 tag_mask &= ~(1UL << i);
424                 rq = blk_mq_rq_ctx_init(data, tags, tag);
425                 rq_list_add(data->cached_rq, rq);
426                 nr++;
427         }
428         if (!(data->rq_flags & RQF_SCHED_TAGS))
429                 blk_mq_add_active_requests(data->hctx, nr);
430         /* caller already holds a reference, add for remainder */
431         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
432         data->nr_tags -= nr;
433
434         return rq_list_pop(data->cached_rq);
435 }
436
437 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
438 {
439         struct request_queue *q = data->q;
440         u64 alloc_time_ns = 0;
441         struct request *rq;
442         unsigned int tag;
443
444         /* alloc_time includes depth and tag waits */
445         if (blk_queue_rq_alloc_time(q))
446                 alloc_time_ns = ktime_get_ns();
447
448         if (data->cmd_flags & REQ_NOWAIT)
449                 data->flags |= BLK_MQ_REQ_NOWAIT;
450
451         if (q->elevator) {
452                 /*
453                  * All requests use scheduler tags when an I/O scheduler is
454                  * enabled for the queue.
455                  */
456                 data->rq_flags |= RQF_SCHED_TAGS;
457
458                 /*
459                  * Flush/passthrough requests are special and go directly to the
460                  * dispatch list.
461                  */
462                 if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
463                     !blk_op_is_passthrough(data->cmd_flags)) {
464                         struct elevator_mq_ops *ops = &q->elevator->type->ops;
465
466                         WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
467
468                         data->rq_flags |= RQF_USE_SCHED;
469                         if (ops->limit_depth)
470                                 ops->limit_depth(data->cmd_flags, data);
471                 }
472         }
473
474 retry:
475         data->ctx = blk_mq_get_ctx(q);
476         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
477         if (!(data->rq_flags & RQF_SCHED_TAGS))
478                 blk_mq_tag_busy(data->hctx);
479
480         if (data->flags & BLK_MQ_REQ_RESERVED)
481                 data->rq_flags |= RQF_RESV;
482
483         /*
484          * Try batched alloc if we want more than 1 tag.
485          */
486         if (data->nr_tags > 1) {
487                 rq = __blk_mq_alloc_requests_batch(data);
488                 if (rq) {
489                         blk_mq_rq_time_init(rq, alloc_time_ns);
490                         return rq;
491                 }
492                 data->nr_tags = 1;
493         }
494
495         /*
496          * Waiting allocations only fail because of an inactive hctx.  In that
497          * case just retry the hctx assignment and tag allocation as CPU hotplug
498          * should have migrated us to an online CPU by now.
499          */
500         tag = blk_mq_get_tag(data);
501         if (tag == BLK_MQ_NO_TAG) {
502                 if (data->flags & BLK_MQ_REQ_NOWAIT)
503                         return NULL;
504                 /*
505                  * Give up the CPU and sleep for a short time to ensure
506                  * that threads using a realtime scheduling class are
507                  * migrated off the CPU, and thus off the hctx that is
508                  * going away.
509                  */
510                 msleep(3);
511                 goto retry;
512         }
513
514         if (!(data->rq_flags & RQF_SCHED_TAGS))
515                 blk_mq_inc_active_requests(data->hctx);
516         rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
517         blk_mq_rq_time_init(rq, alloc_time_ns);
518         return rq;
519 }
520
521 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
522                                             struct blk_plug *plug,
523                                             blk_opf_t opf,
524                                             blk_mq_req_flags_t flags)
525 {
526         struct blk_mq_alloc_data data = {
527                 .q              = q,
528                 .flags          = flags,
529                 .cmd_flags      = opf,
530                 .nr_tags        = plug->nr_ios,
531                 .cached_rq      = &plug->cached_rq,
532         };
533         struct request *rq;
534
535         if (blk_queue_enter(q, flags))
536                 return NULL;
537
538         plug->nr_ios = 1;
539
540         rq = __blk_mq_alloc_requests(&data);
541         if (unlikely(!rq))
542                 blk_queue_exit(q);
543         return rq;
544 }
545
546 static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
547                                                    blk_opf_t opf,
548                                                    blk_mq_req_flags_t flags)
549 {
550         struct blk_plug *plug = current->plug;
551         struct request *rq;
552
553         if (!plug)
554                 return NULL;
555
556         if (rq_list_empty(plug->cached_rq)) {
557                 if (plug->nr_ios == 1)
558                         return NULL;
559                 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
560                 if (!rq)
561                         return NULL;
562         } else {
563                 rq = rq_list_peek(&plug->cached_rq);
564                 if (!rq || rq->q != q)
565                         return NULL;
566
567                 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
568                         return NULL;
569                 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
570                         return NULL;
571
572                 plug->cached_rq = rq_list_next(rq);
573                 blk_mq_rq_time_init(rq, 0);
574         }
575
576         rq->cmd_flags = opf;
577         INIT_LIST_HEAD(&rq->queuelist);
578         return rq;
579 }
580
581 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
582                 blk_mq_req_flags_t flags)
583 {
584         struct request *rq;
585
586         rq = blk_mq_alloc_cached_request(q, opf, flags);
587         if (!rq) {
588                 struct blk_mq_alloc_data data = {
589                         .q              = q,
590                         .flags          = flags,
591                         .cmd_flags      = opf,
592                         .nr_tags        = 1,
593                 };
594                 int ret;
595
596                 ret = blk_queue_enter(q, flags);
597                 if (ret)
598                         return ERR_PTR(ret);
599
600                 rq = __blk_mq_alloc_requests(&data);
601                 if (!rq)
602                         goto out_queue_exit;
603         }
604         rq->__data_len = 0;
605         rq->__sector = (sector_t) -1;
606         rq->bio = rq->biotail = NULL;
607         return rq;
608 out_queue_exit:
609         blk_queue_exit(q);
610         return ERR_PTR(-EWOULDBLOCK);
611 }
612 EXPORT_SYMBOL(blk_mq_alloc_request);
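/*
 * Usage sketch (editorial addition, not part of blk-mq.c): allocate a
 * passthrough request, noting that the return value is an ERR_PTR() on
 * failure rather than NULL; "my_prepare_cmd" is a hypothetical helper.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	my_prepare_cmd(rq);
 *	...
 *	blk_mq_free_request(rq);
 */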
613
614 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
615         blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
616 {
617         struct blk_mq_alloc_data data = {
618                 .q              = q,
619                 .flags          = flags,
620                 .cmd_flags      = opf,
621                 .nr_tags        = 1,
622         };
623         u64 alloc_time_ns = 0;
624         struct request *rq;
625         unsigned int cpu;
626         unsigned int tag;
627         int ret;
628
629         /* alloc_time includes depth and tag waits */
630         if (blk_queue_rq_alloc_time(q))
631                 alloc_time_ns = ktime_get_ns();
632
633         /*
634          * If the tag allocator sleeps we could get an allocation for a
635          * different hardware context.  There is no need to complicate the
636          * low level allocator for the rare use case of a command tied to
637          * a specific queue.
638          */
639         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
640             WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
641                 return ERR_PTR(-EINVAL);
642
643         if (hctx_idx >= q->nr_hw_queues)
644                 return ERR_PTR(-EIO);
645
646         ret = blk_queue_enter(q, flags);
647         if (ret)
648                 return ERR_PTR(ret);
649
650         /*
651          * Check if the hardware context is actually mapped to anything.
652          * If not tell the caller that it should skip this queue.
653          */
654         ret = -EXDEV;
655         data.hctx = xa_load(&q->hctx_table, hctx_idx);
656         if (!blk_mq_hw_queue_mapped(data.hctx))
657                 goto out_queue_exit;
658         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
659         if (cpu >= nr_cpu_ids)
660                 goto out_queue_exit;
661         data.ctx = __blk_mq_get_ctx(q, cpu);
662
663         if (q->elevator)
664                 data.rq_flags |= RQF_SCHED_TAGS;
665         else
666                 blk_mq_tag_busy(data.hctx);
667
668         if (flags & BLK_MQ_REQ_RESERVED)
669                 data.rq_flags |= RQF_RESV;
670
671         ret = -EWOULDBLOCK;
672         tag = blk_mq_get_tag(&data);
673         if (tag == BLK_MQ_NO_TAG)
674                 goto out_queue_exit;
675         if (!(data.rq_flags & RQF_SCHED_TAGS))
676                 blk_mq_inc_active_requests(data.hctx);
677         rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
678         blk_mq_rq_time_init(rq, alloc_time_ns);
679         rq->__data_len = 0;
680         rq->__sector = (sector_t) -1;
681         rq->bio = rq->biotail = NULL;
682         return rq;
683
684 out_queue_exit:
685         blk_queue_exit(q);
686         return ERR_PTR(ret);
687 }
688 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
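/*
 * Usage sketch (editorial addition, not part of blk-mq.c): as enforced by
 * the WARN_ON_ONCE() checks above, callers must pass both BLK_MQ_REQ_NOWAIT
 * and BLK_MQ_REQ_RESERVED when tying a request to a specific hardware queue,
 * e.g. for a per-queue connect or admin-style command:
 *
 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
 *			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED, hctx_idx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */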
689
690 static void blk_mq_finish_request(struct request *rq)
691 {
692         struct request_queue *q = rq->q;
693
694         if (rq->rq_flags & RQF_USE_SCHED) {
695                 q->elevator->type->ops.finish_request(rq);
696                 /*
697                  * A postflush request may need to be completed
698                  * twice, so clear this flag to avoid calling
699                  * finish_request() on the rq twice.
700                  */
701                 rq->rq_flags &= ~RQF_USE_SCHED;
702         }
703 }
704
705 static void __blk_mq_free_request(struct request *rq)
706 {
707         struct request_queue *q = rq->q;
708         struct blk_mq_ctx *ctx = rq->mq_ctx;
709         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
710         const int sched_tag = rq->internal_tag;
711
712         blk_crypto_free_request(rq);
713         blk_pm_mark_last_busy(rq);
714         rq->mq_hctx = NULL;
715
716         if (rq->tag != BLK_MQ_NO_TAG) {
717                 blk_mq_dec_active_requests(hctx);
718                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
719         }
720         if (sched_tag != BLK_MQ_NO_TAG)
721                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
722         blk_mq_sched_restart(hctx);
723         blk_queue_exit(q);
724 }
725
726 void blk_mq_free_request(struct request *rq)
727 {
728         struct request_queue *q = rq->q;
729
730         blk_mq_finish_request(rq);
731
732         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
733                 laptop_io_completion(q->disk->bdi);
734
735         rq_qos_done(q, rq);
736
737         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
738         if (req_ref_put_and_test(rq))
739                 __blk_mq_free_request(rq);
740 }
741 EXPORT_SYMBOL_GPL(blk_mq_free_request);
742
743 void blk_mq_free_plug_rqs(struct blk_plug *plug)
744 {
745         struct request *rq;
746
747         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
748                 blk_mq_free_request(rq);
749 }
750
751 void blk_dump_rq_flags(struct request *rq, char *msg)
752 {
753         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
754                 rq->q->disk ? rq->q->disk->disk_name : "?",
755                 (__force unsigned long long) rq->cmd_flags);
756
757         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
758                (unsigned long long)blk_rq_pos(rq),
759                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
760         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
761                rq->bio, rq->biotail, blk_rq_bytes(rq));
762 }
763 EXPORT_SYMBOL(blk_dump_rq_flags);
764
765 static void req_bio_endio(struct request *rq, struct bio *bio,
766                           unsigned int nbytes, blk_status_t error)
767 {
768         if (unlikely(error)) {
769                 bio->bi_status = error;
770         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
771                 /*
772                  * Partial zone append completions cannot be supported as the
773                  * BIO fragments may end up not being written sequentially.
774                  * In that case, force the completed nbytes to be equal to
775                  * the BIO size so that bio_advance() sets the BIO remaining
776                  * size to 0 and we end up calling bio_endio() before returning.
777                  */
778                 if (bio->bi_iter.bi_size != nbytes) {
779                         bio->bi_status = BLK_STS_IOERR;
780                         nbytes = bio->bi_iter.bi_size;
781                 } else {
782                         bio->bi_iter.bi_sector = rq->__sector;
783                 }
784         }
785
786         bio_advance(bio, nbytes);
787
788         if (unlikely(rq->rq_flags & RQF_QUIET))
789                 bio_set_flag(bio, BIO_QUIET);
790         /* don't actually finish bio if it's part of flush sequence */
791         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
792                 bio_endio(bio);
793 }
794
795 static void blk_account_io_completion(struct request *req, unsigned int bytes)
796 {
797         if (req->part && blk_do_io_stat(req)) {
798                 const int sgrp = op_stat_group(req_op(req));
799
800                 part_stat_lock();
801                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
802                 part_stat_unlock();
803         }
804 }
805
806 static void blk_print_req_error(struct request *req, blk_status_t status)
807 {
808         printk_ratelimited(KERN_ERR
809                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
810                 "phys_seg %u prio class %u\n",
811                 blk_status_to_str(status),
812                 req->q->disk ? req->q->disk->disk_name : "?",
813                 blk_rq_pos(req), (__force u32)req_op(req),
814                 blk_op_str(req_op(req)),
815                 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
816                 req->nr_phys_segments,
817                 IOPRIO_PRIO_CLASS(req->ioprio));
818 }
819
820 /*
821  * Fully end IO on a request. Does not support partial completions, or
822  * errors.
823  */
824 static void blk_complete_request(struct request *req)
825 {
826         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
827         int total_bytes = blk_rq_bytes(req);
828         struct bio *bio = req->bio;
829
830         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
831
832         if (!bio)
833                 return;
834
835 #ifdef CONFIG_BLK_DEV_INTEGRITY
836         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
837                 req->q->integrity.profile->complete_fn(req, total_bytes);
838 #endif
839
840         /*
841          * Upper layers may call blk_crypto_evict_key() anytime after the last
842          * bio_endio().  Therefore, the keyslot must be released before that.
843          */
844         blk_crypto_rq_put_keyslot(req);
845
846         blk_account_io_completion(req, total_bytes);
847
848         do {
849                 struct bio *next = bio->bi_next;
850
851                 /* Completion has already been traced */
852                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
853
854                 if (req_op(req) == REQ_OP_ZONE_APPEND)
855                         bio->bi_iter.bi_sector = req->__sector;
856
857                 if (!is_flush)
858                         bio_endio(bio);
859                 bio = next;
860         } while (bio);
861
862         /*
863          * Reset counters so that the request stacking driver
864          * can find how many bytes remain in the request
865          * later.
866          */
867         if (!req->end_io) {
868                 req->bio = NULL;
869                 req->__data_len = 0;
870         }
871 }
872
873 /**
874  * blk_update_request - Complete multiple bytes without completing the request
875  * @req:      the request being processed
876  * @error:    block status code
877  * @nr_bytes: number of bytes to complete for @req
878  *
879  * Description:
880  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
881  *     the request structure even if @req doesn't have leftover.
882  *     If @req has leftover, sets it up for the next range of segments.
883  *
884  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
885  *     %false return from this function.
886  *
887  * Note:
888  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
889  *      except in the consistency check at the end.
890  *
891  * Return:
892  *     %false - this request doesn't have any more data
893  *     %true  - this request has more data
894  **/
895 bool blk_update_request(struct request *req, blk_status_t error,
896                 unsigned int nr_bytes)
897 {
898         int total_bytes;
899
900         trace_block_rq_complete(req, error, nr_bytes);
901
902         if (!req->bio)
903                 return false;
904
905 #ifdef CONFIG_BLK_DEV_INTEGRITY
906         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
907             error == BLK_STS_OK)
908                 req->q->integrity.profile->complete_fn(req, nr_bytes);
909 #endif
910
911         /*
912          * Upper layers may call blk_crypto_evict_key() anytime after the last
913          * bio_endio().  Therefore, the keyslot must be released before that.
914          */
915         if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
916                 __blk_crypto_rq_put_keyslot(req);
917
918         if (unlikely(error && !blk_rq_is_passthrough(req) &&
919                      !(req->rq_flags & RQF_QUIET)) &&
920                      !test_bit(GD_DEAD, &req->q->disk->state)) {
921                 blk_print_req_error(req, error);
922                 trace_block_rq_error(req, error, nr_bytes);
923         }
924
925         blk_account_io_completion(req, nr_bytes);
926
927         total_bytes = 0;
928         while (req->bio) {
929                 struct bio *bio = req->bio;
930                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
931
932                 if (bio_bytes == bio->bi_iter.bi_size)
933                         req->bio = bio->bi_next;
934
935                 /* Completion has already been traced */
936                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
937                 req_bio_endio(req, bio, bio_bytes, error);
938
939                 total_bytes += bio_bytes;
940                 nr_bytes -= bio_bytes;
941
942                 if (!nr_bytes)
943                         break;
944         }
945
946         /*
947          * completely done
948          */
949         if (!req->bio) {
950                 /*
951                  * Reset counters so that the request stacking driver
952                  * can find how many bytes remain in the request
953                  * later.
954                  */
955                 req->__data_len = 0;
956                 return false;
957         }
958
959         req->__data_len -= total_bytes;
960
961         /* update sector only for requests with clear definition of sector */
962         if (!blk_rq_is_passthrough(req))
963                 req->__sector += total_bytes >> 9;
964
965         /* mixed attributes always follow the first bio */
966         if (req->rq_flags & RQF_MIXED_MERGE) {
967                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
968                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
969         }
970
971         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
972                 /*
973                  * If total number of sectors is less than the first segment
974                  * size, something has gone terribly wrong.
975                  */
976                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
977                         blk_dump_rq_flags(req, "request botched");
978                         req->__data_len = blk_rq_cur_bytes(req);
979                 }
980
981                 /* recalculate the number of segments */
982                 req->nr_phys_segments = blk_recalc_rq_segments(req);
983         }
984
985         return true;
986 }
987 EXPORT_SYMBOL_GPL(blk_update_request);
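/*
 * Usage sketch (editorial addition, not part of blk-mq.c): a driver that
 * completes a request in pieces calls blk_update_request() per chunk and
 * only ends the request once no bytes remain; blk_mq_end_request() below
 * is the shortcut for the common full-completion case.
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, nr_done_bytes))
 *		__blk_mq_end_request(rq, BLK_STS_OK);	// nothing left
 *	// else: more segments remain, the driver keeps working on rq
 */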
988
989 static inline void blk_account_io_done(struct request *req, u64 now)
990 {
991         trace_block_io_done(req);
992
993         /*
994          * Account IO completion.  flush_rq isn't accounted as a
995          * normal IO on queueing nor completion.  Accounting the
996          * containing request is enough.
997          */
998         if (blk_do_io_stat(req) && req->part &&
999             !(req->rq_flags & RQF_FLUSH_SEQ)) {
1000                 const int sgrp = op_stat_group(req_op(req));
1001
1002                 part_stat_lock();
1003                 update_io_ticks(req->part, jiffies, true);
1004                 part_stat_inc(req->part, ios[sgrp]);
1005                 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
1006                 part_stat_unlock();
1007         }
1008 }
1009
1010 static inline void blk_account_io_start(struct request *req)
1011 {
1012         trace_block_io_start(req);
1013
1014         if (blk_do_io_stat(req)) {
1015                 /*
1016                  * All non-passthrough requests are created from a bio with one
1017                  * exception: when a flush command that is part of a flush sequence
1018                  * generated by the state machine in blk-flush.c is cloned onto the
1019                  * lower device by dm-multipath we can get here without a bio.
1020                  */
1021                 if (req->bio)
1022                         req->part = req->bio->bi_bdev;
1023                 else
1024                         req->part = req->q->disk->part0;
1025
1026                 part_stat_lock();
1027                 update_io_ticks(req->part, jiffies, false);
1028                 part_stat_unlock();
1029         }
1030 }
1031
1032 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1033 {
1034         if (rq->rq_flags & RQF_STATS)
1035                 blk_stat_add(rq, now);
1036
1037         blk_mq_sched_completed_request(rq, now);
1038         blk_account_io_done(rq, now);
1039 }
1040
1041 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1042 {
1043         if (blk_mq_need_time_stamp(rq))
1044                 __blk_mq_end_request_acct(rq, ktime_get_ns());
1045
1046         blk_mq_finish_request(rq);
1047
1048         if (rq->end_io) {
1049                 rq_qos_done(rq->q, rq);
1050                 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1051                         blk_mq_free_request(rq);
1052         } else {
1053                 blk_mq_free_request(rq);
1054         }
1055 }
1056 EXPORT_SYMBOL(__blk_mq_end_request);
1057
1058 void blk_mq_end_request(struct request *rq, blk_status_t error)
1059 {
1060         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1061                 BUG();
1062         __blk_mq_end_request(rq, error);
1063 }
1064 EXPORT_SYMBOL(blk_mq_end_request);
1065
1066 #define TAG_COMP_BATCH          32
1067
1068 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1069                                           int *tag_array, int nr_tags)
1070 {
1071         struct request_queue *q = hctx->queue;
1072
1073         blk_mq_sub_active_requests(hctx, nr_tags);
1074
1075         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1076         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1077 }
1078
1079 void blk_mq_end_request_batch(struct io_comp_batch *iob)
1080 {
1081         int tags[TAG_COMP_BATCH], nr_tags = 0;
1082         struct blk_mq_hw_ctx *cur_hctx = NULL;
1083         struct request *rq;
1084         u64 now = 0;
1085
1086         if (iob->need_ts)
1087                 now = ktime_get_ns();
1088
1089         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1090                 prefetch(rq->bio);
1091                 prefetch(rq->rq_next);
1092
1093                 blk_complete_request(rq);
1094                 if (iob->need_ts)
1095                         __blk_mq_end_request_acct(rq, now);
1096
1097                 blk_mq_finish_request(rq);
1098
1099                 rq_qos_done(rq->q, rq);
1100
1101                 /*
1102                  * If end_io handler returns NONE, then it still has
1103                  * ownership of the request.
1104                  */
1105                 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1106                         continue;
1107
1108                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1109                 if (!req_ref_put_and_test(rq))
1110                         continue;
1111
1112                 blk_crypto_free_request(rq);
1113                 blk_pm_mark_last_busy(rq);
1114
1115                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1116                         if (cur_hctx)
1117                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1118                         nr_tags = 0;
1119                         cur_hctx = rq->mq_hctx;
1120                 }
1121                 tags[nr_tags++] = rq->tag;
1122         }
1123
1124         if (nr_tags)
1125                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1126 }
1127 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
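/*
 * Usage sketch (editorial addition, not part of blk-mq.c): drivers feed
 * finished requests into a struct io_comp_batch (for example from a poll
 * handler) with blk_mq_add_to_batch(); the batch is then completed in one
 * go, typically with iob->complete pointing at blk_mq_end_request_batch().
 * "my_batch_done" is a hypothetical per-driver batch completion hook.
 *
 *	if (!blk_mq_add_to_batch(rq, iob, error, my_batch_done))
 *		blk_mq_complete_request(rq);	// fall back to single completion
 */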
1128
1129 static void blk_complete_reqs(struct llist_head *list)
1130 {
1131         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1132         struct request *rq, *next;
1133
1134         llist_for_each_entry_safe(rq, next, entry, ipi_list)
1135                 rq->q->mq_ops->complete(rq);
1136 }
1137
1138 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1139 {
1140         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1141 }
1142
1143 static int blk_softirq_cpu_dead(unsigned int cpu)
1144 {
1145         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1146         return 0;
1147 }
1148
1149 static void __blk_mq_complete_request_remote(void *data)
1150 {
1151         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
1152 }
1153
1154 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1155 {
1156         int cpu = raw_smp_processor_id();
1157
1158         if (!IS_ENABLED(CONFIG_SMP) ||
1159             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1160                 return false;
1161         /*
1162          * With force threaded interrupts enabled, raising softirq from an SMP
1163          * function call will always result in waking the ksoftirqd thread.
1164          * This is probably worse than completing the request on a different
1165          * cache domain.
1166          */
1167         if (force_irqthreads())
1168                 return false;
1169
1170         /* same CPU or cache domain?  Complete locally */
1171         if (cpu == rq->mq_ctx->cpu ||
1172             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1173              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1174                 return false;
1175
1176         /* don't try to IPI to an offline CPU */
1177         return cpu_online(rq->mq_ctx->cpu);
1178 }
1179
1180 static void blk_mq_complete_send_ipi(struct request *rq)
1181 {
1182         unsigned int cpu;
1183
1184         cpu = rq->mq_ctx->cpu;
1185         if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1186                 smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
1187 }
1188
1189 static void blk_mq_raise_softirq(struct request *rq)
1190 {
1191         struct llist_head *list;
1192
1193         preempt_disable();
1194         list = this_cpu_ptr(&blk_cpu_done);
1195         if (llist_add(&rq->ipi_list, list))
1196                 raise_softirq(BLOCK_SOFTIRQ);
1197         preempt_enable();
1198 }
1199
1200 bool blk_mq_complete_request_remote(struct request *rq)
1201 {
1202         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1203
1204         /*
1205          * For a request whose hctx has only one ctx mapping, or for
1206          * a polled request, always complete locally; it's pointless
1207          * to redirect the completion.
1208          */
1209         if ((rq->mq_hctx->nr_ctx == 1 &&
1210              rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1211              rq->cmd_flags & REQ_POLLED)
1212                 return false;
1213
1214         if (blk_mq_complete_need_ipi(rq)) {
1215                 blk_mq_complete_send_ipi(rq);
1216                 return true;
1217         }
1218
1219         if (rq->q->nr_hw_queues == 1) {
1220                 blk_mq_raise_softirq(rq);
1221                 return true;
1222         }
1223         return false;
1224 }
1225 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1226
1227 /**
1228  * blk_mq_complete_request - end I/O on a request
1229  * @rq:         the request being processed
1230  *
1231  * Description:
1232  *      Complete a request by scheduling the ->complete_rq operation.
1233  **/
1234 void blk_mq_complete_request(struct request *rq)
1235 {
1236         if (!blk_mq_complete_request_remote(rq))
1237                 rq->q->mq_ops->complete(rq);
1238 }
1239 EXPORT_SYMBOL(blk_mq_complete_request);
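/*
 * Usage sketch (editorial addition, not part of blk-mq.c): an interrupt
 * handler hands the finished request back to the block layer, which then
 * invokes the driver's blk_mq_ops->complete callback, possibly on another
 * CPU (see blk_mq_complete_request_remote() above).  "my_status" is a
 * hypothetical helper returning the hardware completion status.
 *
 *	// IRQ context:
 *	blk_mq_complete_request(rq);
 *
 *	// blk_mq_ops->complete callback:
 *	static void my_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, my_status(rq));
 *	}
 */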
1240
1241 /**
1242  * blk_mq_start_request - Start processing a request
1243  * @rq: Pointer to request to be started
1244  *
1245  * Function used by device drivers to notify the block layer that a request
1246  * is going to be processed now, so the block layer can do proper
1247  * initializations such as starting the timeout timer.
1248  */
1249 void blk_mq_start_request(struct request *rq)
1250 {
1251         struct request_queue *q = rq->q;
1252
1253         trace_block_rq_issue(rq);
1254
1255         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
1256             !blk_rq_is_passthrough(rq)) {
1257                 rq->io_start_time_ns = ktime_get_ns();
1258                 rq->stats_sectors = blk_rq_sectors(rq);
1259                 rq->rq_flags |= RQF_STATS;
1260                 rq_qos_issue(q, rq);
1261         }
1262
1263         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1264
1265         blk_add_timer(rq);
1266         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1267         rq->mq_hctx->tags->rqs[rq->tag] = rq;
1268
1269 #ifdef CONFIG_BLK_DEV_INTEGRITY
1270         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1271                 q->integrity.profile->prepare_fn(rq);
1272 #endif
1273         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1274                 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1275 }
1276 EXPORT_SYMBOL(blk_mq_start_request);
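/*
 * Usage sketch (editorial addition, not part of blk-mq.c): a minimal
 * blk_mq_ops->queue_rq implementation starts the request before handing it
 * to hardware and reports temporary resource shortage back to the core.
 * "my_hw_submit" is a hypothetical driver function.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_hw_submit(rq))
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 */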
1277
1278 /*
1279  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1280  * queues. This is important for md arrays to benefit from merging
1281  * requests.
1282  */
1283 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1284 {
1285         if (plug->multiple_queues)
1286                 return BLK_MAX_REQUEST_COUNT * 2;
1287         return BLK_MAX_REQUEST_COUNT;
1288 }
1289
1290 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1291 {
1292         struct request *last = rq_list_peek(&plug->mq_list);
1293
1294         if (!plug->rq_count) {
1295                 trace_block_plug(rq->q);
1296         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1297                    (!blk_queue_nomerges(rq->q) &&
1298                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1299                 blk_mq_flush_plug_list(plug, false);
1300                 last = NULL;
1301                 trace_block_plug(rq->q);
1302         }
1303
1304         if (!plug->multiple_queues && last && last->q != rq->q)
1305                 plug->multiple_queues = true;
1306         /*
1307          * Any request allocated from sched tags can't be issued to
1308          * ->queue_rqs() directly
1309          */
1310         if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1311                 plug->has_elevator = true;
1312         rq->rq_next = NULL;
1313         rq_list_add(&plug->mq_list, rq);
1314         plug->rq_count++;
1315 }
1316
1317 /**
1318  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1319  * @rq:         request to insert
1320  * @at_head:    insert request at head or tail of queue
1321  *
1322  * Description:
1323  *    Insert a fully prepared request at the back of the I/O scheduler queue
1324  *    for execution.  Don't wait for completion.
1325  *
1326  * Note:
1327  *    This function invokes rq->end_io directly if the queue is dead.
1328  */
1329 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1330 {
1331         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1332
1333         WARN_ON(irqs_disabled());
1334         WARN_ON(!blk_rq_is_passthrough(rq));
1335
1336         blk_account_io_start(rq);
1337
1338         /*
1339          * As plugging can be enabled for passthrough requests on a zoned
1340          * device, directly accessing the plug instead of using blk_mq_plug()
1341          * should not have any consequences.
1342          */
1343         if (current->plug && !at_head) {
1344                 blk_add_rq_to_plug(current->plug, rq);
1345                 return;
1346         }
1347
1348         blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1349         blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
1350 }
1351 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
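/*
 * Usage sketch (editorial addition, not part of blk-mq.c): asynchronous
 * execution of a prepared passthrough request with a completion callback.
 * "my_end_io" and "my_ctx" are hypothetical names.
 *
 *	static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t err)
 *	{
 *		struct my_ctx *ctx = rq->end_io_data;
 *		...
 *		return RQ_END_IO_FREE;	// let the core free the request
 *	}
 *
 *	rq->end_io_data = ctx;
 *	rq->end_io = my_end_io;
 *	blk_execute_rq_nowait(rq, false);
 */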
1352
1353 struct blk_rq_wait {
1354         struct completion done;
1355         blk_status_t ret;
1356 };
1357
1358 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1359 {
1360         struct blk_rq_wait *wait = rq->end_io_data;
1361
1362         wait->ret = ret;
1363         complete(&wait->done);
1364         return RQ_END_IO_NONE;
1365 }
1366
1367 bool blk_rq_is_poll(struct request *rq)
1368 {
1369         if (!rq->mq_hctx)
1370                 return false;
1371         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1372                 return false;
1373         return true;
1374 }
1375 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1376
1377 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1378 {
1379         do {
1380                 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1381                 cond_resched();
1382         } while (!completion_done(wait));
1383 }
1384
1385 /**
1386  * blk_execute_rq - insert a request into queue for execution
1387  * @rq:         request to insert
1388  * @at_head:    insert request at head or tail of queue
1389  *
1390  * Description:
1391  *    Insert a fully prepared request at the back of the I/O scheduler queue
1392  *    for execution and wait for completion.
1393  * Return: The blk_status_t result provided to blk_mq_end_request().
1394  */
1395 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1396 {
1397         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1398         struct blk_rq_wait wait = {
1399                 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1400         };
1401
1402         WARN_ON(irqs_disabled());
1403         WARN_ON(!blk_rq_is_passthrough(rq));
1404
1405         rq->end_io_data = &wait;
1406         rq->end_io = blk_end_sync_rq;
1407
1408         blk_account_io_start(rq);
1409         blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1410         blk_mq_run_hw_queue(hctx, false);
1411
1412         if (blk_rq_is_poll(rq)) {
1413                 blk_rq_poll_completion(rq, &wait.done);
1414         } else {
1415                 /*
1416                  * Prevent hang_check timer from firing at us during very long
1417                  * I/O
1418                  */
1419                 unsigned long hang_check = sysctl_hung_task_timeout_secs;
1420
1421                 if (hang_check)
1422                         while (!wait_for_completion_io_timeout(&wait.done,
1423                                         hang_check * (HZ/2)))
1424                                 ;
1425                 else
1426                         wait_for_completion_io(&wait.done);
1427         }
1428
1429         return wait.ret;
1430 }
1431 EXPORT_SYMBOL(blk_execute_rq);
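/*
 * Usage sketch (editorial addition, not part of blk-mq.c): the common
 * synchronous passthrough pattern combines allocation, execution and
 * freeing; "my_fill_cmd" is a hypothetical helper filling the payload.
 *
 *	blk_status_t status;
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	my_fill_cmd(rq);
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */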
1432
1433 static void __blk_mq_requeue_request(struct request *rq)
1434 {
1435         struct request_queue *q = rq->q;
1436
1437         blk_mq_put_driver_tag(rq);
1438
1439         trace_block_rq_requeue(rq);
1440         rq_qos_requeue(q, rq);
1441
1442         if (blk_mq_request_started(rq)) {
1443                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1444                 rq->rq_flags &= ~RQF_TIMED_OUT;
1445         }
1446 }
1447
1448 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1449 {
1450         struct request_queue *q = rq->q;
1451         unsigned long flags;
1452
1453         __blk_mq_requeue_request(rq);
1454
1455         /* this request will be re-inserted into the I/O scheduler queue */
1456         blk_mq_sched_requeue_request(rq);
1457
1458         spin_lock_irqsave(&q->requeue_lock, flags);
1459         list_add_tail(&rq->queuelist, &q->requeue_list);
1460         spin_unlock_irqrestore(&q->requeue_lock, flags);
1461
1462         if (kick_requeue_list)
1463                 blk_mq_kick_requeue_list(q);
1464 }
1465 EXPORT_SYMBOL(blk_mq_requeue_request);
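/*
 * Usage sketch (editorial addition, not part of blk-mq.c): a driver that
 * hits a transient error after blk_mq_start_request() can give the request
 * back to the block layer instead of failing it:
 *
 *	blk_mq_requeue_request(rq, true);	// re-insert and kick the requeue list
 */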
1466
1467 static void blk_mq_requeue_work(struct work_struct *work)
1468 {
1469         struct request_queue *q =
1470                 container_of(work, struct request_queue, requeue_work.work);
1471         LIST_HEAD(rq_list);
1472         LIST_HEAD(flush_list);
1473         struct request *rq;
1474
1475         spin_lock_irq(&q->requeue_lock);
1476         list_splice_init(&q->requeue_list, &rq_list);
1477         list_splice_init(&q->flush_list, &flush_list);
1478         spin_unlock_irq(&q->requeue_lock);
1479
1480         while (!list_empty(&rq_list)) {
1481                 rq = list_entry(rq_list.next, struct request, queuelist);
1482                 /*
1483                  * If RQF_DONTPREP is set, the request has been started by the
1484                  * driver already and might have driver-specific data allocated
1485                  * already.  Insert it into the hctx dispatch list to avoid
1486                  * block layer merges for the request.
1487                  */
1488                 if (rq->rq_flags & RQF_DONTPREP) {
1489                         list_del_init(&rq->queuelist);
1490                         blk_mq_request_bypass_insert(rq, 0);
1491                 } else {
1492                         list_del_init(&rq->queuelist);
1493                         blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1494                 }
1495         }
1496
1497         while (!list_empty(&flush_list)) {
1498                 rq = list_entry(flush_list.next, struct request, queuelist);
1499                 list_del_init(&rq->queuelist);
1500                 blk_mq_insert_request(rq, 0);
1501         }
1502
1503         blk_mq_run_hw_queues(q, false);
1504 }
1505
1506 void blk_mq_kick_requeue_list(struct request_queue *q)
1507 {
1508         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1509 }
1510 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1511
1512 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1513                                     unsigned long msecs)
1514 {
1515         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1516                                     msecs_to_jiffies(msecs));
1517 }
1518 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1519
1520 static bool blk_is_flush_data_rq(struct request *rq)
1521 {
1522         return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1523 }
1524
1525 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1526 {
1527         /*
1528          * If we find a request that isn't idle, we know the queue is busy
1529          * as it's checked in the iter.
1530          * Return false to stop the iteration.
1531          *
1532          * In case of queue quiesce, if one flush data request is completed,
1533          * don't count it as inflight, given that the flush sequence is
1534          * suspended and the original flush data request is invisible to the
1535          * driver, just like other pending requests, because of the quiesce.
1536          */
1537         if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1538                                 blk_is_flush_data_rq(rq) &&
1539                                 blk_mq_request_completed(rq))) {
1540                 bool *busy = priv;
1541
1542                 *busy = true;
1543                 return false;
1544         }
1545
1546         return true;
1547 }
1548
1549 bool blk_mq_queue_inflight(struct request_queue *q)
1550 {
1551         bool busy = false;
1552
1553         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1554         return busy;
1555 }
1556 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1557
1558 static void blk_mq_rq_timed_out(struct request *req)
1559 {
1560         req->rq_flags |= RQF_TIMED_OUT;
1561         if (req->q->mq_ops->timeout) {
1562                 enum blk_eh_timer_return ret;
1563
1564                 ret = req->q->mq_ops->timeout(req);
1565                 if (ret == BLK_EH_DONE)
1566                         return;
1567                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1568         }
1569
1570         blk_add_timer(req);
1571 }
1572
1573 struct blk_expired_data {
1574         bool has_timedout_rq;
1575         unsigned long next;
1576         unsigned long timeout_start;
1577 };
1578
1579 static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1580 {
1581         unsigned long deadline;
1582
1583         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1584                 return false;
1585         if (rq->rq_flags & RQF_TIMED_OUT)
1586                 return false;
1587
1588         deadline = READ_ONCE(rq->deadline);
1589         if (time_after_eq(expired->timeout_start, deadline))
1590                 return true;
1591
1592         if (expired->next == 0)
1593                 expired->next = deadline;
1594         else if (time_after(expired->next, deadline))
1595                 expired->next = deadline;
1596         return false;
1597 }
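/*
 * Worked example (illustration only): with expired->timeout_start == 1000
 * jiffies and three in-flight requests whose deadlines are 900, 1200 and
 * 1100 jiffies, the first is reported as expired (1000 >= 900), while the
 * other two update expired->next first to 1200 and then to 1100, so the
 * timeout timer is re-armed for the earliest remaining deadline.
 */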
1598
1599 void blk_mq_put_rq_ref(struct request *rq)
1600 {
1601         if (is_flush_rq(rq)) {
1602                 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1603                         blk_mq_free_request(rq);
1604         } else if (req_ref_put_and_test(rq)) {
1605                 __blk_mq_free_request(rq);
1606         }
1607 }
1608
1609 static bool blk_mq_check_expired(struct request *rq, void *priv)
1610 {
1611         struct blk_expired_data *expired = priv;
1612
1613         /*
1614          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1615          * be reallocated underneath the timeout handler's processing, which
1616          * makes the expire check reliable. If the request is not expired, then
1617          * it was completed and reallocated as a new request after returning
1618          * from blk_mq_check_expired().
1619          */
1620         if (blk_mq_req_expired(rq, expired)) {
1621                 expired->has_timedout_rq = true;
1622                 return false;
1623         }
1624         return true;
1625 }
1626
1627 static bool blk_mq_handle_expired(struct request *rq, void *priv)
1628 {
1629         struct blk_expired_data *expired = priv;
1630
1631         if (blk_mq_req_expired(rq, expired))
1632                 blk_mq_rq_timed_out(rq);
1633         return true;
1634 }
1635
1636 static void blk_mq_timeout_work(struct work_struct *work)
1637 {
1638         struct request_queue *q =
1639                 container_of(work, struct request_queue, timeout_work);
1640         struct blk_expired_data expired = {
1641                 .timeout_start = jiffies,
1642         };
1643         struct blk_mq_hw_ctx *hctx;
1644         unsigned long i;
1645
1646         /* A deadlock might occur if a request is stuck requiring a
1647          * timeout at the same time a queue freeze is waiting for
1648          * completion, since the timeout code would not be able to
1649          * acquire the queue reference here.
1650          *
1651          * That's why we don't use blk_queue_enter here; instead, we use
1652          * percpu_ref_tryget directly, because we need to be able to
1653          * obtain a reference even in the short window between the queue
1654          * starting to freeze, by dropping the first reference in
1655          * blk_freeze_queue_start, and the moment the last request is
1656          * consumed, marked by the instant q_usage_counter reaches
1657          * zero.
1658          */
1659         if (!percpu_ref_tryget(&q->q_usage_counter))
1660                 return;
1661
1662         /* check if there is any timed-out request */
1663         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
1664         if (expired.has_timedout_rq) {
1665                 /*
1666                  * Before walking tags, we must ensure any submit started
1667                  * before the current time has finished. Since the submit
1668                  * uses srcu or rcu, wait for a synchronization point to
1669                  * ensure all running submits have finished.
1670                  */
1671                 blk_mq_wait_quiesce_done(q->tag_set);
1672
1673                 expired.next = 0;
1674                 blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
1675         }
1676
1677         if (expired.next != 0) {
1678                 mod_timer(&q->timeout, expired.next);
1679         } else {
1680                 /*
1681                  * Request timeouts are handled as a forward rolling timer. If
1682                  * we end up here it means that no requests are pending and
1683                  * also that no request has been pending for a while. Mark
1684                  * each hctx as idle.
1685                  */
1686                 queue_for_each_hw_ctx(q, hctx, i) {
1687                         /* the hctx may be unmapped, so check it here */
1688                         if (blk_mq_hw_queue_mapped(hctx))
1689                                 blk_mq_tag_idle(hctx);
1690                 }
1691         }
1692         blk_queue_exit(q);
1693 }
1694
1695 struct flush_busy_ctx_data {
1696         struct blk_mq_hw_ctx *hctx;
1697         struct list_head *list;
1698 };
1699
1700 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1701 {
1702         struct flush_busy_ctx_data *flush_data = data;
1703         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1704         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1705         enum hctx_type type = hctx->type;
1706
1707         spin_lock(&ctx->lock);
1708         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1709         sbitmap_clear_bit(sb, bitnr);
1710         spin_unlock(&ctx->lock);
1711         return true;
1712 }
1713
1714 /*
1715  * Process software queues that have been marked busy, splicing them
1716  * to the for-dispatch list.
1717  */
1718 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1719 {
1720         struct flush_busy_ctx_data data = {
1721                 .hctx = hctx,
1722                 .list = list,
1723         };
1724
1725         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1726 }
1727 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1728
1729 struct dispatch_rq_data {
1730         struct blk_mq_hw_ctx *hctx;
1731         struct request *rq;
1732 };
1733
1734 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1735                 void *data)
1736 {
1737         struct dispatch_rq_data *dispatch_data = data;
1738         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1739         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1740         enum hctx_type type = hctx->type;
1741
1742         spin_lock(&ctx->lock);
1743         if (!list_empty(&ctx->rq_lists[type])) {
1744                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1745                 list_del_init(&dispatch_data->rq->queuelist);
1746                 if (list_empty(&ctx->rq_lists[type]))
1747                         sbitmap_clear_bit(sb, bitnr);
1748         }
1749         spin_unlock(&ctx->lock);
1750
1751         return !dispatch_data->rq;
1752 }
1753
1754 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1755                                         struct blk_mq_ctx *start)
1756 {
1757         unsigned off = start ? start->index_hw[hctx->type] : 0;
1758         struct dispatch_rq_data data = {
1759                 .hctx = hctx,
1760                 .rq   = NULL,
1761         };
1762
1763         __sbitmap_for_each_set(&hctx->ctx_map, off,
1764                                dispatch_rq_from_ctx, &data);
1765
1766         return data.rq;
1767 }
1768
1769 bool __blk_mq_alloc_driver_tag(struct request *rq)
1770 {
1771         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1772         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1773         int tag;
1774
1775         blk_mq_tag_busy(rq->mq_hctx);
1776
1777         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1778                 bt = &rq->mq_hctx->tags->breserved_tags;
1779                 tag_offset = 0;
1780         } else {
1781                 if (!hctx_may_queue(rq->mq_hctx, bt))
1782                         return false;
1783         }
1784
1785         tag = __sbitmap_queue_get(bt);
1786         if (tag == BLK_MQ_NO_TAG)
1787                 return false;
1788
1789         rq->tag = tag + tag_offset;
1790         blk_mq_inc_active_requests(rq->mq_hctx);
1791         return true;
1792 }
1793
1794 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1795                                 int flags, void *key)
1796 {
1797         struct blk_mq_hw_ctx *hctx;
1798
1799         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1800
1801         spin_lock(&hctx->dispatch_wait_lock);
1802         if (!list_empty(&wait->entry)) {
1803                 struct sbitmap_queue *sbq;
1804
1805                 list_del_init(&wait->entry);
1806                 sbq = &hctx->tags->bitmap_tags;
1807                 atomic_dec(&sbq->ws_active);
1808         }
1809         spin_unlock(&hctx->dispatch_wait_lock);
1810
1811         blk_mq_run_hw_queue(hctx, true);
1812         return 1;
1813 }
1814
1815 /*
1816  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1817  * the tag wakeups. For non-shared tags, we can simply mark us needing a
1818  * restart. For both cases, take care to check the condition again after
1819  * marking us as waiting.
1820  */
1821 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1822                                  struct request *rq)
1823 {
1824         struct sbitmap_queue *sbq;
1825         struct wait_queue_head *wq;
1826         wait_queue_entry_t *wait;
1827         bool ret;
1828
1829         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1830             !(blk_mq_is_shared_tags(hctx->flags))) {
1831                 blk_mq_sched_mark_restart_hctx(hctx);
1832
1833                 /*
1834                  * It's possible that a tag was freed in the window between the
1835                  * allocation failure and adding the hardware queue to the wait
1836                  * queue.
1837                  *
1838                  * Don't clear RESTART here, someone else could have set it.
1839                  * At most this will cost an extra queue run.
1840                  */
1841                 return blk_mq_get_driver_tag(rq);
1842         }
1843
1844         wait = &hctx->dispatch_wait;
1845         if (!list_empty_careful(&wait->entry))
1846                 return false;
1847
1848         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1849                 sbq = &hctx->tags->breserved_tags;
1850         else
1851                 sbq = &hctx->tags->bitmap_tags;
1852         wq = &bt_wait_ptr(sbq, hctx)->wait;
1853
1854         spin_lock_irq(&wq->lock);
1855         spin_lock(&hctx->dispatch_wait_lock);
1856         if (!list_empty(&wait->entry)) {
1857                 spin_unlock(&hctx->dispatch_wait_lock);
1858                 spin_unlock_irq(&wq->lock);
1859                 return false;
1860         }
1861
1862         atomic_inc(&sbq->ws_active);
1863         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1864         __add_wait_queue(wq, wait);
1865
1866         /*
1867          * Add an explicit barrier since blk_mq_get_driver_tag() may not
1868          * imply a barrier in case of failure.
1869          *
1870          * This orders adding us to the wait queue and allocating the driver tag.
1871          *
1872          * The pairing barrier is the one implied in sbitmap_queue_wake_up(),
1873          * which orders clearing the sbitmap tag bits against waitqueue_active()
1874          * in __sbitmap_queue_wake_up(), since waitqueue_active() is lockless.
1875          *
1876          * Otherwise, reordering of adding to the wait queue and getting the
1877          * driver tag may cause __sbitmap_queue_wake_up() to wake up nothing,
1878          * because waitqueue_active() may not observe us on the wait queue.
1879          */
1880         smp_mb();
1881
1882         /*
1883          * It's possible that a tag was freed in the window between the
1884          * allocation failure and adding the hardware queue to the wait
1885          * queue.
1886          */
1887         ret = blk_mq_get_driver_tag(rq);
1888         if (!ret) {
1889                 spin_unlock(&hctx->dispatch_wait_lock);
1890                 spin_unlock_irq(&wq->lock);
1891                 return false;
1892         }
1893
1894         /*
1895          * We got a tag, remove ourselves from the wait queue to ensure
1896          * someone else gets the wakeup.
1897          */
1898         list_del_init(&wait->entry);
1899         atomic_dec(&sbq->ws_active);
1900         spin_unlock(&hctx->dispatch_wait_lock);
1901         spin_unlock_irq(&wq->lock);
1902
1903         return true;
1904 }
1905
1906 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1907 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1908 /*
1909  * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
1910  * - EWMA is a simple way to compute a running average value
1911  * - weights (7/8 and 1/8) are applied so that it decreases exponentially
1912  * - a factor of 4 is used to avoid getting a too small (0) result; the exact
1913  *   factor doesn't matter much because EWMA decreases exponentially
1914  */
1915 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1916 {
1917         unsigned int ewma;
1918
1919         ewma = hctx->dispatch_busy;
1920
1921         if (!ewma && !busy)
1922                 return;
1923
1924         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1925         if (busy)
1926                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1927         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1928
1929         hctx->dispatch_busy = ewma;
1930 }
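/*
 * Worked example (illustration only) of the EWMA update above, starting
 * from dispatch_busy == 0 with repeated busy == true updates:
 *	(0 * 7 + 16) / 8 = 2,  (2 * 7 + 16) / 8 = 3,  (3 * 7 + 16) / 8 = 4, ...
 * and decaying back toward 0 once busy == false updates dominate:
 *	(4 * 7) / 8 = 3,  (3 * 7) / 8 = 2,  (2 * 7) / 8 = 1,  (1 * 7) / 8 = 0.
 */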
1931
1932 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1933
1934 static void blk_mq_handle_dev_resource(struct request *rq,
1935                                        struct list_head *list)
1936 {
1937         list_add(&rq->queuelist, list);
1938         __blk_mq_requeue_request(rq);
1939 }
1940
1941 static void blk_mq_handle_zone_resource(struct request *rq,
1942                                         struct list_head *zone_list)
1943 {
1944         /*
1945          * If we end up here it is because we cannot dispatch a request to a
1946          * specific zone due to LLD level zone-write locking or other zone
1947          * related resource not being available. In this case, set the request
1948          * aside in zone_list for retrying it later.
1949          */
1950         list_add(&rq->queuelist, zone_list);
1951         __blk_mq_requeue_request(rq);
1952 }
1953
1954 enum prep_dispatch {
1955         PREP_DISPATCH_OK,
1956         PREP_DISPATCH_NO_TAG,
1957         PREP_DISPATCH_NO_BUDGET,
1958 };
1959
1960 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1961                                                   bool need_budget)
1962 {
1963         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1964         int budget_token = -1;
1965
1966         if (need_budget) {
1967                 budget_token = blk_mq_get_dispatch_budget(rq->q);
1968                 if (budget_token < 0) {
1969                         blk_mq_put_driver_tag(rq);
1970                         return PREP_DISPATCH_NO_BUDGET;
1971                 }
1972                 blk_mq_set_rq_budget_token(rq, budget_token);
1973         }
1974
1975         if (!blk_mq_get_driver_tag(rq)) {
1976                 /*
1977                  * The initial allocation attempt failed, so we need to
1978                  * rerun the hardware queue when a tag is freed. The
1979                  * waitqueue takes care of that. If the queue is run
1980                  * before we add this entry back on the dispatch list,
1981                  * we'll re-run it below.
1982                  */
1983                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1984                         /*
1985                          * Budgets that were not obtained in this function will
1986                          * be released together when handling the partial dispatch.
1987                          */
1988                         if (need_budget)
1989                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
1990                         return PREP_DISPATCH_NO_TAG;
1991                 }
1992         }
1993
1994         return PREP_DISPATCH_OK;
1995 }
1996
1997 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1998 static void blk_mq_release_budgets(struct request_queue *q,
1999                 struct list_head *list)
2000 {
2001         struct request *rq;
2002
2003         list_for_each_entry(rq, list, queuelist) {
2004                 int budget_token = blk_mq_get_rq_budget_token(rq);
2005
2006                 if (budget_token >= 0)
2007                         blk_mq_put_dispatch_budget(q, budget_token);
2008         }
2009 }
2010
2011 /*
2012  * blk_mq_commit_rqs will notify the driver, using bd->last, that there are
2013  * no more requests. (See the comment on commit_rqs in struct blk_mq_ops for
2014  * details.)
2015  * Note that we must call this explicitly in the unusual cases where:
2016  *  1) we did not queue everything initially scheduled to queue
2017  *  2) the last attempt to queue a request failed
2018  */
2019 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
2020                               bool from_schedule)
2021 {
2022         if (hctx->queue->mq_ops->commit_rqs && queued) {
2023                 trace_block_unplug(hctx->queue, queued, !from_schedule);
2024                 hctx->queue->mq_ops->commit_rqs(hctx);
2025         }
2026 }
2027
2028 /*
2029  * Returns true if we did some work AND can potentially do more.
2030  */
2031 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2032                              unsigned int nr_budgets)
2033 {
2034         enum prep_dispatch prep;
2035         struct request_queue *q = hctx->queue;
2036         struct request *rq;
2037         int queued;
2038         blk_status_t ret = BLK_STS_OK;
2039         LIST_HEAD(zone_list);
2040         bool needs_resource = false;
2041
2042         if (list_empty(list))
2043                 return false;
2044
2045         /*
2046          * Now process all the entries, sending them to the driver.
2047          */
2048         queued = 0;
2049         do {
2050                 struct blk_mq_queue_data bd;
2051
2052                 rq = list_first_entry(list, struct request, queuelist);
2053
2054                 WARN_ON_ONCE(hctx != rq->mq_hctx);
2055                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2056                 if (prep != PREP_DISPATCH_OK)
2057                         break;
2058
2059                 list_del_init(&rq->queuelist);
2060
2061                 bd.rq = rq;
2062                 bd.last = list_empty(list);
2063
2064                 /*
2065                  * Once the request is queued to the LLD, there is no need to
2066                  * cover the budget any more.
2067                  */
2068                 if (nr_budgets)
2069                         nr_budgets--;
2070                 ret = q->mq_ops->queue_rq(hctx, &bd);
2071                 switch (ret) {
2072                 case BLK_STS_OK:
2073                         queued++;
2074                         break;
2075                 case BLK_STS_RESOURCE:
2076                         needs_resource = true;
2077                         fallthrough;
2078                 case BLK_STS_DEV_RESOURCE:
2079                         blk_mq_handle_dev_resource(rq, list);
2080                         goto out;
2081                 case BLK_STS_ZONE_RESOURCE:
2082                         /*
2083                          * Move the request to zone_list and keep going through
2084                          * the dispatch list to find more requests the drive can
2085                          * accept.
2086                          */
2087                         blk_mq_handle_zone_resource(rq, &zone_list);
2088                         needs_resource = true;
2089                         break;
2090                 default:
2091                         blk_mq_end_request(rq, ret);
2092                 }
2093         } while (!list_empty(list));
2094 out:
2095         if (!list_empty(&zone_list))
2096                 list_splice_tail_init(&zone_list, list);
2097
2098         /* If we didn't flush the entire list, we could have told the driver
2099          * there was more coming, but that turned out to be a lie.
2100          */
2101         if (!list_empty(list) || ret != BLK_STS_OK)
2102                 blk_mq_commit_rqs(hctx, queued, false);
2103
2104         /*
2105          * Any items that need requeuing? Stuff them into hctx->dispatch,
2106          * which is where we will continue on the next queue run.
2107          */
2108         if (!list_empty(list)) {
2109                 bool needs_restart;
2110                 /* For non-shared tags, the RESTART check will suffice */
2111                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2112                         ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2113                         blk_mq_is_shared_tags(hctx->flags));
2114
2115                 if (nr_budgets)
2116                         blk_mq_release_budgets(q, list);
2117
2118                 spin_lock(&hctx->lock);
2119                 list_splice_tail_init(list, &hctx->dispatch);
2120                 spin_unlock(&hctx->lock);
2121
2122                 /*
2123                  * Order adding requests to hctx->dispatch and checking
2124                  * SCHED_RESTART flag. The pair of this smp_mb() is the one
2125                  * in blk_mq_sched_restart(). This prevents the restart code
2126                  * path from missing the newly added requests on hctx->dispatch
2127                  * while SCHED_RESTART is observed here.
2128                  */
2129                 smp_mb();
2130
2131                 /*
2132                  * If SCHED_RESTART was set by the caller of this function and
2133                  * it is no longer set that means that it was cleared by another
2134                  * thread and hence that a queue rerun is needed.
2135                  *
2136                  * If 'no_tag' is set, that means that we failed getting
2137                  * a driver tag with an I/O scheduler attached. If our dispatch
2138                  * waitqueue is no longer active, ensure that we run the queue
2139                  * AFTER adding our entries back to the list.
2140                  *
2141                  * If no I/O scheduler has been configured it is possible that
2142                  * the hardware queue got stopped and restarted before requests
2143                  * were pushed back onto the dispatch list. Rerun the queue to
2144                  * avoid starvation. Notes:
2145                  * - blk_mq_run_hw_queue() checks whether or not a queue has
2146                  *   been stopped before rerunning a queue.
2147                  * - Some but not all block drivers stop a queue before
2148                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2149                  *   and dm-rq.
2150                  *
2151                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2152                  * bit is set, run queue after a delay to avoid IO stalls
2153                  * that could otherwise occur if the queue is idle.  We'll do
2154                  * similar if we couldn't get budget or couldn't lock a zone
2155                  * and SCHED_RESTART is set.
2156                  */
2157                 needs_restart = blk_mq_sched_needs_restart(hctx);
2158                 if (prep == PREP_DISPATCH_NO_BUDGET)
2159                         needs_resource = true;
2160                 if (!needs_restart ||
2161                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2162                         blk_mq_run_hw_queue(hctx, true);
2163                 else if (needs_resource)
2164                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2165
2166                 blk_mq_update_dispatch_busy(hctx, true);
2167                 return false;
2168         }
2169
2170         blk_mq_update_dispatch_busy(hctx, false);
2171         return true;
2172 }
2173
2174 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2175 {
2176         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2177
2178         if (cpu >= nr_cpu_ids)
2179                 cpu = cpumask_first(hctx->cpumask);
2180         return cpu;
2181 }
2182
2183 /*
2184  * It'd be great if the workqueue API had a way to pass
2185  * in a mask and had some smarts for more clever placement.
2186  * For now we just round-robin here, switching for every
2187  * BLK_MQ_CPU_WORK_BATCH queued items.
2188  */
2189 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2190 {
2191         bool tried = false;
2192         int next_cpu = hctx->next_cpu;
2193
2194         if (hctx->queue->nr_hw_queues == 1)
2195                 return WORK_CPU_UNBOUND;
2196
2197         if (--hctx->next_cpu_batch <= 0) {
2198 select_cpu:
2199                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2200                                 cpu_online_mask);
2201                 if (next_cpu >= nr_cpu_ids)
2202                         next_cpu = blk_mq_first_mapped_cpu(hctx);
2203                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2204         }
2205
2206         /*
2207          * Do an unbound schedule if we can't find an online CPU for this hctx,
2208          * which should only happen in the path of handling CPU DEAD.
2209          */
2210         if (!cpu_online(next_cpu)) {
2211                 if (!tried) {
2212                         tried = true;
2213                         goto select_cpu;
2214                 }
2215
2216                 /*
2217                  * Make sure to re-select the CPU next time, once CPUs in
2218                  * hctx->cpumask become online again.
2219                  */
2220                 hctx->next_cpu = next_cpu;
2221                 hctx->next_cpu_batch = 1;
2222                 return WORK_CPU_UNBOUND;
2223         }
2224
2225         hctx->next_cpu = next_cpu;
2226         return next_cpu;
2227 }
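/*
 * Worked example (illustration only): with hctx->cpumask = {2, 5, 8}, all
 * online, run_work is queued on CPU 2 for BLK_MQ_CPU_WORK_BATCH invocations,
 * then on CPU 5 for the next batch, then on CPU 8, and then wraps back to
 * CPU 2 via blk_mq_first_mapped_cpu().
 */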
2228
2229 /**
2230  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2231  * @hctx: Pointer to the hardware queue to run.
2232  * @msecs: Milliseconds of delay to wait before running the queue.
2233  *
2234  * Run a hardware queue asynchronously with a delay of @msecs.
2235  */
2236 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2237 {
2238         if (unlikely(blk_mq_hctx_stopped(hctx)))
2239                 return;
2240         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2241                                     msecs_to_jiffies(msecs));
2242 }
2243 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2244
2245 /**
2246  * blk_mq_run_hw_queue - Start to run a hardware queue.
2247  * @hctx: Pointer to the hardware queue to run.
2248  * @async: If we want to run the queue asynchronously.
2249  *
2250  * Check if the request queue is not in a quiesced state and if there are
2251  * pending requests to be sent. If this is true, run the queue to send requests
2252  * to hardware.
2253  */
2254 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2255 {
2256         bool need_run;
2257
2258         /*
2259          * We can't run the queue inline with interrupts disabled.
2260          */
2261         WARN_ON_ONCE(!async && in_interrupt());
2262
2263         might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2264
2265         /*
2266          * When queue is quiesced, we may be switching io scheduler, or
2267          * updating nr_hw_queues, or other things, and we can't run queue
2268          * any more, even __blk_mq_hctx_has_pending() can't be called safely.
2269          *
2270          * And queue will be rerun in blk_mq_unquiesce_queue() if it is
2271          * quiesced.
2272          */
2273         __blk_mq_run_dispatch_ops(hctx->queue, false,
2274                 need_run = !blk_queue_quiesced(hctx->queue) &&
2275                 blk_mq_hctx_has_pending(hctx));
2276
2277         if (!need_run)
2278                 return;
2279
2280         if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2281                 blk_mq_delay_run_hw_queue(hctx, 0);
2282                 return;
2283         }
2284
2285         blk_mq_run_dispatch_ops(hctx->queue,
2286                                 blk_mq_sched_dispatch_requests(hctx));
2287 }
2288 EXPORT_SYMBOL(blk_mq_run_hw_queue);
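/*
 * Hedged usage sketch (illustration only): drivers typically call this once
 * they can accept new requests again, e.g. from a completion or
 * resource-release path. "my_driver_resource_freed" is a hypothetical
 * callback; async == true avoids dispatching from a context that must not
 * block.
 *
 *	static void my_driver_resource_freed(struct blk_mq_hw_ctx *hctx)
 *	{
 *		blk_mq_run_hw_queue(hctx, true);
 *	}
 */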
2289
2290 /*
2291  * Return the preferred queue to dispatch from (if any) for a non-mq-aware
2292  * IO scheduler.
2293  */
2294 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2295 {
2296         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2297         /*
2298          * If the IO scheduler does not respect hardware queues when
2299          * dispatching, we just don't bother with multiple HW queues and
2300          * dispatch from hctx for the current CPU since running multiple queues
2301          * just causes lock contention inside the scheduler and pointless cache
2302          * bouncing.
2303          */
2304         struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2305
2306         if (!blk_mq_hctx_stopped(hctx))
2307                 return hctx;
2308         return NULL;
2309 }
2310
2311 /**
2312  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2313  * @q: Pointer to the request queue to run.
2314  * @async: If we want to run the queue asynchronously.
2315  */
2316 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2317 {
2318         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2319         unsigned long i;
2320
2321         sq_hctx = NULL;
2322         if (blk_queue_sq_sched(q))
2323                 sq_hctx = blk_mq_get_sq_hctx(q);
2324         queue_for_each_hw_ctx(q, hctx, i) {
2325                 if (blk_mq_hctx_stopped(hctx))
2326                         continue;
2327                 /*
2328                  * Dispatch from this hctx either if there's no hctx preferred
2329                  * by IO scheduler or if it has requests that bypass the
2330                  * scheduler.
2331                  */
2332                 if (!sq_hctx || sq_hctx == hctx ||
2333                     !list_empty_careful(&hctx->dispatch))
2334                         blk_mq_run_hw_queue(hctx, async);
2335         }
2336 }
2337 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2338
2339 /**
2340  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2341  * @q: Pointer to the request queue to run.
2342  * @msecs: Milliseconds of delay to wait before running the queues.
2343  */
2344 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2345 {
2346         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2347         unsigned long i;
2348
2349         sq_hctx = NULL;
2350         if (blk_queue_sq_sched(q))
2351                 sq_hctx = blk_mq_get_sq_hctx(q);
2352         queue_for_each_hw_ctx(q, hctx, i) {
2353                 if (blk_mq_hctx_stopped(hctx))
2354                         continue;
2355                 /*
2356                  * If there is already a run_work pending, leave the
2357                  * pending delay untouched. Otherwise, a hctx can stall
2358                  * if another hctx is re-delaying the other's work
2359                  * before the work executes.
2360                  */
2361                 if (delayed_work_pending(&hctx->run_work))
2362                         continue;
2363                 /*
2364                  * Dispatch from this hctx either if there's no hctx preferred
2365                  * by IO scheduler or if it has requests that bypass the
2366                  * scheduler.
2367                  */
2368                 if (!sq_hctx || sq_hctx == hctx ||
2369                     !list_empty_careful(&hctx->dispatch))
2370                         blk_mq_delay_run_hw_queue(hctx, msecs);
2371         }
2372 }
2373 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2374
2375 /*
2376  * This function is often used by a driver to pause .queue_rq() when there
2377  * aren't enough resources or some condition isn't satisfied, in which case
2378  * BLK_STS_RESOURCE is usually returned.
2379  *
2380  * We do not guarantee that dispatch can be drained or blocked
2381  * after blk_mq_stop_hw_queue() returns. Please use
2382  * blk_mq_quiesce_queue() for that requirement.
2383  */
2384 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2385 {
2386         cancel_delayed_work(&hctx->run_work);
2387
2388         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2389 }
2390 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
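/*
 * Hedged usage sketch (an assumption, not taken from a specific driver): a
 * .queue_rq() implementation that runs out of device resources often stops
 * the hw queue and returns BLK_STS_RESOURCE, then restarts the stopped
 * queues from its completion path. "my_queue_rq" and "my_device_full" are
 * hypothetical.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		if (my_device_full(hctx)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_STS_RESOURCE;
 *		}
 *		// ...issue bd->rq to hardware...
 *		return BLK_STS_OK;
 *	}
 *
 * Later, e.g. when a completed command frees a hardware slot:
 *	blk_mq_start_stopped_hw_queues(q, true);
 */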
2391
2392 /*
2393  * This function is often used by a driver to pause .queue_rq() when there
2394  * aren't enough resources or some condition isn't satisfied, in which case
2395  * BLK_STS_RESOURCE is usually returned.
2396  *
2397  * We do not guarantee that dispatch can be drained or blocked
2398  * after blk_mq_stop_hw_queues() returns. Please use
2399  * blk_mq_quiesce_queue() for that requirement.
2400  */
2401 void blk_mq_stop_hw_queues(struct request_queue *q)
2402 {
2403         struct blk_mq_hw_ctx *hctx;
2404         unsigned long i;
2405
2406         queue_for_each_hw_ctx(q, hctx, i)
2407                 blk_mq_stop_hw_queue(hctx);
2408 }
2409 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2410
2411 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2412 {
2413         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2414
2415         blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2416 }
2417 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2418
2419 void blk_mq_start_hw_queues(struct request_queue *q)
2420 {
2421         struct blk_mq_hw_ctx *hctx;
2422         unsigned long i;
2423
2424         queue_for_each_hw_ctx(q, hctx, i)
2425                 blk_mq_start_hw_queue(hctx);
2426 }
2427 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2428
2429 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2430 {
2431         if (!blk_mq_hctx_stopped(hctx))
2432                 return;
2433
2434         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2435         blk_mq_run_hw_queue(hctx, async);
2436 }
2437 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2438
2439 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2440 {
2441         struct blk_mq_hw_ctx *hctx;
2442         unsigned long i;
2443
2444         queue_for_each_hw_ctx(q, hctx, i)
2445                 blk_mq_start_stopped_hw_queue(hctx, async ||
2446                                         (hctx->flags & BLK_MQ_F_BLOCKING));
2447 }
2448 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2449
2450 static void blk_mq_run_work_fn(struct work_struct *work)
2451 {
2452         struct blk_mq_hw_ctx *hctx =
2453                 container_of(work, struct blk_mq_hw_ctx, run_work.work);
2454
2455         blk_mq_run_dispatch_ops(hctx->queue,
2456                                 blk_mq_sched_dispatch_requests(hctx));
2457 }
2458
2459 /**
2460  * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
2461  * @rq: Pointer to request to be inserted.
2462  * @flags: BLK_MQ_INSERT_*
2463  *
2464  * Should only be used carefully, when the caller knows we want to
2465  * bypass a potential IO scheduler on the target device.
2466  */
2467 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2468 {
2469         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2470
2471         spin_lock(&hctx->lock);
2472         if (flags & BLK_MQ_INSERT_AT_HEAD)
2473                 list_add(&rq->queuelist, &hctx->dispatch);
2474         else
2475                 list_add_tail(&rq->queuelist, &hctx->dispatch);
2476         spin_unlock(&hctx->lock);
2477 }
2478
2479 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2480                 struct blk_mq_ctx *ctx, struct list_head *list,
2481                 bool run_queue_async)
2482 {
2483         struct request *rq;
2484         enum hctx_type type = hctx->type;
2485
2486         /*
2487          * Try to issue requests directly if the hw queue isn't busy to save an
2488          * extra enqueue & dequeue to the sw queue.
2489          */
2490         if (!hctx->dispatch_busy && !run_queue_async) {
2491                 blk_mq_run_dispatch_ops(hctx->queue,
2492                         blk_mq_try_issue_list_directly(hctx, list));
2493                 if (list_empty(list))
2494                         goto out;
2495         }
2496
2497         /*
2498          * Preemption doesn't flush the plug list, so it's possible that
2499          * ctx->cpu is offline now.
2500          */
2501         list_for_each_entry(rq, list, queuelist) {
2502                 BUG_ON(rq->mq_ctx != ctx);
2503                 trace_block_rq_insert(rq);
2504                 if (rq->cmd_flags & REQ_NOWAIT)
2505                         run_queue_async = true;
2506         }
2507
2508         spin_lock(&ctx->lock);
2509         list_splice_tail_init(list, &ctx->rq_lists[type]);
2510         blk_mq_hctx_mark_pending(hctx, ctx);
2511         spin_unlock(&ctx->lock);
2512 out:
2513         blk_mq_run_hw_queue(hctx, run_queue_async);
2514 }
2515
2516 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2517 {
2518         struct request_queue *q = rq->q;
2519         struct blk_mq_ctx *ctx = rq->mq_ctx;
2520         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2521
2522         if (blk_rq_is_passthrough(rq)) {
2523                 /*
2524                  * Passthrough requests have to be added to hctx->dispatch
2525                  * directly.  The device may be in a situation where it can't
2526                  * handle FS requests and always returns BLK_STS_RESOURCE for
2527                  * them, which gets them added to hctx->dispatch.
2528                  *
2529                  * If a passthrough request is required to unblock the queues,
2530                  * and it is added to the scheduler queue, there is no chance to
2531                  * dispatch it given we prioritize requests in hctx->dispatch.
2532                  */
2533                 blk_mq_request_bypass_insert(rq, flags);
2534         } else if (req_op(rq) == REQ_OP_FLUSH) {
2535                 /*
2536                  * Firstly, a normal IO request is inserted into the scheduler
2537                  * queue or the sw queue, while we add the flush request to the
2538                  * dispatch queue (hctx->dispatch) directly. There is at most one
2539                  * in-flight flush request per hw queue, so it doesn't matter
2540                  * whether the flush request goes to the tail or the front of the
2541                  * dispatch queue.
2542                  *
2543                  * Secondly, in the case of NCQ, a flush request is a non-NCQ
2544                  * command, and queueing it will fail while any normal IO request
2545                  * (NCQ command) is in flight. Adding the flush rq to the front of
2546                  * hctx->dispatch tends to add latency to the flush rq (because of
2547                  * S_SCHED_RESTART) compared with adding it to the tail, which
2548                  * increases the chance of flush merging, so fewer flush requests
2549                  * are issued to the controller. blktests block/004 is observed to
2550                  * save ~10% time on a disk attached to an AHCI/NCQ drive when the
2551                  * flush rq is added to the front of hctx->dispatch.
2552                  *
2553                  * Simply queue flush rq to the front of hctx->dispatch so that
2554                  * intensive flush workloads can benefit in case of NCQ HW.
2555                  */
2556                 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2557         } else if (q->elevator) {
2558                 LIST_HEAD(list);
2559
2560                 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2561
2562                 list_add(&rq->queuelist, &list);
2563                 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2564         } else {
2565                 trace_block_rq_insert(rq);
2566
2567                 spin_lock(&ctx->lock);
2568                 if (flags & BLK_MQ_INSERT_AT_HEAD)
2569                         list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2570                 else
2571                         list_add_tail(&rq->queuelist,
2572                                       &ctx->rq_lists[hctx->type]);
2573                 blk_mq_hctx_mark_pending(hctx, ctx);
2574                 spin_unlock(&ctx->lock);
2575         }
2576 }
2577
2578 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2579                 unsigned int nr_segs)
2580 {
2581         int err;
2582
2583         if (bio->bi_opf & REQ_RAHEAD)
2584                 rq->cmd_flags |= REQ_FAILFAST_MASK;
2585
2586         rq->__sector = bio->bi_iter.bi_sector;
2587         blk_rq_bio_prep(rq, bio, nr_segs);
2588
2589         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2590         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2591         WARN_ON_ONCE(err);
2592
2593         blk_account_io_start(rq);
2594 }
2595
2596 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2597                                             struct request *rq, bool last)
2598 {
2599         struct request_queue *q = rq->q;
2600         struct blk_mq_queue_data bd = {
2601                 .rq = rq,
2602                 .last = last,
2603         };
2604         blk_status_t ret;
2605
2606         /*
2607          * For OK queue, we are done. For error, caller may kill it.
2608          * Any other error (busy), just add it to our list as we
2609          * previously would have done.
2610          */
2611         ret = q->mq_ops->queue_rq(hctx, &bd);
2612         switch (ret) {
2613         case BLK_STS_OK:
2614                 blk_mq_update_dispatch_busy(hctx, false);
2615                 break;
2616         case BLK_STS_RESOURCE:
2617         case BLK_STS_DEV_RESOURCE:
2618                 blk_mq_update_dispatch_busy(hctx, true);
2619                 __blk_mq_requeue_request(rq);
2620                 break;
2621         default:
2622                 blk_mq_update_dispatch_busy(hctx, false);
2623                 break;
2624         }
2625
2626         return ret;
2627 }
2628
2629 static bool blk_mq_get_budget_and_tag(struct request *rq)
2630 {
2631         int budget_token;
2632
2633         budget_token = blk_mq_get_dispatch_budget(rq->q);
2634         if (budget_token < 0)
2635                 return false;
2636         blk_mq_set_rq_budget_token(rq, budget_token);
2637         if (!blk_mq_get_driver_tag(rq)) {
2638                 blk_mq_put_dispatch_budget(rq->q, budget_token);
2639                 return false;
2640         }
2641         return true;
2642 }
2643
2644 /**
2645  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2646  * @hctx: Pointer of the associated hardware queue.
2647  * @rq: Pointer to request to be sent.
2648  *
2649  * If the device has enough resources to accept a new request now, send the
2650  * request directly to the device driver. Else, insert it into the
2651  * hctx->dispatch queue so we can try to send it again later. Requests
2652  * inserted into this queue have higher priority.
2653  */
2654 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2655                 struct request *rq)
2656 {
2657         blk_status_t ret;
2658
2659         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2660                 blk_mq_insert_request(rq, 0);
2661                 return;
2662         }
2663
2664         if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2665                 blk_mq_insert_request(rq, 0);
2666                 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2667                 return;
2668         }
2669
2670         ret = __blk_mq_issue_directly(hctx, rq, true);
2671         switch (ret) {
2672         case BLK_STS_OK:
2673                 break;
2674         case BLK_STS_RESOURCE:
2675         case BLK_STS_DEV_RESOURCE:
2676                 blk_mq_request_bypass_insert(rq, 0);
2677                 blk_mq_run_hw_queue(hctx, false);
2678                 break;
2679         default:
2680                 blk_mq_end_request(rq, ret);
2681                 break;
2682         }
2683 }
2684
2685 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2686 {
2687         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2688
2689         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2690                 blk_mq_insert_request(rq, 0);
2691                 return BLK_STS_OK;
2692         }
2693
2694         if (!blk_mq_get_budget_and_tag(rq))
2695                 return BLK_STS_RESOURCE;
2696         return __blk_mq_issue_directly(hctx, rq, last);
2697 }
2698
2699 static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2700 {
2701         struct blk_mq_hw_ctx *hctx = NULL;
2702         struct request *rq;
2703         int queued = 0;
2704         blk_status_t ret = BLK_STS_OK;
2705
2706         while ((rq = rq_list_pop(&plug->mq_list))) {
2707                 bool last = rq_list_empty(plug->mq_list);
2708
2709                 if (hctx != rq->mq_hctx) {
2710                         if (hctx) {
2711                                 blk_mq_commit_rqs(hctx, queued, false);
2712                                 queued = 0;
2713                         }
2714                         hctx = rq->mq_hctx;
2715                 }
2716
2717                 ret = blk_mq_request_issue_directly(rq, last);
2718                 switch (ret) {
2719                 case BLK_STS_OK:
2720                         queued++;
2721                         break;
2722                 case BLK_STS_RESOURCE:
2723                 case BLK_STS_DEV_RESOURCE:
2724                         blk_mq_request_bypass_insert(rq, 0);
2725                         blk_mq_run_hw_queue(hctx, false);
2726                         goto out;
2727                 default:
2728                         blk_mq_end_request(rq, ret);
2729                         break;
2730                 }
2731         }
2732
2733 out:
2734         if (ret != BLK_STS_OK)
2735                 blk_mq_commit_rqs(hctx, queued, false);
2736 }
2737
2738 static void __blk_mq_flush_plug_list(struct request_queue *q,
2739                                      struct blk_plug *plug)
2740 {
2741         if (blk_queue_quiesced(q))
2742                 return;
2743         q->mq_ops->queue_rqs(&plug->mq_list);
2744 }
2745
2746 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2747 {
2748         struct blk_mq_hw_ctx *this_hctx = NULL;
2749         struct blk_mq_ctx *this_ctx = NULL;
2750         struct request *requeue_list = NULL;
2751         struct request **requeue_lastp = &requeue_list;
2752         unsigned int depth = 0;
2753         bool is_passthrough = false;
2754         LIST_HEAD(list);
2755
2756         do {
2757                 struct request *rq = rq_list_pop(&plug->mq_list);
2758
2759                 if (!this_hctx) {
2760                         this_hctx = rq->mq_hctx;
2761                         this_ctx = rq->mq_ctx;
2762                         is_passthrough = blk_rq_is_passthrough(rq);
2763                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2764                            is_passthrough != blk_rq_is_passthrough(rq)) {
2765                         rq_list_add_tail(&requeue_lastp, rq);
2766                         continue;
2767                 }
2768                 list_add(&rq->queuelist, &list);
2769                 depth++;
2770         } while (!rq_list_empty(plug->mq_list));
2771
2772         plug->mq_list = requeue_list;
2773         trace_block_unplug(this_hctx->queue, depth, !from_sched);
2774
2775         percpu_ref_get(&this_hctx->queue->q_usage_counter);
2776         /* passthrough requests should never be issued to the I/O scheduler */
2777         if (is_passthrough) {
2778                 spin_lock(&this_hctx->lock);
2779                 list_splice_tail_init(&list, &this_hctx->dispatch);
2780                 spin_unlock(&this_hctx->lock);
2781                 blk_mq_run_hw_queue(this_hctx, from_sched);
2782         } else if (this_hctx->queue->elevator) {
2783                 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
2784                                 &list, 0);
2785                 blk_mq_run_hw_queue(this_hctx, from_sched);
2786         } else {
2787                 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
2788         }
2789         percpu_ref_put(&this_hctx->queue->q_usage_counter);
2790 }
2791
2792 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2793 {
2794         struct request *rq;
2795
2796         /*
2797          * We may have been called recursively midway through handling
2798          * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2799          * To avoid mq_list changing under our feet, clear rq_count early and
2800          * bail out specifically if rq_count is 0 rather than checking
2801          * whether the mq_list is empty.
2802          */
2803         if (plug->rq_count == 0)
2804                 return;
2805         plug->rq_count = 0;
2806
2807         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2808                 struct request_queue *q;
2809
2810                 rq = rq_list_peek(&plug->mq_list);
2811                 q = rq->q;
2812
2813                 /*
2814                  * Peek first request and see if we have a ->queue_rqs() hook.
2815                  * If we do, we can dispatch the whole plug list in one go. We
2816                  * already know at this point that all requests belong to the
2817                  * same queue; the caller must ensure that's the case.
2818                  */
2819                 if (q->mq_ops->queue_rqs) {
2820                         blk_mq_run_dispatch_ops(q,
2821                                 __blk_mq_flush_plug_list(q, plug));
2822                         if (rq_list_empty(plug->mq_list))
2823                                 return;
2824                 }
2825
2826                 blk_mq_run_dispatch_ops(q,
2827                                 blk_mq_plug_issue_direct(plug));
2828                 if (rq_list_empty(plug->mq_list))
2829                         return;
2830         }
2831
2832         do {
2833                 blk_mq_dispatch_plug_list(plug, from_schedule);
2834         } while (!rq_list_empty(plug->mq_list));
2835 }
2836
2837 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2838                 struct list_head *list)
2839 {
2840         int queued = 0;
2841         blk_status_t ret = BLK_STS_OK;
2842
2843         while (!list_empty(list)) {
2844                 struct request *rq = list_first_entry(list, struct request,
2845                                 queuelist);
2846
2847                 list_del_init(&rq->queuelist);
2848                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2849                 switch (ret) {
2850                 case BLK_STS_OK:
2851                         queued++;
2852                         break;
2853                 case BLK_STS_RESOURCE:
2854                 case BLK_STS_DEV_RESOURCE:
2855                         blk_mq_request_bypass_insert(rq, 0);
2856                         if (list_empty(list))
2857                                 blk_mq_run_hw_queue(hctx, false);
2858                         goto out;
2859                 default:
2860                         blk_mq_end_request(rq, ret);
2861                         break;
2862                 }
2863         }
2864
2865 out:
2866         if (ret != BLK_STS_OK)
2867                 blk_mq_commit_rqs(hctx, queued, false);
2868 }
2869
2870 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2871                                      struct bio *bio, unsigned int nr_segs)
2872 {
2873         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2874                 if (blk_attempt_plug_merge(q, bio, nr_segs))
2875                         return true;
2876                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2877                         return true;
2878         }
2879         return false;
2880 }
2881
2882 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2883                                                struct blk_plug *plug,
2884                                                struct bio *bio,
2885                                                unsigned int nsegs)
2886 {
2887         struct blk_mq_alloc_data data = {
2888                 .q              = q,
2889                 .nr_tags        = 1,
2890                 .cmd_flags      = bio->bi_opf,
2891         };
2892         struct request *rq;
2893
2894         if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2895                 return NULL;
2896
2897         rq_qos_throttle(q, bio);
2898
2899         if (plug) {
2900                 data.nr_tags = plug->nr_ios;
2901                 plug->nr_ios = 1;
2902                 data.cached_rq = &plug->cached_rq;
2903         }
2904
2905         rq = __blk_mq_alloc_requests(&data);
2906         if (rq)
2907                 return rq;
2908         rq_qos_cleanup(q, bio);
2909         if (bio->bi_opf & REQ_NOWAIT)
2910                 bio_wouldblock_error(bio);
2911         return NULL;
2912 }
2913
2914 /*
2915  * Check if we can use the passed-in request for submitting the passed-in bio,
2916  * and remove it from the request list if it can be used.
2917  */
2918 static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
2919                 struct bio *bio)
2920 {
2921         enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
2922         enum hctx_type hctx_type = rq->mq_hctx->type;
2923
2924         WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2925
2926         if (type != hctx_type &&
2927             !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
2928                 return false;
2929         if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2930                 return false;
2931
2932         /*
2933          * If any qos ->throttle() ends up blocking, we will have flushed the
2934          * plug and hence killed the cached_rq list as well. Pop this entry
2935          * before we throttle.
2936          */
2937         plug->cached_rq = rq_list_next(rq);
2938         rq_qos_throttle(rq->q, bio);
2939
2940         blk_mq_rq_time_init(rq, 0);
2941         rq->cmd_flags = bio->bi_opf;
2942         INIT_LIST_HEAD(&rq->queuelist);
2943         return true;
2944 }
2945
2946 /**
2947  * blk_mq_submit_bio - Create and send a request to block device.
2948  * @bio: Bio pointer.
2949  *
2950  * Builds up a request structure from @q and @bio and sends it to the device. The
2951  * request may not be queued directly to hardware if:
2952  * * This request can be merged with another one
2953  * * We want to place request at plug queue for possible future merging
2954  * * There is an IO scheduler active at this queue
2955  *
2956  * It will not queue the request if there is an error with the bio, or at the
2957  * request creation.
2958  */
2959 void blk_mq_submit_bio(struct bio *bio)
2960 {
2961         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2962         struct blk_plug *plug = blk_mq_plug(bio);
2963         const int is_sync = op_is_sync(bio->bi_opf);
2964         struct blk_mq_hw_ctx *hctx;
2965         struct request *rq = NULL;
2966         unsigned int nr_segs = 1;
2967         blk_status_t ret;
2968
2969         bio = blk_queue_bounce(bio, q);
2970
2971         if (plug) {
2972                 rq = rq_list_peek(&plug->cached_rq);
2973                 if (rq && rq->q != q)
2974                         rq = NULL;
2975         }
2976         if (rq) {
2977                 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
2978                         bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2979                         if (!bio)
2980                                 return;
2981                 }
2982                 if (!bio_integrity_prep(bio))
2983                         return;
2984                 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
2985                         return;
2986                 if (blk_mq_use_cached_rq(rq, plug, bio))
2987                         goto done;
2988                 percpu_ref_get(&q->q_usage_counter);
2989         } else {
2990                 if (unlikely(bio_queue_enter(bio)))
2991                         return;
2992                 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
2993                         bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2994                         if (!bio)
2995                                 goto fail;
2996                 }
2997                 if (!bio_integrity_prep(bio))
2998                         goto fail;
2999         }
3000
3001         rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3002         if (unlikely(!rq)) {
3003 fail:
3004                 blk_queue_exit(q);
3005                 return;
3006         }
3007
3008 done:
3009         trace_block_getrq(bio);
3010
3011         rq_qos_track(q, rq, bio);
3012
3013         blk_mq_bio_to_request(rq, bio, nr_segs);
3014
3015         ret = blk_crypto_rq_get_keyslot(rq);
3016         if (ret != BLK_STS_OK) {
3017                 bio->bi_status = ret;
3018                 bio_endio(bio);
3019                 blk_mq_free_request(rq);
3020                 return;
3021         }
3022
3023         if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3024                 return;
3025
3026         if (plug) {
3027                 blk_add_rq_to_plug(plug, rq);
3028                 return;
3029         }
3030
3031         hctx = rq->mq_hctx;
3032         if ((rq->rq_flags & RQF_USE_SCHED) ||
3033             (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3034                 blk_mq_insert_request(rq, 0);
3035                 blk_mq_run_hw_queue(hctx, true);
3036         } else {
3037                 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3038         }
3039 }
3040
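/*
 * Illustrative sketch (not part of blk-mq): callers never invoke
 * blk_mq_submit_bio() directly.  A filesystem or driver builds a bio and
 * calls submit_bio(), which routes it here for blk-mq based devices.
 * example_end_io() and example_write_page() are hypothetical names.
 */
static void example_end_io(struct bio *bio)
{
        /* the write has completed (or failed); drop our bio reference */
        bio_put(bio);
}

static void example_write_page(struct block_device *bdev, struct page *page,
                               sector_t sector)
{
        struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);

        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = example_end_io;
        __bio_add_page(bio, page, PAGE_SIZE, 0);
        submit_bio(bio);        /* ends up in blk_mq_submit_bio() for mq devices */
}
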
3041 #ifdef CONFIG_BLK_MQ_STACKING
3042 /**
3043  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3044  * @rq: the request being queued
3045  */
3046 blk_status_t blk_insert_cloned_request(struct request *rq)
3047 {
3048         struct request_queue *q = rq->q;
3049         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3050         unsigned int max_segments = blk_rq_get_max_segments(rq);
3051         blk_status_t ret;
3052
3053         if (blk_rq_sectors(rq) > max_sectors) {
3054                 /*
3055                  * A SCSI device has no good way to report whether Write
3056                  * Same/Zero is actually supported. If a device rejects a
3057                  * non-read/write command (discard, write same, etc.), the
3058                  * low-level device driver will set the relevant queue limit to
3059                  * 0 to prevent blk-lib from issuing more of the offending
3060                  * operations. Commands queued prior to the queue limit being
3061                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
3062                  * errors being propagated to upper layers.
3063                  */
3064                 if (max_sectors == 0)
3065                         return BLK_STS_NOTSUPP;
3066
3067                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
3068                         __func__, blk_rq_sectors(rq), max_sectors);
3069                 return BLK_STS_IOERR;
3070         }
3071
3072         /*
3073          * The queue settings related to segment counting may differ from the
3074          * original queue.
3075          */
3076         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3077         if (rq->nr_phys_segments > max_segments) {
3078                 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
3079                         __func__, rq->nr_phys_segments, max_segments);
3080                 return BLK_STS_IOERR;
3081         }
3082
3083         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3084                 return BLK_STS_IOERR;
3085
3086         ret = blk_crypto_rq_get_keyslot(rq);
3087         if (ret != BLK_STS_OK)
3088                 return ret;
3089
3090         blk_account_io_start(rq);
3091
3092         /*
3093          * Since we have a scheduler attached to the top device,
3094          * bypass a potential scheduler on the bottom device when
3095          * inserting.
3096          */
3097         blk_mq_run_dispatch_ops(q,
3098                         ret = blk_mq_request_issue_directly(rq, true));
3099         if (ret)
3100                 blk_account_io_done(rq, ktime_get_ns());
3101         return ret;
3102 }
3103 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3104
3105 /**
3106  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3107  * @rq: the clone request to be cleaned up
3108  *
3109  * Description:
3110  *     Free all bios in @rq for a cloned request.
3111  */
3112 void blk_rq_unprep_clone(struct request *rq)
3113 {
3114         struct bio *bio;
3115
3116         while ((bio = rq->bio) != NULL) {
3117                 rq->bio = bio->bi_next;
3118
3119                 bio_put(bio);
3120         }
3121 }
3122 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3123
3124 /**
3125  * blk_rq_prep_clone - Helper function to setup clone request
3126  * @rq: the request to be setup
3127  * @rq_src: original request to be cloned
3128  * @bs: bio_set that bios for clone are allocated from
3129  * @gfp_mask: memory allocation mask for bio
3130  * @bio_ctr: setup function to be called for each clone bio.
3131  *           Returns %0 for success, non %0 for failure.
3132  * @data: private data to be passed to @bio_ctr
3133  *
3134  * Description:
3135  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3136  *     Also, the pages the original bios point to are not copied; the
3137  *     cloned bios just point to the same pages.
3138  *     So cloned bios must be completed before original bios, which means
3139  *     the caller must complete @rq before @rq_src.
3140  */
3141 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3142                       struct bio_set *bs, gfp_t gfp_mask,
3143                       int (*bio_ctr)(struct bio *, struct bio *, void *),
3144                       void *data)
3145 {
3146         struct bio *bio, *bio_src;
3147
3148         if (!bs)
3149                 bs = &fs_bio_set;
3150
3151         __rq_for_each_bio(bio_src, rq_src) {
3152                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3153                                       bs);
3154                 if (!bio)
3155                         goto free_and_out;
3156
3157                 if (bio_ctr && bio_ctr(bio, bio_src, data))
3158                         goto free_and_out;
3159
3160                 if (rq->bio) {
3161                         rq->biotail->bi_next = bio;
3162                         rq->biotail = bio;
3163                 } else {
3164                         rq->bio = rq->biotail = bio;
3165                 }
3166                 bio = NULL;
3167         }
3168
3169         /* Copy attributes of the original request to the clone request. */
3170         rq->__sector = blk_rq_pos(rq_src);
3171         rq->__data_len = blk_rq_bytes(rq_src);
3172         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3173                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3174                 rq->special_vec = rq_src->special_vec;
3175         }
3176         rq->nr_phys_segments = rq_src->nr_phys_segments;
3177         rq->ioprio = rq_src->ioprio;
3178
3179         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3180                 goto free_and_out;
3181
3182         return 0;
3183
3184 free_and_out:
3185         if (bio)
3186                 bio_put(bio);
3187         blk_rq_unprep_clone(rq);
3188
3189         return -ENOMEM;
3190 }
3191 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
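
/*
 * Illustrative sketch (not part of blk-mq): a request-based stacking driver
 * (dm-multipath style) clones an incoming request onto a lower device and
 * dispatches the clone with blk_insert_cloned_request().  The name
 * example_clone_and_issue() and the GFP/flag choices are hypothetical.
 */
static int example_clone_and_issue(struct request *rq_src,
                                   struct request_queue *bottom_q)
{
        struct request *clone;
        blk_status_t ret;
        int err;

        clone = blk_mq_alloc_request(bottom_q, rq_src->cmd_flags,
                                     BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(clone))
                return PTR_ERR(clone);

        /* share the original bios/pages; a NULL bio_set falls back to fs_bio_set */
        err = blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC, NULL, NULL);
        if (err) {
                blk_mq_free_request(clone);
                return err;
        }

        /* real drivers also set clone->end_io to learn about completion */
        ret = blk_insert_cloned_request(clone);
        if (ret != BLK_STS_OK) {
                blk_rq_unprep_clone(clone);
                blk_mq_free_request(clone);
                return blk_status_to_errno(ret);
        }
        return 0;
}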
3192 #endif /* CONFIG_BLK_MQ_STACKING */
3193
3194 /*
3195  * Steal bios from a request and add them to a bio list.
3196  * The request must not have been partially completed before.
3197  */
3198 void blk_steal_bios(struct bio_list *list, struct request *rq)
3199 {
3200         if (rq->bio) {
3201                 if (list->tail)
3202                         list->tail->bi_next = rq->bio;
3203                 else
3204                         list->head = rq->bio;
3205                 list->tail = rq->biotail;
3206
3207                 rq->bio = NULL;
3208                 rq->biotail = NULL;
3209         }
3210
3211         rq->__data_len = 0;
3212 }
3213 EXPORT_SYMBOL_GPL(blk_steal_bios);
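
/*
 * Illustrative sketch (not part of blk-mq): a multipath-style driver can
 * steal the bios off a failed request, complete the now data-less request,
 * and later resubmit the bios on another path.  example_failover() and the
 * caller-provided bio_list are hypothetical.
 */
static void example_failover(struct request *rq, struct bio_list *requeue)
{
        /* move the bios to the caller's list; rq no longer owns any data */
        blk_steal_bios(requeue, rq);
        blk_mq_end_request(rq, BLK_STS_OK);
        /* the caller later retargets and resubmits the queued bios */
}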
3214
3215 static size_t order_to_size(unsigned int order)
3216 {
3217         return (size_t)PAGE_SIZE << order;
3218 }
3219
3220 /* called before freeing request pool in @tags */
3221 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3222                                     struct blk_mq_tags *tags)
3223 {
3224         struct page *page;
3225         unsigned long flags;
3226
3227         /*
3228          * There is no need to clear the mapping if the driver tags are not
3229          * initialized or the mapping belongs to the driver tags.
3230          */
3231         if (!drv_tags || drv_tags == tags)
3232                 return;
3233
3234         list_for_each_entry(page, &tags->page_list, lru) {
3235                 unsigned long start = (unsigned long)page_address(page);
3236                 unsigned long end = start + order_to_size(page->private);
3237                 int i;
3238
3239                 for (i = 0; i < drv_tags->nr_tags; i++) {
3240                         struct request *rq = drv_tags->rqs[i];
3241                         unsigned long rq_addr = (unsigned long)rq;
3242
3243                         if (rq_addr >= start && rq_addr < end) {
3244                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
3245                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3246                         }
3247                 }
3248         }
3249
3250         /*
3251          * Wait until all pending iteration is done.
3252          *
3253          * The request references have been cleared, and that is guaranteed to
3254          * be observed after the ->lock is released.
3255          */
3256         spin_lock_irqsave(&drv_tags->lock, flags);
3257         spin_unlock_irqrestore(&drv_tags->lock, flags);
3258 }
3259
3260 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3261                      unsigned int hctx_idx)
3262 {
3263         struct blk_mq_tags *drv_tags;
3264         struct page *page;
3265
3266         if (list_empty(&tags->page_list))
3267                 return;
3268
3269         if (blk_mq_is_shared_tags(set->flags))
3270                 drv_tags = set->shared_tags;
3271         else
3272                 drv_tags = set->tags[hctx_idx];
3273
3274         if (tags->static_rqs && set->ops->exit_request) {
3275                 int i;
3276
3277                 for (i = 0; i < tags->nr_tags; i++) {
3278                         struct request *rq = tags->static_rqs[i];
3279
3280                         if (!rq)
3281                                 continue;
3282                         set->ops->exit_request(set, rq, hctx_idx);
3283                         tags->static_rqs[i] = NULL;
3284                 }
3285         }
3286
3287         blk_mq_clear_rq_mapping(drv_tags, tags);
3288
3289         while (!list_empty(&tags->page_list)) {
3290                 page = list_first_entry(&tags->page_list, struct page, lru);
3291                 list_del_init(&page->lru);
3292                 /*
3293                  * Remove kmemleak object previously allocated in
3294                  * blk_mq_alloc_rqs().
3295                  */
3296                 kmemleak_free(page_address(page));
3297                 __free_pages(page, page->private);
3298         }
3299 }
3300
3301 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3302 {
3303         kfree(tags->rqs);
3304         tags->rqs = NULL;
3305         kfree(tags->static_rqs);
3306         tags->static_rqs = NULL;
3307
3308         blk_mq_free_tags(tags);
3309 }
3310
3311 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3312                 unsigned int hctx_idx)
3313 {
3314         int i;
3315
3316         for (i = 0; i < set->nr_maps; i++) {
3317                 unsigned int start = set->map[i].queue_offset;
3318                 unsigned int end = start + set->map[i].nr_queues;
3319
3320                 if (hctx_idx >= start && hctx_idx < end)
3321                         break;
3322         }
3323
3324         if (i >= set->nr_maps)
3325                 i = HCTX_TYPE_DEFAULT;
3326
3327         return i;
3328 }
3329
3330 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3331                 unsigned int hctx_idx)
3332 {
3333         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3334
3335         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3336 }
3337
3338 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3339                                                unsigned int hctx_idx,
3340                                                unsigned int nr_tags,
3341                                                unsigned int reserved_tags)
3342 {
3343         int node = blk_mq_get_hctx_node(set, hctx_idx);
3344         struct blk_mq_tags *tags;
3345
3346         if (node == NUMA_NO_NODE)
3347                 node = set->numa_node;
3348
3349         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3350                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3351         if (!tags)
3352                 return NULL;
3353
3354         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3355                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3356                                  node);
3357         if (!tags->rqs)
3358                 goto err_free_tags;
3359
3360         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3361                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3362                                         node);
3363         if (!tags->static_rqs)
3364                 goto err_free_rqs;
3365
3366         return tags;
3367
3368 err_free_rqs:
3369         kfree(tags->rqs);
3370 err_free_tags:
3371         blk_mq_free_tags(tags);
3372         return NULL;
3373 }
3374
3375 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3376                                unsigned int hctx_idx, int node)
3377 {
3378         int ret;
3379
3380         if (set->ops->init_request) {
3381                 ret = set->ops->init_request(set, rq, hctx_idx, node);
3382                 if (ret)
3383                         return ret;
3384         }
3385
3386         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3387         return 0;
3388 }
3389
3390 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3391                             struct blk_mq_tags *tags,
3392                             unsigned int hctx_idx, unsigned int depth)
3393 {
3394         unsigned int i, j, entries_per_page, max_order = 4;
3395         int node = blk_mq_get_hctx_node(set, hctx_idx);
3396         size_t rq_size, left;
3397
3398         if (node == NUMA_NO_NODE)
3399                 node = set->numa_node;
3400
3401         INIT_LIST_HEAD(&tags->page_list);
3402
3403         /*
3404          * rq_size is the size of the request plus driver payload, rounded
3405          * up to the cache line size.
3406          */
3407         rq_size = round_up(sizeof(struct request) + set->cmd_size,
3408                                 cache_line_size());
3409         left = rq_size * depth;
3410
3411         for (i = 0; i < depth; ) {
3412                 int this_order = max_order;
3413                 struct page *page;
3414                 int to_do;
3415                 void *p;
3416
3417                 while (this_order && left < order_to_size(this_order - 1))
3418                         this_order--;
3419
3420                 do {
3421                         page = alloc_pages_node(node,
3422                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3423                                 this_order);
3424                         if (page)
3425                                 break;
3426                         if (!this_order--)
3427                                 break;
3428                         if (order_to_size(this_order) < rq_size)
3429                                 break;
3430                 } while (1);
3431
3432                 if (!page)
3433                         goto fail;
3434
3435                 page->private = this_order;
3436                 list_add_tail(&page->lru, &tags->page_list);
3437
3438                 p = page_address(page);
3439                 /*
3440                  * Allow kmemleak to scan these pages as they contain pointers
3441          * to additional allocations made via ops->init_request().
3442                  */
3443                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3444                 entries_per_page = order_to_size(this_order) / rq_size;
3445                 to_do = min(entries_per_page, depth - i);
3446                 left -= to_do * rq_size;
3447                 for (j = 0; j < to_do; j++) {
3448                         struct request *rq = p;
3449
3450                         tags->static_rqs[i] = rq;
3451                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3452                                 tags->static_rqs[i] = NULL;
3453                                 goto fail;
3454                         }
3455
3456                         p += rq_size;
3457                         i++;
3458                 }
3459         }
3460         return 0;
3461
3462 fail:
3463         blk_mq_free_rqs(set, tags, hctx_idx);
3464         return -ENOMEM;
3465 }
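
/*
 * Worked example with illustrative numbers: assuming 4 KiB pages, 64-byte
 * cache lines and sizeof(struct request) + set->cmd_size rounding up to
 * 512 bytes, an order-4 chunk is 16 pages = 65536 bytes and holds
 * 65536 / 512 = 128 requests, so a queue depth of 256 needs two such
 * chunks (left starts at 512 * 256 = 131072 bytes).
 */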
3466
3467 struct rq_iter_data {
3468         struct blk_mq_hw_ctx *hctx;
3469         bool has_rq;
3470 };
3471
3472 static bool blk_mq_has_request(struct request *rq, void *data)
3473 {
3474         struct rq_iter_data *iter_data = data;
3475
3476         if (rq->mq_hctx != iter_data->hctx)
3477                 return true;
3478         iter_data->has_rq = true;
3479         return false;
3480 }
3481
3482 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3483 {
3484         struct blk_mq_tags *tags = hctx->sched_tags ?
3485                         hctx->sched_tags : hctx->tags;
3486         struct rq_iter_data data = {
3487                 .hctx   = hctx,
3488         };
3489
3490         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3491         return data.has_rq;
3492 }
3493
3494 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3495                 struct blk_mq_hw_ctx *hctx)
3496 {
3497         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3498                 return false;
3499         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3500                 return false;
3501         return true;
3502 }
3503
3504 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3505 {
3506         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3507                         struct blk_mq_hw_ctx, cpuhp_online);
3508
3509         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3510             !blk_mq_last_cpu_in_hctx(cpu, hctx))
3511                 return 0;
3512
3513         /*
3514          * Prevent new requests from being allocated on the current hctx.
3515          *
3516          * The smp_mb__after_atomic() pairs with the implied barrier in
3517          * test_and_set_bit_lock() in sbitmap_get().  This ensures the inactive
3518          * flag is seen once we return from the tag allocator.
3519          */
3520         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3521         smp_mb__after_atomic();
3522
3523         /*
3524          * Try to grab a reference to the queue and wait for any outstanding
3525          * requests.  If we could not grab a reference, the queue has been
3526          * frozen and there are no requests.
3527          */
3528         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3529                 while (blk_mq_hctx_has_requests(hctx))
3530                         msleep(5);
3531                 percpu_ref_put(&hctx->queue->q_usage_counter);
3532         }
3533
3534         return 0;
3535 }
3536
3537 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3538 {
3539         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3540                         struct blk_mq_hw_ctx, cpuhp_online);
3541
3542         if (cpumask_test_cpu(cpu, hctx->cpumask))
3543                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3544         return 0;
3545 }
3546
3547 /*
3548  * 'cpu' is going away. Splice any existing rq_list entries from this
3549  * software queue to the hw queue dispatch list, and ensure that it
3550  * gets run.
3551  */
3552 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3553 {
3554         struct blk_mq_hw_ctx *hctx;
3555         struct blk_mq_ctx *ctx;
3556         LIST_HEAD(tmp);
3557         enum hctx_type type;
3558
3559         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3560         if (!cpumask_test_cpu(cpu, hctx->cpumask))
3561                 return 0;
3562
3563         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3564         type = hctx->type;
3565
3566         spin_lock(&ctx->lock);
3567         if (!list_empty(&ctx->rq_lists[type])) {
3568                 list_splice_init(&ctx->rq_lists[type], &tmp);
3569                 blk_mq_hctx_clear_pending(hctx, ctx);
3570         }
3571         spin_unlock(&ctx->lock);
3572
3573         if (list_empty(&tmp))
3574                 return 0;
3575
3576         spin_lock(&hctx->lock);
3577         list_splice_tail_init(&tmp, &hctx->dispatch);
3578         spin_unlock(&hctx->lock);
3579
3580         blk_mq_run_hw_queue(hctx, true);
3581         return 0;
3582 }
3583
3584 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3585 {
3586         if (!(hctx->flags & BLK_MQ_F_STACKING))
3587                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3588                                                     &hctx->cpuhp_online);
3589         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3590                                             &hctx->cpuhp_dead);
3591 }
3592
3593 /*
3594  * Before freeing the hw queue, clear the flush request reference in
3595  * tags->rqs[] to avoid a potential use-after-free.
3596  */
3597 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3598                 unsigned int queue_depth, struct request *flush_rq)
3599 {
3600         int i;
3601         unsigned long flags;
3602
3603         /* The hw queue may not be mapped yet */
3604         if (!tags)
3605                 return;
3606
3607         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3608
3609         for (i = 0; i < queue_depth; i++)
3610                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3611
3612         /*
3613          * Wait until all pending iteration is done.
3614          *
3615          * The request references have been cleared, and that is guaranteed to
3616          * be observed after the ->lock is released.
3617          */
3618         spin_lock_irqsave(&tags->lock, flags);
3619         spin_unlock_irqrestore(&tags->lock, flags);
3620 }
3621
3622 /* hctx->ctxs will be freed in queue's release handler */
3623 static void blk_mq_exit_hctx(struct request_queue *q,
3624                 struct blk_mq_tag_set *set,
3625                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3626 {
3627         struct request *flush_rq = hctx->fq->flush_rq;
3628
3629         if (blk_mq_hw_queue_mapped(hctx))
3630                 blk_mq_tag_idle(hctx);
3631
3632         if (blk_queue_init_done(q))
3633                 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3634                                 set->queue_depth, flush_rq);
3635         if (set->ops->exit_request)
3636                 set->ops->exit_request(set, flush_rq, hctx_idx);
3637
3638         if (set->ops->exit_hctx)
3639                 set->ops->exit_hctx(hctx, hctx_idx);
3640
3641         blk_mq_remove_cpuhp(hctx);
3642
3643         xa_erase(&q->hctx_table, hctx_idx);
3644
3645         spin_lock(&q->unused_hctx_lock);
3646         list_add(&hctx->hctx_list, &q->unused_hctx_list);
3647         spin_unlock(&q->unused_hctx_lock);
3648 }
3649
3650 static void blk_mq_exit_hw_queues(struct request_queue *q,
3651                 struct blk_mq_tag_set *set, int nr_queue)
3652 {
3653         struct blk_mq_hw_ctx *hctx;
3654         unsigned long i;
3655
3656         queue_for_each_hw_ctx(q, hctx, i) {
3657                 if (i == nr_queue)
3658                         break;
3659                 blk_mq_exit_hctx(q, set, hctx, i);
3660         }
3661 }
3662
3663 static int blk_mq_init_hctx(struct request_queue *q,
3664                 struct blk_mq_tag_set *set,
3665                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3666 {
3667         hctx->queue_num = hctx_idx;
3668
3669         if (!(hctx->flags & BLK_MQ_F_STACKING))
3670                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3671                                 &hctx->cpuhp_online);
3672         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3673
3674         hctx->tags = set->tags[hctx_idx];
3675
3676         if (set->ops->init_hctx &&
3677             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3678                 goto unregister_cpu_notifier;
3679
3680         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3681                                 hctx->numa_node))
3682                 goto exit_hctx;
3683
3684         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3685                 goto exit_flush_rq;
3686
3687         return 0;
3688
3689  exit_flush_rq:
3690         if (set->ops->exit_request)
3691                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3692  exit_hctx:
3693         if (set->ops->exit_hctx)
3694                 set->ops->exit_hctx(hctx, hctx_idx);
3695  unregister_cpu_notifier:
3696         blk_mq_remove_cpuhp(hctx);
3697         return -1;
3698 }
3699
3700 static struct blk_mq_hw_ctx *
3701 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3702                 int node)
3703 {
3704         struct blk_mq_hw_ctx *hctx;
3705         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3706
3707         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3708         if (!hctx)
3709                 goto fail_alloc_hctx;
3710
3711         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3712                 goto free_hctx;
3713
3714         atomic_set(&hctx->nr_active, 0);
3715         if (node == NUMA_NO_NODE)
3716                 node = set->numa_node;
3717         hctx->numa_node = node;
3718
3719         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3720         spin_lock_init(&hctx->lock);
3721         INIT_LIST_HEAD(&hctx->dispatch);
3722         hctx->queue = q;
3723         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3724
3725         INIT_LIST_HEAD(&hctx->hctx_list);
3726
3727         /*
3728          * Allocate space for all possible cpus to avoid allocation at
3729          * runtime.
3730          */
3731         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3732                         gfp, node);
3733         if (!hctx->ctxs)
3734                 goto free_cpumask;
3735
3736         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3737                                 gfp, node, false, false))
3738                 goto free_ctxs;
3739         hctx->nr_ctx = 0;
3740
3741         spin_lock_init(&hctx->dispatch_wait_lock);
3742         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3743         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3744
3745         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3746         if (!hctx->fq)
3747                 goto free_bitmap;
3748
3749         blk_mq_hctx_kobj_init(hctx);
3750
3751         return hctx;
3752
3753  free_bitmap:
3754         sbitmap_free(&hctx->ctx_map);
3755  free_ctxs:
3756         kfree(hctx->ctxs);
3757  free_cpumask:
3758         free_cpumask_var(hctx->cpumask);
3759  free_hctx:
3760         kfree(hctx);
3761  fail_alloc_hctx:
3762         return NULL;
3763 }
3764
3765 static void blk_mq_init_cpu_queues(struct request_queue *q,
3766                                    unsigned int nr_hw_queues)
3767 {
3768         struct blk_mq_tag_set *set = q->tag_set;
3769         unsigned int i, j;
3770
3771         for_each_possible_cpu(i) {
3772                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3773                 struct blk_mq_hw_ctx *hctx;
3774                 int k;
3775
3776                 __ctx->cpu = i;
3777                 spin_lock_init(&__ctx->lock);
3778                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3779                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3780
3781                 __ctx->queue = q;
3782
3783                 /*
3784                  * Set local node, IFF we have more than one hw queue. If
3785          * not, we remain on the home node of the device.
3786                  */
3787                 for (j = 0; j < set->nr_maps; j++) {
3788                         hctx = blk_mq_map_queue_type(q, j, i);
3789                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3790                                 hctx->numa_node = cpu_to_node(i);
3791                 }
3792         }
3793 }
3794
3795 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3796                                              unsigned int hctx_idx,
3797                                              unsigned int depth)
3798 {
3799         struct blk_mq_tags *tags;
3800         int ret;
3801
3802         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3803         if (!tags)
3804                 return NULL;
3805
3806         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3807         if (ret) {
3808                 blk_mq_free_rq_map(tags);
3809                 return NULL;
3810         }
3811
3812         return tags;
3813 }
3814
3815 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3816                                        int hctx_idx)
3817 {
3818         if (blk_mq_is_shared_tags(set->flags)) {
3819                 set->tags[hctx_idx] = set->shared_tags;
3820
3821                 return true;
3822         }
3823
3824         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3825                                                        set->queue_depth);
3826
3827         return set->tags[hctx_idx];
3828 }
3829
3830 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3831                              struct blk_mq_tags *tags,
3832                              unsigned int hctx_idx)
3833 {
3834         if (tags) {
3835                 blk_mq_free_rqs(set, tags, hctx_idx);
3836                 blk_mq_free_rq_map(tags);
3837         }
3838 }
3839
3840 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3841                                       unsigned int hctx_idx)
3842 {
3843         if (!blk_mq_is_shared_tags(set->flags))
3844                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3845
3846         set->tags[hctx_idx] = NULL;
3847 }
3848
3849 static void blk_mq_map_swqueue(struct request_queue *q)
3850 {
3851         unsigned int j, hctx_idx;
3852         unsigned long i;
3853         struct blk_mq_hw_ctx *hctx;
3854         struct blk_mq_ctx *ctx;
3855         struct blk_mq_tag_set *set = q->tag_set;
3856
3857         queue_for_each_hw_ctx(q, hctx, i) {
3858                 cpumask_clear(hctx->cpumask);
3859                 hctx->nr_ctx = 0;
3860                 hctx->dispatch_from = NULL;
3861         }
3862
3863         /*
3864          * Map software to hardware queues.
3865          *
3866          * If the cpu isn't present, the cpu is mapped to the first hctx.
3867          */
3868         for_each_possible_cpu(i) {
3869
3870                 ctx = per_cpu_ptr(q->queue_ctx, i);
3871                 for (j = 0; j < set->nr_maps; j++) {
3872                         if (!set->map[j].nr_queues) {
3873                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3874                                                 HCTX_TYPE_DEFAULT, i);
3875                                 continue;
3876                         }
3877                         hctx_idx = set->map[j].mq_map[i];
3878                         /* an unmapped hw queue can be remapped after the CPU topology changes */
3879                         if (!set->tags[hctx_idx] &&
3880                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3881                                 /*
3882                                  * If tags initialization fails for some hctx,
3883                                  * that hctx won't be brought online.  In this
3884                                  * case, remap the current ctx to hctx[0], which
3885                                  * is guaranteed to always have tags allocated.
3886                                  */
3887                                 set->map[j].mq_map[i] = 0;
3888                         }
3889
3890                         hctx = blk_mq_map_queue_type(q, j, i);
3891                         ctx->hctxs[j] = hctx;
3892                         /*
3893                          * If the CPU is already set in the mask, then we've
3894                          * mapped this one already. This can happen if
3895                          * devices share queues across queue maps.
3896                          */
3897                         if (cpumask_test_cpu(i, hctx->cpumask))
3898                                 continue;
3899
3900                         cpumask_set_cpu(i, hctx->cpumask);
3901                         hctx->type = j;
3902                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
3903                         hctx->ctxs[hctx->nr_ctx++] = ctx;
3904
3905                         /*
3906                          * If the nr_ctx type overflows, we have exceeded the
3907                          * number of sw queues we can support.
3908                          */
3909                         BUG_ON(!hctx->nr_ctx);
3910                 }
3911
3912                 for (; j < HCTX_MAX_TYPES; j++)
3913                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
3914                                         HCTX_TYPE_DEFAULT, i);
3915         }
3916
3917         queue_for_each_hw_ctx(q, hctx, i) {
3918                 /*
3919                  * If no software queues are mapped to this hardware queue,
3920                  * disable it and free the request entries.
3921                  */
3922                 if (!hctx->nr_ctx) {
3923                         /* Never unmap queue 0.  We need it as a
3924                          * fallback in case a new remap fails
3925                          * allocation.
3926                          */
3927                         if (i)
3928                                 __blk_mq_free_map_and_rqs(set, i);
3929
3930                         hctx->tags = NULL;
3931                         continue;
3932                 }
3933
3934                 hctx->tags = set->tags[i];
3935                 WARN_ON(!hctx->tags);
3936
3937                 /*
3938                  * Set the map size to the number of mapped software queues.
3939                  * This is more accurate and more efficient than looping
3940                  * over all possibly mapped software queues.
3941                  */
3942                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3943
3944                 /*
3945                  * Initialize batch round-robin counts.
3946                  */
3947                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3948                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3949         }
3950 }
3951
3952 /*
3953  * Caller needs to ensure that we're either frozen/quiesced, or that
3954  * the queue isn't live yet.
3955  */
3956 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3957 {
3958         struct blk_mq_hw_ctx *hctx;
3959         unsigned long i;
3960
3961         queue_for_each_hw_ctx(q, hctx, i) {
3962                 if (shared) {
3963                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3964                 } else {
3965                         blk_mq_tag_idle(hctx);
3966                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3967                 }
3968         }
3969 }
3970
3971 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3972                                          bool shared)
3973 {
3974         struct request_queue *q;
3975
3976         lockdep_assert_held(&set->tag_list_lock);
3977
3978         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3979                 blk_mq_freeze_queue(q);
3980                 queue_set_hctx_shared(q, shared);
3981                 blk_mq_unfreeze_queue(q);
3982         }
3983 }
3984
3985 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3986 {
3987         struct blk_mq_tag_set *set = q->tag_set;
3988
3989         mutex_lock(&set->tag_list_lock);
3990         list_del(&q->tag_set_list);
3991         if (list_is_singular(&set->tag_list)) {
3992                 /* just transitioned to unshared */
3993                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3994                 /* update existing queue */
3995                 blk_mq_update_tag_set_shared(set, false);
3996         }
3997         mutex_unlock(&set->tag_list_lock);
3998         INIT_LIST_HEAD(&q->tag_set_list);
3999 }
4000
4001 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
4002                                      struct request_queue *q)
4003 {
4004         mutex_lock(&set->tag_list_lock);
4005
4006         /*
4007          * Check to see if we're transitioning to shared (from 1 to 2 queues).
4008          */
4009         if (!list_empty(&set->tag_list) &&
4010             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
4011                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4012                 /* update existing queue */
4013                 blk_mq_update_tag_set_shared(set, true);
4014         }
4015         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
4016                 queue_set_hctx_shared(q, true);
4017         list_add_tail(&q->tag_set_list, &set->tag_list);
4018
4019         mutex_unlock(&set->tag_list_lock);
4020 }
4021
4022 /* All allocations will be freed in release handler of q->mq_kobj */
4023 static int blk_mq_alloc_ctxs(struct request_queue *q)
4024 {
4025         struct blk_mq_ctxs *ctxs;
4026         int cpu;
4027
4028         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
4029         if (!ctxs)
4030                 return -ENOMEM;
4031
4032         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
4033         if (!ctxs->queue_ctx)
4034                 goto fail;
4035
4036         for_each_possible_cpu(cpu) {
4037                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4038                 ctx->ctxs = ctxs;
4039         }
4040
4041         q->mq_kobj = &ctxs->kobj;
4042         q->queue_ctx = ctxs->queue_ctx;
4043
4044         return 0;
4045  fail:
4046         kfree(ctxs);
4047         return -ENOMEM;
4048 }
4049
4050 /*
4051  * This is the actual release handler for mq, but we call it from the
4052  * request queue's release handler to avoid use-after-free issues and
4053  * headaches: q->mq_kobj shouldn't have been introduced, but we can't
4054  * group the ctx/hctx kobjects without it.
4055  */
4056 void blk_mq_release(struct request_queue *q)
4057 {
4058         struct blk_mq_hw_ctx *hctx, *next;
4059         unsigned long i;
4060
4061         queue_for_each_hw_ctx(q, hctx, i)
4062                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4063
4064         /* all hctx are in .unused_hctx_list now */
4065         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4066                 list_del_init(&hctx->hctx_list);
4067                 kobject_put(&hctx->kobj);
4068         }
4069
4070         xa_destroy(&q->hctx_table);
4071
4072         /*
4073          * Release .mq_kobj and the sw queues' kobjects now because
4074          * both share the request queue's lifetime.
4075          */
4076         blk_mq_sysfs_deinit(q);
4077 }
4078
4079 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4080                 void *queuedata)
4081 {
4082         struct request_queue *q;
4083         int ret;
4084
4085         q = blk_alloc_queue(set->numa_node);
4086         if (!q)
4087                 return ERR_PTR(-ENOMEM);
4088         q->queuedata = queuedata;
4089         ret = blk_mq_init_allocated_queue(set, q);
4090         if (ret) {
4091                 blk_put_queue(q);
4092                 return ERR_PTR(ret);
4093         }
4094         return q;
4095 }
4096
4097 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4098 {
4099         return blk_mq_init_queue_data(set, NULL);
4100 }
4101 EXPORT_SYMBOL(blk_mq_init_queue);
4102
4103 /**
4104  * blk_mq_destroy_queue - shutdown a request queue
4105  * @q: request queue to shutdown
4106  *
4107  * This shuts down a request queue allocated by blk_mq_init_queue(). All future
4108  * requests will be failed with -ENODEV. The caller is responsible for dropping
4109  * the reference from blk_mq_init_queue() by calling blk_put_queue().
4110  *
4111  * Context: can sleep
4112  */
4113 void blk_mq_destroy_queue(struct request_queue *q)
4114 {
4115         WARN_ON_ONCE(!queue_is_mq(q));
4116         WARN_ON_ONCE(blk_queue_registered(q));
4117
4118         might_sleep();
4119
4120         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4121         blk_queue_start_drain(q);
4122         blk_mq_freeze_queue_wait(q);
4123
4124         blk_sync_queue(q);
4125         blk_mq_cancel_work_sync(q);
4126         blk_mq_exit_queue(q);
4127 }
4128 EXPORT_SYMBOL(blk_mq_destroy_queue);
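
/*
 * Illustrative sketch (not part of blk-mq): a driver that allocated a bare
 * queue (no gendisk) with blk_mq_init_queue() tears it down in two steps,
 * per the comment above.  example_teardown_queue() is a hypothetical name.
 */
static void example_teardown_queue(struct request_queue *q)
{
        blk_mq_destroy_queue(q);
        blk_put_queue(q);       /* drop the reference from blk_mq_init_queue() */
}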
4129
4130 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4131                 struct lock_class_key *lkclass)
4132 {
4133         struct request_queue *q;
4134         struct gendisk *disk;
4135
4136         q = blk_mq_init_queue_data(set, queuedata);
4137         if (IS_ERR(q))
4138                 return ERR_CAST(q);
4139
4140         disk = __alloc_disk_node(q, set->numa_node, lkclass);
4141         if (!disk) {
4142                 blk_mq_destroy_queue(q);
4143                 blk_put_queue(q);
4144                 return ERR_PTR(-ENOMEM);
4145         }
4146         set_bit(GD_OWNS_QUEUE, &disk->state);
4147         return disk;
4148 }
4149 EXPORT_SYMBOL(__blk_mq_alloc_disk);
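
/*
 * Illustrative sketch (not part of blk-mq): most drivers use the
 * blk_mq_alloc_disk() wrapper from blk-mq.h to pair an allocated tag set
 * with a gendisk.  struct example_dev, example_fops and example_add_disk()
 * are hypothetical.
 */
static int example_add_disk(struct example_dev *dev)
{
        struct gendisk *disk;

        disk = blk_mq_alloc_disk(&dev->tag_set, dev);
        if (IS_ERR(disk))
                return PTR_ERR(disk);

        disk->fops = &example_fops;
        disk->private_data = dev;
        set_capacity(disk, dev->nr_sectors);
        dev->disk = disk;

        return add_disk(disk);
}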
4150
4151 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4152                 struct lock_class_key *lkclass)
4153 {
4154         struct gendisk *disk;
4155
4156         if (!blk_get_queue(q))
4157                 return NULL;
4158         disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4159         if (!disk)
4160                 blk_put_queue(q);
4161         return disk;
4162 }
4163 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4164
4165 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4166                 struct blk_mq_tag_set *set, struct request_queue *q,
4167                 int hctx_idx, int node)
4168 {
4169         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4170
4171         /* reuse dead hctx first */
4172         spin_lock(&q->unused_hctx_lock);
4173         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4174                 if (tmp->numa_node == node) {
4175                         hctx = tmp;
4176                         break;
4177                 }
4178         }
4179         if (hctx)
4180                 list_del_init(&hctx->hctx_list);
4181         spin_unlock(&q->unused_hctx_lock);
4182
4183         if (!hctx)
4184                 hctx = blk_mq_alloc_hctx(q, set, node);
4185         if (!hctx)
4186                 goto fail;
4187
4188         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4189                 goto free_hctx;
4190
4191         return hctx;
4192
4193  free_hctx:
4194         kobject_put(&hctx->kobj);
4195  fail:
4196         return NULL;
4197 }
4198
4199 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4200                                                 struct request_queue *q)
4201 {
4202         struct blk_mq_hw_ctx *hctx;
4203         unsigned long i, j;
4204
4205         /* protect against switching io scheduler  */
4206         mutex_lock(&q->sysfs_lock);
4207         for (i = 0; i < set->nr_hw_queues; i++) {
4208                 int old_node;
4209                 int node = blk_mq_get_hctx_node(set, i);
4210                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4211
4212                 if (old_hctx) {
4213                         old_node = old_hctx->numa_node;
4214                         blk_mq_exit_hctx(q, set, old_hctx, i);
4215                 }
4216
4217                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4218                         if (!old_hctx)
4219                                 break;
4220                         pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4221                                         node, old_node);
4222                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4223                         WARN_ON_ONCE(!hctx);
4224                 }
4225         }
4226         /*
4227          * Increasing nr_hw_queues failed. Free the newly allocated
4228          * hctxs and keep the previous q->nr_hw_queues.
4229          */
4230         if (i != set->nr_hw_queues) {
4231                 j = q->nr_hw_queues;
4232         } else {
4233                 j = i;
4234                 q->nr_hw_queues = set->nr_hw_queues;
4235         }
4236
4237         xa_for_each_start(&q->hctx_table, j, hctx, j)
4238                 blk_mq_exit_hctx(q, set, hctx, j);
4239         mutex_unlock(&q->sysfs_lock);
4240 }
4241
4242 static void blk_mq_update_poll_flag(struct request_queue *q)
4243 {
4244         struct blk_mq_tag_set *set = q->tag_set;
4245
4246         if (set->nr_maps > HCTX_TYPE_POLL &&
4247             set->map[HCTX_TYPE_POLL].nr_queues)
4248                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4249         else
4250                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4251 }
4252
4253 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4254                 struct request_queue *q)
4255 {
4256         /* mark the queue as mq asap */
4257         q->mq_ops = set->ops;
4258
4259         if (blk_mq_alloc_ctxs(q))
4260                 goto err_exit;
4261
4262         /* init q->mq_kobj and sw queues' kobjects */
4263         blk_mq_sysfs_init(q);
4264
4265         INIT_LIST_HEAD(&q->unused_hctx_list);
4266         spin_lock_init(&q->unused_hctx_lock);
4267
4268         xa_init(&q->hctx_table);
4269
4270         blk_mq_realloc_hw_ctxs(set, q);
4271         if (!q->nr_hw_queues)
4272                 goto err_hctxs;
4273
4274         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4275         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4276
4277         q->tag_set = set;
4278
4279         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4280         blk_mq_update_poll_flag(q);
4281
4282         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4283         INIT_LIST_HEAD(&q->flush_list);
4284         INIT_LIST_HEAD(&q->requeue_list);
4285         spin_lock_init(&q->requeue_lock);
4286
4287         q->nr_requests = set->queue_depth;
4288
4289         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4290         blk_mq_add_queue_tag_set(set, q);
4291         blk_mq_map_swqueue(q);
4292         return 0;
4293
4294 err_hctxs:
4295         blk_mq_release(q);
4296 err_exit:
4297         q->mq_ops = NULL;
4298         return -ENOMEM;
4299 }
4300 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4301
4302 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4303 void blk_mq_exit_queue(struct request_queue *q)
4304 {
4305         struct blk_mq_tag_set *set = q->tag_set;
4306
4307         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4308         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4309         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4310         blk_mq_del_queue_tag_set(q);
4311 }
4312
4313 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4314 {
4315         int i;
4316
4317         if (blk_mq_is_shared_tags(set->flags)) {
4318                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4319                                                 BLK_MQ_NO_HCTX_IDX,
4320                                                 set->queue_depth);
4321                 if (!set->shared_tags)
4322                         return -ENOMEM;
4323         }
4324
4325         for (i = 0; i < set->nr_hw_queues; i++) {
4326                 if (!__blk_mq_alloc_map_and_rqs(set, i))
4327                         goto out_unwind;
4328                 cond_resched();
4329         }
4330
4331         return 0;
4332
4333 out_unwind:
4334         while (--i >= 0)
4335                 __blk_mq_free_map_and_rqs(set, i);
4336
4337         if (blk_mq_is_shared_tags(set->flags)) {
4338                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4339                                         BLK_MQ_NO_HCTX_IDX);
4340         }
4341
4342         return -ENOMEM;
4343 }
4344
4345 /*
4346  * Allocate the request maps associated with this tag_set. Note that this
4347  * may reduce the depth asked for, if memory is tight. set->queue_depth
4348  * will be updated to reflect the allocated depth.
4349  */
4350 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4351 {
4352         unsigned int depth;
4353         int err;
4354
4355         depth = set->queue_depth;
4356         do {
4357                 err = __blk_mq_alloc_rq_maps(set);
4358                 if (!err)
4359                         break;
4360
4361                 set->queue_depth >>= 1;
4362                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4363                         err = -ENOMEM;
4364                         break;
4365                 }
4366         } while (set->queue_depth);
4367
4368         if (!set->queue_depth || err) {
4369                 pr_err("blk-mq: failed to allocate request map\n");
4370                 return -ENOMEM;
4371         }
4372
4373         if (depth != set->queue_depth)
4374                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4375                                                 depth, set->queue_depth);
4376
4377         return 0;
4378 }
4379
4380 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4381 {
4382         /*
4383          * blk_mq_map_queues() and multiple .map_queues() implementations
4384          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4385          * number of hardware queues.
4386          */
4387         if (set->nr_maps == 1)
4388                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4389
4390         if (set->ops->map_queues && !is_kdump_kernel()) {
4391                 int i;
4392
4393                 /*
4394                  * A transport's .map_queues is usually implemented in the
4395                  * following way:
4396                  *
4397                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4398                  *      mask = get_cpu_mask(queue)
4399                  *      for_each_cpu(cpu, mask)
4400                  *              set->map[x].mq_map[cpu] = queue;
4401                  * }
4402                  *
4403                  * When we need to remap, the table has to be cleared to
4404                  * kill stale mappings, since one CPU may not be mapped
4405                  * to any hw queue.
4406                  */
4407                 for (i = 0; i < set->nr_maps; i++)
4408                         blk_mq_clear_mq_map(&set->map[i]);
4409
4410                 set->ops->map_queues(set);
4411         } else {
4412                 BUG_ON(set->nr_maps > 1);
4413                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4414         }
4415 }
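
/*
 * Illustrative sketch (not part of blk-mq): a driver without special IRQ
 * affinity requirements can implement .map_queues by simply spreading the
 * default map over the CPUs.  example_map_queues() is a hypothetical name.
 */
static void example_map_queues(struct blk_mq_tag_set *set)
{
        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}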
4416
4417 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4418                                        int new_nr_hw_queues)
4419 {
4420         struct blk_mq_tags **new_tags;
4421         int i;
4422
4423         if (set->nr_hw_queues >= new_nr_hw_queues)
4424                 goto done;
4425
4426         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4427                                 GFP_KERNEL, set->numa_node);
4428         if (!new_tags)
4429                 return -ENOMEM;
4430
4431         if (set->tags)
4432                 memcpy(new_tags, set->tags, set->nr_hw_queues *
4433                        sizeof(*set->tags));
4434         kfree(set->tags);
4435         set->tags = new_tags;
4436
4437         for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
4438                 if (!__blk_mq_alloc_map_and_rqs(set, i)) {
4439                         while (--i >= set->nr_hw_queues)
4440                                 __blk_mq_free_map_and_rqs(set, i);
4441                         return -ENOMEM;
4442                 }
4443                 cond_resched();
4444         }
4445
4446 done:
4447         set->nr_hw_queues = new_nr_hw_queues;
4448         return 0;
4449 }
4450
4451 /*
4452  * Allocate a tag set to be associated with one or more request queues.
4453  * May fail with -EINVAL for various error conditions. May adjust the
4454  * requested depth down, if it's too large. In that case, the adjusted
4455  * depth will be stored in set->queue_depth.
4456  */
4457 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4458 {
4459         int i, ret;
4460
4461         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4462
4463         if (!set->nr_hw_queues)
4464                 return -EINVAL;
4465         if (!set->queue_depth)
4466                 return -EINVAL;
4467         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4468                 return -EINVAL;
4469
4470         if (!set->ops->queue_rq)
4471                 return -EINVAL;
4472
4473         if (!set->ops->get_budget ^ !set->ops->put_budget)
4474                 return -EINVAL;
4475
4476         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4477                 pr_info("blk-mq: reduced tag depth to %u\n",
4478                         BLK_MQ_MAX_DEPTH);
4479                 set->queue_depth = BLK_MQ_MAX_DEPTH;
4480         }
4481
4482         if (!set->nr_maps)
4483                 set->nr_maps = 1;
4484         else if (set->nr_maps > HCTX_MAX_TYPES)
4485                 return -EINVAL;
4486
4487         /*
4488          * If a crashdump is active, then we are potentially in a very
4489          * memory-constrained environment. Limit us to 1 queue and
4490          * 64 tags to prevent using too much memory.
4491          */
4492         if (is_kdump_kernel()) {
4493                 set->nr_hw_queues = 1;
4494                 set->nr_maps = 1;
4495                 set->queue_depth = min(64U, set->queue_depth);
4496         }
4497         /*
4498          * There is no use for more h/w queues than cpus if we just have
4499          * a single map.
4500          */
4501         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4502                 set->nr_hw_queues = nr_cpu_ids;
4503
4504         if (set->flags & BLK_MQ_F_BLOCKING) {
4505                 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4506                 if (!set->srcu)
4507                         return -ENOMEM;
4508                 ret = init_srcu_struct(set->srcu);
4509                 if (ret)
4510                         goto out_free_srcu;
4511         }
4512
4513         ret = -ENOMEM;
4514         set->tags = kcalloc_node(set->nr_hw_queues,
4515                                  sizeof(struct blk_mq_tags *), GFP_KERNEL,
4516                                  set->numa_node);
4517         if (!set->tags)
4518                 goto out_cleanup_srcu;
4519
4520         for (i = 0; i < set->nr_maps; i++) {
4521                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4522                                                   sizeof(set->map[i].mq_map[0]),
4523                                                   GFP_KERNEL, set->numa_node);
4524                 if (!set->map[i].mq_map)
4525                         goto out_free_mq_map;
4526                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4527         }
4528
4529         blk_mq_update_queue_map(set);
4530
4531         ret = blk_mq_alloc_set_map_and_rqs(set);
4532         if (ret)
4533                 goto out_free_mq_map;
4534
4535         mutex_init(&set->tag_list_lock);
4536         INIT_LIST_HEAD(&set->tag_list);
4537
4538         return 0;
4539
4540 out_free_mq_map:
4541         for (i = 0; i < set->nr_maps; i++) {
4542                 kfree(set->map[i].mq_map);
4543                 set->map[i].mq_map = NULL;
4544         }
4545         kfree(set->tags);
4546         set->tags = NULL;
4547 out_cleanup_srcu:
4548         if (set->flags & BLK_MQ_F_BLOCKING)
4549                 cleanup_srcu_struct(set->srcu);
4550 out_free_srcu:
4551         if (set->flags & BLK_MQ_F_BLOCKING)
4552                 kfree(set->srcu);
4553         return ret;
4554 }
4555 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
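
/*
 * Illustrative sketch, not part of blk-mq.c: roughly how a driver might set
 * up a tag set before creating its request queues. The "mydrv" ops table,
 * command struct and queue count are hypothetical placeholders; a real
 * driver pairs this with blk_mq_free_tag_set() on teardown.
 */
#if 0	/* example only */
static struct blk_mq_tag_set mydrv_tag_set;

static int mydrv_setup_tag_set(void)
{
	memset(&mydrv_tag_set, 0, sizeof(mydrv_tag_set));
	mydrv_tag_set.ops = &mydrv_mq_ops;	/* must implement ->queue_rq */
	mydrv_tag_set.nr_hw_queues = 4;
	mydrv_tag_set.nr_maps = 1;
	mydrv_tag_set.queue_depth = 128;
	mydrv_tag_set.numa_node = NUMA_NO_NODE;
	mydrv_tag_set.cmd_size = sizeof(struct mydrv_cmd);	/* per-request driver data */

	return blk_mq_alloc_tag_set(&mydrv_tag_set);
}
#endif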
4556
4557 /* allocate and initialize a tagset for a simple single-queue device */
4558 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4559                 const struct blk_mq_ops *ops, unsigned int queue_depth,
4560                 unsigned int set_flags)
4561 {
4562         memset(set, 0, sizeof(*set));
4563         set->ops = ops;
4564         set->nr_hw_queues = 1;
4565         set->nr_maps = 1;
4566         set->queue_depth = queue_depth;
4567         set->numa_node = NUMA_NO_NODE;
4568         set->flags = set_flags;
4569         return blk_mq_alloc_tag_set(set);
4570 }
4571 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
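
/*
 * Illustrative sketch, not part of blk-mq.c: for a trivial device the
 * single-queue helper above collapses the setup to one call. The "mydrv"
 * names are hypothetical placeholders, and the fragment assumes an err
 * variable in the enclosing probe path.
 */
#if 0	/* example only */
	err = blk_mq_alloc_sq_tag_set(&mydrv_tag_set, &mydrv_mq_ops, 64, 0);
	if (err)
		return err;
#endif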
4572
4573 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4574 {
4575         int i, j;
4576
4577         for (i = 0; i < set->nr_hw_queues; i++)
4578                 __blk_mq_free_map_and_rqs(set, i);
4579
4580         if (blk_mq_is_shared_tags(set->flags)) {
4581                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4582                                         BLK_MQ_NO_HCTX_IDX);
4583         }
4584
4585         for (j = 0; j < set->nr_maps; j++) {
4586                 kfree(set->map[j].mq_map);
4587                 set->map[j].mq_map = NULL;
4588         }
4589
4590         kfree(set->tags);
4591         set->tags = NULL;
4592         if (set->flags & BLK_MQ_F_BLOCKING) {
4593                 cleanup_srcu_struct(set->srcu);
4594                 kfree(set->srcu);
4595         }
4596 }
4597 EXPORT_SYMBOL(blk_mq_free_tag_set);
4598
4599 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4600 {
4601         struct blk_mq_tag_set *set = q->tag_set;
4602         struct blk_mq_hw_ctx *hctx;
4603         int ret;
4604         unsigned long i;
4605
4606         if (!set)
4607                 return -EINVAL;
4608
4609         if (q->nr_requests == nr)
4610                 return 0;
4611
4612         blk_mq_freeze_queue(q);
4613         blk_mq_quiesce_queue(q);
4614
4615         ret = 0;
4616         queue_for_each_hw_ctx(q, hctx, i) {
4617                 if (!hctx->tags)
4618                         continue;
4619                 /*
4620                  * If we're using an MQ scheduler, just update the scheduler
4621                  * queue depth, much like the legacy single-queue code did.
4622                  */
4623                 if (hctx->sched_tags) {
4624                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4625                                                       nr, true);
4626                 } else {
4627                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4628                                                       false);
4629                 }
4630                 if (ret)
4631                         break;
4632                 if (q->elevator && q->elevator->type->ops.depth_updated)
4633                         q->elevator->type->ops.depth_updated(hctx);
4634         }
4635         if (!ret) {
4636                 q->nr_requests = nr;
4637                 if (blk_mq_is_shared_tags(set->flags)) {
4638                         if (q->elevator)
4639                                 blk_mq_tag_update_sched_shared_tags(q);
4640                         else
4641                                 blk_mq_tag_resize_shared_tags(set, nr);
4642                 }
4643         }
4644
4645         blk_mq_unquiesce_queue(q);
4646         blk_mq_unfreeze_queue(q);
4647
4648         return ret;
4649 }
4650
4651 /*
4652  * A request_queue and elevator_type pair, used only by
4653  * __blk_mq_update_nr_hw_queues to cache the elevator_type
4654  * associated with a request_queue.
4655  */
4656 struct blk_mq_qe_pair {
4657         struct list_head node;
4658         struct request_queue *q;
4659         struct elevator_type *type;
4660 };
4661
4662 /*
4663  * Cache the elevator_type in the qe pair list and switch the
4664  * I/O scheduler to 'none'.
4665  */
4666 static bool blk_mq_elv_switch_none(struct list_head *head,
4667                 struct request_queue *q)
4668 {
4669         struct blk_mq_qe_pair *qe;
4670
4671         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4672         if (!qe)
4673                 return false;
4674
4675         /* q->elevator is protected by ->sysfs_lock */
4676         mutex_lock(&q->sysfs_lock);
4677
4678         /* the check has to be done with holding sysfs_lock */
4679         if (!q->elevator) {
4680                 kfree(qe);
4681                 goto unlock;
4682         }
4683
4684         INIT_LIST_HEAD(&qe->node);
4685         qe->q = q;
4686         qe->type = q->elevator->type;
4687         /* keep a reference to the elevator module as we'll switch back */
4688         __elevator_get(qe->type);
4689         list_add(&qe->node, head);
4690         elevator_disable(q);
4691 unlock:
4692         mutex_unlock(&q->sysfs_lock);
4693
4694         return true;
4695 }
4696
4697 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4698                                                 struct request_queue *q)
4699 {
4700         struct blk_mq_qe_pair *qe;
4701
4702         list_for_each_entry(qe, head, node)
4703                 if (qe->q == q)
4704                         return qe;
4705
4706         return NULL;
4707 }
4708
4709 static void blk_mq_elv_switch_back(struct list_head *head,
4710                                   struct request_queue *q)
4711 {
4712         struct blk_mq_qe_pair *qe;
4713         struct elevator_type *t;
4714
4715         qe = blk_lookup_qe_pair(head, q);
4716         if (!qe)
4717                 return;
4718         t = qe->type;
4719         list_del(&qe->node);
4720         kfree(qe);
4721
4722         mutex_lock(&q->sysfs_lock);
4723         elevator_switch(q, t);
4724         /* drop the reference acquired in blk_mq_elv_switch_none */
4725         elevator_put(t);
4726         mutex_unlock(&q->sysfs_lock);
4727 }
4728
4729 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4730                                                         int nr_hw_queues)
4731 {
4732         struct request_queue *q;
4733         LIST_HEAD(head);
4734         int prev_nr_hw_queues = set->nr_hw_queues;
4735         int i;
4736
4737         lockdep_assert_held(&set->tag_list_lock);
4738
4739         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4740                 nr_hw_queues = nr_cpu_ids;
4741         if (nr_hw_queues < 1)
4742                 return;
4743         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4744                 return;
4745
4746         list_for_each_entry(q, &set->tag_list, tag_set_list)
4747                 blk_mq_freeze_queue(q);
4748         /*
4749          * Switch the I/O scheduler to 'none', cleaning up the data associated
4750          * with the previous scheduler. We will switch back once we are done
4751          * updating the new software-to-hardware queue mappings.
4752          */
4753         list_for_each_entry(q, &set->tag_list, tag_set_list)
4754                 if (!blk_mq_elv_switch_none(&head, q))
4755                         goto switch_back;
4756
4757         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4758                 blk_mq_debugfs_unregister_hctxs(q);
4759                 blk_mq_sysfs_unregister_hctxs(q);
4760         }
4761
4762         if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4763                 goto reregister;
4764
4765 fallback:
4766         blk_mq_update_queue_map(set);
4767         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4768                 blk_mq_realloc_hw_ctxs(set, q);
4769                 blk_mq_update_poll_flag(q);
4770                 if (q->nr_hw_queues != set->nr_hw_queues) {
4771                         int i = prev_nr_hw_queues;
4772
4773                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4774                                         nr_hw_queues, prev_nr_hw_queues);
4775                         for (; i < set->nr_hw_queues; i++)
4776                                 __blk_mq_free_map_and_rqs(set, i);
4777
4778                         set->nr_hw_queues = prev_nr_hw_queues;
4779                         goto fallback;
4780                 }
4781                 blk_mq_map_swqueue(q);
4782         }
4783
4784 reregister:
4785         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4786                 blk_mq_sysfs_register_hctxs(q);
4787                 blk_mq_debugfs_register_hctxs(q);
4788         }
4789
4790 switch_back:
4791         list_for_each_entry(q, &set->tag_list, tag_set_list)
4792                 blk_mq_elv_switch_back(&head, q);
4793
4794         list_for_each_entry(q, &set->tag_list, tag_set_list)
4795                 blk_mq_unfreeze_queue(q);
4796
4797         /* Free the excess tags when nr_hw_queues has shrunk. */
4798         for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
4799                 __blk_mq_free_map_and_rqs(set, i);
4800 }
4801
4802 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4803 {
4804         mutex_lock(&set->tag_list_lock);
4805         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4806         mutex_unlock(&set->tag_list_lock);
4807 }
4808 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
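
/*
 * Illustrative sketch, not part of blk-mq.c: a driver typically calls
 * blk_mq_update_nr_hw_queues() when the number of usable hardware queues
 * changes at runtime, e.g. after a controller reset renegotiated the queue
 * count. "mydrv_ctrl" is a hypothetical placeholder holding the tag set.
 */
#if 0	/* example only */
static void mydrv_reset_done(struct mydrv_ctrl *ctrl, unsigned int nr_queues)
{
	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_queues);
}
#endif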
4809
4810 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4811                          struct io_comp_batch *iob, unsigned int flags)
4812 {
4813         long state = get_current_state();
4814         int ret;
4815
4816         do {
4817                 ret = q->mq_ops->poll(hctx, iob);
4818                 if (ret > 0) {
4819                         __set_current_state(TASK_RUNNING);
4820                         return ret;
4821                 }
4822
4823                 if (signal_pending_state(state, current))
4824                         __set_current_state(TASK_RUNNING);
4825                 if (task_is_running(current))
4826                         return 1;
4827
4828                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4829                         break;
4830                 cpu_relax();
4831         } while (!need_resched());
4832
4833         __set_current_state(TASK_RUNNING);
4834         return 0;
4835 }
4836
4837 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4838                 struct io_comp_batch *iob, unsigned int flags)
4839 {
4840         struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4841
4842         return blk_hctx_poll(q, hctx, iob, flags);
4843 }
4844
4845 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4846                 unsigned int poll_flags)
4847 {
4848         struct request_queue *q = rq->q;
4849         int ret;
4850
4851         if (!blk_rq_is_poll(rq))
4852                 return 0;
4853         if (!percpu_ref_tryget(&q->q_usage_counter))
4854                 return 0;
4855
4856         ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4857         blk_queue_exit(q);
4858
4859         return ret;
4860 }
4861 EXPORT_SYMBOL_GPL(blk_rq_poll);
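
/*
 * Illustrative sketch, not part of blk-mq.c: a caller that issued a polled
 * passthrough request could spin on blk_rq_poll() until its own completion
 * handler sets a flag. The "done" flag is a hypothetical placeholder;
 * passing a NULL io_comp_batch simply disables completion batching.
 */
#if 0	/* example only */
	while (!READ_ONCE(done))
		blk_rq_poll(rq, NULL, 0);
#endif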
4862
4863 unsigned int blk_mq_rq_cpu(struct request *rq)
4864 {
4865         return rq->mq_ctx->cpu;
4866 }
4867 EXPORT_SYMBOL(blk_mq_rq_cpu);
4868
4869 void blk_mq_cancel_work_sync(struct request_queue *q)
4870 {
4871         struct blk_mq_hw_ctx *hctx;
4872         unsigned long i;
4873
4874         cancel_delayed_work_sync(&q->requeue_work);
4875
4876         queue_for_each_hw_ctx(q, hctx, i)
4877                 cancel_delayed_work_sync(&hctx->run_work);
4878 }
4879
4880 static int __init blk_mq_init(void)
4881 {
4882         int i;
4883
4884         for_each_possible_cpu(i)
4885                 init_llist_head(&per_cpu(blk_cpu_done, i));
4886         for_each_possible_cpu(i)
4887                 INIT_CSD(&per_cpu(blk_cpu_csd, i),
4888                          __blk_mq_complete_request_remote, NULL);
4889         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4890
4891         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4892                                   "block/softirq:dead", NULL,
4893                                   blk_softirq_cpu_dead);
4894         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4895                                 blk_mq_hctx_notify_dead);
4896         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4897                                 blk_mq_hctx_notify_online,
4898                                 blk_mq_hctx_notify_offline);
4899         return 0;
4900 }
4901 subsys_initcall(blk_mq_init);