// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

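/*
 * Wrap queue index @q into [0, nr_queues) and offset it into this map's
 * hardware queue number space.
 */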
static int queue_index(struct blk_mq_queue_map *qmap,
		       unsigned int nr_queues, const int q)
{
	return qmap->queue_offset + (q % nr_queues);
}

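/*
 * Return the first thread sibling of @cpu according to the CPU topology, or
 * @cpu itself if the sibling mask is empty.
 */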
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

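/*
 * Build the CPU to hardware queue mapping for @qmap: hardware queues are
 * first spread across the present CPUs; each remaining possible CPU then
 * either takes the next queue in round-robin order or shares the queue of
 * its first thread sibling.
 */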
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling, q = 0;

	for_each_possible_cpu(cpu)
		map[cpu] = -1;

	/*
	 * Spread queues among present CPUs first, to minimize the number of
	 * dead queues that end up mapped only to non-present CPUs.
	 */
	for_each_present_cpu(cpu) {
		if (q >= nr_queues)
			break;
		map[cpu] = queue_index(qmap, nr_queues, q++);
	}

	for_each_possible_cpu(cpu) {
		if (map[cpu] != -1)
			continue;
		/*
		 * First do sequential mapping between CPUs and queues.
		 * If we still have CPUs to map and there are multiple
		 * threads per core, map sibling threads to the same queue
		 * for better performance.
		 */
		if (q < nr_queues) {
			map[cpu] = queue_index(qmap, nr_queues, q++);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = queue_index(qmap, nr_queues, q++);
			else
				map[cpu] = map[first_sibling];
		}
	}
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
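
/*
 * Usage sketch (illustrative, not part of the original file): a driver with
 * no special queue topology can fall back to this helper from its
 * ->map_queues() callback, e.g.
 *
 *	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *
 * where 'set' is the driver's struct blk_mq_tag_set.
 */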

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}