/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

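/*
 * Spread CPUs evenly across the hardware queues. The caller passes a
 * running counter rather than the raw CPU number, so consecutive online
 * CPUs land on consecutive queues; e.g. 8 CPUs over 3 queues map to
 * queue indices 0 0 0 1 1 1 2 2.
 */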
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

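/*
 * Return the first CPU in @cpu's thread sibling mask, which serves as a
 * stable identifier for the physical core, or @cpu itself if the
 * topology information is unusable.
 */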
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

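/*
 * Build set->mq_map, assigning a hardware queue index to every possible
 * CPU. Thread siblings share a queue when there are fewer queues than
 * online CPUs.
 */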
int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int *map = set->mq_map;
	unsigned int nr_queues = set->nr_hw_queues;
	const struct cpumask *online_mask = cpu_online_mask;
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return -ENOMEM;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
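	/*
	 * Count the online CPUs, and the number of distinct physical
	 * cores among them (one per unique first sibling).
	 */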
	for_each_cpu(i, online_mask) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

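	/*
	 * Assign a queue to each possible CPU; CPUs that are currently
	 * offline are simply pointed at queue 0.
	 */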
	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpumask_test_cpu(i, online_mask)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case - we have equal or more hardware queues, or
		 * there are no thread siblings to take into account. Do
		 * a 1:1 mapping if there are enough queues, or spread
		 * the CPUs evenly across the queues if not.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}

		/*
		 * Fewer than nr_cpus queues, and we have some number of
		 * threads per core. Map sibling threads to the same
		 * queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
						    queue);
			queue++;
		} else
			map[i] = map[first_sibling];
	}

	free_cpumask_var(cpus);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}