/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

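/*
 * Fold a CPU number onto the available hardware queues: CPUs numbered
 * beyond nr_queues simply wrap around to the low queue indices.
 */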
static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
        return cpu % nr_queues;
}

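/*
 * Return the first CPU in @cpu's SMT sibling mask; fall back to @cpu
 * itself if the mask is empty (no topology information available).
 */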
static int get_first_sibling(unsigned int cpu)
{
        unsigned int ret;

        ret = cpumask_first(topology_sibling_cpumask(cpu));
        if (ret < nr_cpu_ids)
                return ret;

        return cpu;
}

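/*
 * blk_mq_map_queues - build the default CPU to hardware queue mapping
 *
 * Fills set->mq_map with one queue index per possible CPU. CPUs beyond
 * nr_hw_queues are grouped with their first SMT sibling where possible,
 * so hyperthreads end up sharing a queue. Drivers that do not supply
 * their own ->map_queues callback typically fall back to this mapping.
 */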
int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
        unsigned int *map = set->mq_map;
        unsigned int nr_queues = set->nr_hw_queues;
        unsigned int cpu, first_sibling;

        for_each_possible_cpu(cpu) {
                /*
                 * First do sequential mapping between CPUs and queues.
                 * If we still have CPUs to map and there are multiple
                 * threads per core, map sibling threads to the same
                 * queue as a performance optimization.
                 */
                if (cpu < nr_queues) {
                        map[cpu] = cpu_to_queue_index(nr_queues, cpu);
                } else {
                        first_sibling = get_first_sibling(cpu);
                        if (first_sibling == cpu)
                                map[cpu] = cpu_to_queue_index(nr_queues, cpu);
                        else
                                map[cpu] = map[first_sibling];
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

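/*
 * Illustration on a hypothetical topology: 8 possible CPUs organised as
 * 4 cores with 2 SMT threads each (sibling pairs 0/4, 1/5, 2/6, 3/7) and
 * nr_hw_queues = 4. The loop above yields:
 *
 *      mq_map[] = { 0, 1, 2, 3, 0, 1, 2, 3 }
 *
 * CPUs 0-3 are mapped sequentially; CPUs 4-7 inherit the queue of their
 * first sibling, so both hyperthreads of a core share one queue.
 */
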
/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
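/*
 * With the hypothetical map above, asking for hw queue index 2 would hit
 * CPU 2 first and return the memory node nearest to that CPU.
 */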
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == mq_map[i])
                        return local_memory_node(cpu_to_node(i));
        }

        return NUMA_NO_NODE;
}