#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned int index_hw;

	/* incremented at dispatch time */
	unsigned long rq_dispatched[2];
	unsigned long rq_merged;

	/* incremented at completion time */
	unsigned long ____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat stat[2];

	struct request_queue *queue;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);
/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

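/*
 * Illustrative sketch (not part of the original header): resolving the
 * hardware context that services the current CPU and kicking it. The
 * surrounding code and the "q" variable are assumed for the example.
 *
 *	int cpu = get_cpu();
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, cpu);
 *
 *	blk_mq_run_hw_queue(hctx, true);
 *	put_cpu();
 */
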
/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_DEBUG_FS
void blk_mq_debugfs_init(void);
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline void blk_mq_debugfs_init(void)
{
}

static inline int blk_mq_debugfs_register(struct request_queue *q, const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

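/*
 * Illustrative sketch (not part of the original header): blk_mq_get_ctx()
 * pins the caller to a CPU via get_cpu(), so every successful call must be
 * paired with blk_mq_put_ctx(). The surrounding function is hypothetical.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 *	... queue work on ctx/hctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */
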
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

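/*
 * Illustrative sketch (not part of the original header): a caller fills the
 * alloc data with the queue, allocation flags and the ctx/hctx it intends to
 * allocate from, then hands it to __blk_mq_alloc_request(). The flags value
 * of 0 and REQ_OP_READ are only example inputs.
 *
 *	struct blk_mq_alloc_data data;
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	struct request *rq;
 *
 *	blk_mq_set_alloc_data(&data, q, 0, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&data, REQ_OP_READ);
 *	(rq may be NULL if no tag is available)
 *	blk_mq_put_ctx(ctx);
 */
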
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

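/*
 * Illustrative sketch (not part of the original header): callers that kick a
 * hardware queue typically skip unmapped or stopped contexts first. The
 * surrounding function is hypothetical.
 *
 *	if (!blk_mq_hw_queue_mapped(hctx) || blk_mq_hctx_stopped(hctx))
 *		return;
 *	blk_mq_run_hw_queue(hctx, true);
 */
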
#endif