#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;

	unsigned long		poll_invoked;
	unsigned long		poll_success;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth indicates that the
	 * request is being set up as a flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
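
/*
 * Illustrative sketch (not part of this header): a minimal blk_mq_ops for
 * a hypothetical driver "foo". All foo_* names are assumptions made up for
 * the example; only the callback signatures and return codes are from this
 * header. blk_mq_start_request() is called before handing the request to
 * hardware, and BLK_MQ_RQ_QUEUE_BUSY asks the core to requeue on resource
 * exhaustion.
 *
 *	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (foo_issue_to_hw(hctx->driver_data, rq))	// placeholder
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */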

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
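
/*
 * Example (illustrative): BLK_ALLOC_POLICY_TO_MQ_FLAG() packs a tag
 * allocation policy (BLK_TAG_ALLOC_FIFO or BLK_TAG_ALLOC_RR, from
 * <linux/blkdev.h>) into the BLK_MQ_F_* flag word starting at bit 8,
 * and BLK_MQ_FLAG_TO_ALLOC_POLICY() recovers it:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); // BLK_TAG_ALLOC_RR
 */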

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
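
/*
 * Illustrative setup sketch (not part of this header): a hypothetical
 * driver "foo" fills in a blk_mq_tag_set, allocates the shared tags, and
 * builds a request queue on top. foo_mq_ops and struct foo_cmd are
 * assumptions; error handling and teardown (blk_cleanup_queue(),
 * blk_mq_free_tag_set()) are elided.
 *
 *	static struct blk_mq_tag_set foo_set;
 *
 *	foo_set.ops		= &foo_mq_ops;
 *	foo_set.nr_hw_queues	= 1;
 *	foo_set.queue_depth	= 64;
 *	foo_set.numa_node	= NUMA_NO_NODE;
 *	foo_set.cmd_size	= sizeof(struct foo_cmd); // per-request PDU
 *	foo_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&foo_set))
 *		goto out;
 *	q = blk_mq_init_queue(&foo_set);	// ERR_PTR() on failure
 */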

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
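
/*
 * Example (illustrative): a unique tag encodes the hardware queue index
 * in the upper 16 bits and the per-queue tag in the lower 16 bits, so the
 * two helpers above invert blk_mq_unique_tag():
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// hctx->queue_num
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// per-queue tag
 */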

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
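
/*
 * Illustrative sketch: freeze/unfreeze bracket queue reconfiguration.
 * blk_mq_freeze_queue() waits for in-flight requests to drain and holds
 * off new ones until blk_mq_unfreeze_queue() is called:
 *
 *	blk_mq_freeze_queue(q);
 *	// ... safely update queue state with no requests in flight ...
 *	blk_mq_unfreeze_queue(q);
 */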

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
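
/*
 * Example (illustrative): with cmd_size = sizeof(struct foo_cmd) in the
 * tag set, each request is immediately followed by one struct foo_cmd,
 * and the two helpers above convert between them ("foo_cmd" is a made-up
 * name for the example):
 *
 *	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	// rq2 == rq
 */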

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
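
/*
 * Example (illustrative): the iterators walk the hardware and software
 * queues, and blk_ctx_sum() folds an expression over every software queue,
 * with __x bound to the current ctx. "nr_dispatched" stands in for a
 * counter in the private struct blk_mq_ctx; the real fields live in
 * block/blk-mq.h.
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_debug("hwq %u: queued %lu\n",
 *			 hctx->queue_num, hctx->queued);
 *
 *	total = blk_ctx_sum(q, __x->nr_dispatched);
 */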

#endif