/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    No locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

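/*
 * Illustrative sketch (not part of the original file): a completion
 * interrupt handler can use blk_queue_find_tag() to map a tag reported
 * by the hardware back to the request that owns it. The mydev structure,
 * mydev_read_completed_tag() and mydev_complete_rq() are made-up names.
 *
 *	static irqreturn_t mydev_irq(int irq, void *data)
 *	{
 *		struct mydev *dev = data;
 *		struct request *rq;
 *		int tag;
 *
 *		spin_lock(dev->queue->queue_lock);
 *		tag = mydev_read_completed_tag(dev);
 *		rq = blk_queue_find_tag(dev->queue, tag);
 *		if (rq)
 *			mydev_complete_rq(dev, rq);
 *		spin_unlock(dev->queue->queue_lock);
 *		return IRQ_HANDLED;
 *	}
 */
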
/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drop the reference count on @bqt and free it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing on a device, while leaving
 *	the queue itself in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

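/*
 * Illustrative sketch (not part of the original file): a host driver that
 * wants one tag space shared by several request queues (e.g. several LUNs
 * behind one controller) can allocate the map once with blk_init_tags(),
 * hand it to blk_queue_init_tags() for every queue, and drop its own
 * reference with blk_free_tags() on teardown. "myhost", "lun" and
 * MYHOST_QUEUE_DEPTH are made-up names.
 *
 *	myhost->bqt = blk_init_tags(MYHOST_QUEUE_DEPTH);
 *	if (!myhost->bqt)
 *		return -ENOMEM;
 *
 *	blk_queue_init_tags(lun->queue, MYHOST_QUEUE_DEPTH, myhost->bqt);
 *
 *	blk_free_tags(myhost->bqt);
 */
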
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to share, or %NULL to allocate a private one
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);

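/*
 * Illustrative sketch (not part of the original file): enabling tagged
 * queuing on a single queue with a private tag map just passes %NULL for
 * @tags; the helper then allocates the map and sets QUEUE_FLAG_QUEUED
 * itself. The depth of 64 is an arbitrary example value.
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		printk(KERN_WARNING "mydev: tagged queuing not enabled\n");
 */
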
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth. *NOTE* as requests with tag values between new_depth
	 * and real_max_depth can be in-flight, the tag map cannot be
	 * shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

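/*
 * Illustrative sketch (not part of the original file): a driver that only
 * learns the device's real queue depth after probing can adjust the
 * effective depth later. Shrinking merely lowers max_depth; growing past
 * real_max_depth reallocates the map, which is why a shared map
 * (refcnt > 1) returns -EBUSY. "negotiated_depth" is a made-up variable.
 *
 *	spin_lock_irq(q->queue_lock);
 *	ret = blk_queue_resize_tags(q, negotiated_depth);
 *	spin_unlock_irq(q->queue_lock);
 */
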
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

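/*
 * Illustrative sketch (not part of the original file): on completion the
 * tag must be released before the request is handed back to the block
 * layer, otherwise the freed request could be recycled while it still
 * owns a tag slot. Both calls below assume the queue lock is held.
 *
 *	blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 */
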
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

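/*
 * Illustrative sketch (not part of the original file): a typical request_fn
 * issue loop. A non-zero return from blk_queue_start_tag() means no tag is
 * currently available (or async IO is being throttled), so the driver
 * leaves the request on the queue and retries when a tag is freed.
 * mydev_issue() is a made-up hardware submission hook.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;	/* out of tags, stop for now */
 *			mydev_issue(q->queuedata, rq);
 *		}
 *	}
 */
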
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
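
/*
 * Illustrative sketch (not part of the original file): after a controller
 * or bus reset a driver can push everything it had in flight back to the
 * block layer and let the queue reissue it in order. Shown with the queue
 * lock taken explicitly; restarting the queue afterwards is one plausible
 * follow-up, not a requirement of this helper.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */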