#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
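/*
 * Illustrative numbers (not from the original source): with the default
 * allocation of PAGE_SIZE (4096) and a block size of 64, blocks_per_page
 * is 64, so dma_page::bitmap occupies 64 / BITS_PER_LONG longs.  A set
 * bit marks a free block; block N starts at offset N * size into the
 * page, in both the vaddr and the dma address spaces.
 */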
static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
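/*
 * For a device with a single pool, reading the "pools" attribute would
 * produce something like this (numbers purely illustrative):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   64 2048 32
 *
 * i.e. pool name, blocks in use, total blocks, block size, and pages.
 */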
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align - 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
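/*
 * Sketch of typical driver usage; "mydev" is a hypothetical struct
 * device pointer and the sizes are arbitrary:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev_bufs", mydev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Blocks are then 64 bytes, aligned to 8 bytes, carved out of
 * PAGE_SIZE allocations since @allocation was passed as zero.
 */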
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
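/*
 * A page is "busy" while any of its blocks is allocated.  Since
 * pool_alloc_page() sets every bitmap bit (including unused bits in
 * the last word), a fully free page reads back as all-ones, and any
 * cleared bit betrays an allocated block.
 */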
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

 restart:
	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
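/*
 * Usage sketch (names hypothetical; "pool" as created above):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 * Hand "dma" to the device and use "cpu_addr" from the CPU; return the
 * block later with dma_pool_free(pool, cpu_addr, dma).
 */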
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}
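	/*
	 * Worked example (illustrative): with size == 64 and
	 * BITS_PER_LONG == 64, a block 4160 bytes into the page is
	 * block 65, i.e. map 1, bit 1 of the bitmap.
	 */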
	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
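/*
 * Usage sketch in a hypothetical probe() routine; because the pool is
 * devres-managed, the driver's remove path needs no matching destroy:
 *
 *	pool = dmam_pool_create("mydev_bufs", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */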
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);