/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <[email protected]>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
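
/*
 * Typical usage, as a rough sketch (the device, pool name, and sizes
 * below are illustrative, not taken from a real driver):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("mydev_desc", &pdev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(pool, vaddr, dma);
 *	dma_pool_destroy(pool);
 *
 * Here 64-byte blocks are handed out 8-byte aligned, with no block
 * crossing a 4096-byte boundary.
 */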

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG	1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};
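
/*
 * Layout note (a summary of the scheme implemented below, not extra
 * state): the first sizeof(int) bytes of every *free* block hold the
 * offset of the next free block in the same page, so 'offset' above is
 * the head of an intrusive singly-linked free list and 'in_use' simply
 * counts allocated blocks.  A page whose 'offset' has reached
 * 'allocation' has no free blocks left.
 */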

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
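
/*
 * Example of the resulting "pools" sysfs attribute contents, given the
 * format string above (the numbers are made up for illustration):
 *
 *	poolinfo - 0.1
 *	buffer-2048         12   32 2048  2
 *	buffer-512           0   64  512  8
 *
 * Columns: pool name, blocks currently in use, total block capacity,
 * block size, and number of pages.
 */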

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
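
/*
 * Worked example of the chaining above (numbers chosen for
 * illustration): with size = 1536, boundary = 4096 and a 4096-byte
 * allocation, the loop writes 1536 at offset 0 and 4096 at offset 1536,
 * then stops.  A third block at 3072 would end at 4608 and cross the
 * 4096 boundary, so it is skipped; the page therefore yields two
 * blocks, and a chain value equal to 'allocation' terminates the list.
 */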

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
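
/*
 * Calling-context sketch (the caller below is hypothetical): from atomic
 * context, pass a non-blocking mask so the __GFP_WAIT retry path above
 * is never taken:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return NULL;
 *
 * With GFP_KERNEL, a caller may instead sleep in POOL_TIMEOUT_JIFFIES
 * steps until another thread frees a block into the pool.
 */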

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
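
/*
 * Usage sketch for the managed variant (the probe function and names are
 * hypothetical): no explicit destroy is needed, since devres tears the
 * pool down automatically on driver detach:
 *
 *	static int mydrv_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("mydrv", &pdev->dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */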

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);