/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>


MODULE_AUTHOR("Takashi Iwai <[email protected]>, Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages covering the given size.
 *
 * Returns the pointer to the buffer, or NULL if not enough memory is
 * available.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
        int pg;
        void *res;

        if (WARN_ON(!size))
                return NULL;
        if (WARN_ON(!gfp_flags))
                return NULL;
        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
                inc_snd_pages(pg);
        return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        free_pages((unsigned long) ptr, pg);
}
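
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver allocating a small contiguous scratch buffer with these helpers.
 * The buffer size and GFP flags below are assumptions for the example.
 *
 *      static void *scratch;
 *      static const size_t scratch_size = 8192;
 *
 *      static int example_init(void)
 *      {
 *              scratch = snd_malloc_pages(scratch_size, GFP_KERNEL);
 *              if (!scratch)
 *                      return -ENOMEM;
 *              return 0;
 *      }
 *
 *      static void example_exit(void)
 *      {
 *              snd_free_pages(scratch, scratch_size);
 *      }
 *
 * The same size must be passed to snd_free_pages() as was passed to
 * snd_malloc_pages(), since the page order is recomputed from it.
 */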

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        gfp_t gfp_flags;

        if (WARN_ON(!dma))
                return NULL;
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL)
                inc_snd_pages(pg);

        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if a buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                dmab->area = snd_malloc_pages(size, (unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
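
/*
 * Usage sketch (illustrative only, not part of this file): allocating and
 * releasing a coherent DMA buffer for a hypothetical PCI sound card.  The
 * size below is an assumption for the example; real drivers usually go
 * through snd_pcm_lib_preallocate_pages() rather than calling this directly.
 *
 *      struct snd_dma_buffer dmab;
 *      int err;
 *
 *      err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *                                64 * 1024, &dmab);
 *      if (err < 0)
 *              return err;     // -ENOMEM or -ENXIO
 *
 *      // ... dmab.area is the CPU address, dmab.addr the DMA address ...
 *
 *      snd_dma_free_pages(&dmab);
 *
 * For SNDRV_DMA_TYPE_CONTINUOUS the @device argument carries the GFP flags
 * instead, e.g. snd_dma_continuous_data(GFP_KERNEL).
 */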

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer could be allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                size_t aligned_size;
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                aligned_size = PAGE_SIZE << get_order(size);
                if (size != aligned_size)
                        size = aligned_size;
                else
                        size >>= 1;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
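
/*
 * Worked example of the retry logic above (the sizes are an assumption
 * chosen for illustration): a request for 200 KB that keeps hitting -ENOMEM
 * is first rounded up to the page-order size 256 KB, then halved on each
 * further failure:
 *
 *      200 KB -> 256 KB -> 128 KB -> 64 KB -> ... -> PAGE_SIZE
 *
 * If even a PAGE_SIZE-sized attempt fails, the loop gives up with -ENOMEM;
 * on success the caller reads the actual size from dmab->bytes.
 */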


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks through the reserved-buffer list and re-uses a buffer if one with
 * the same id is found.  When the buffer is found, it is removed from the
 * reserved list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return 0;

        mutex_lock(&list_mutex);
        list_for_each_entry(mem, &mem_list_head, list) {
                if (mem->id == id &&
                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
                        struct device *dev = dmab->dev.dev;
                        list_del(&mem->list);
                        *dmab = mem->buffer;
                        if (dmab->dev.dev == NULL)
                                dmab->dev.dev = dev;
                        kfree(mem);
                        mutex_unlock(&list_mutex);
                        return dmab->bytes;
                }
        }
        mutex_unlock(&list_mutex);
        return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * re-used later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return -EINVAL;
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        mutex_lock(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        mutex_unlock(&list_mutex);
        return 0;
}
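
/*
 * Usage sketch (illustrative only, not part of this file): keeping a DMA
 * buffer across driver reload by parking it on the reserved list at remove
 * time and picking it up again at probe time.  The buffer size is an
 * assumption; PCI drivers derive the id with snd_dma_pci_buf_id().
 *
 *      // at probe: try to re-use a previously reserved buffer first
 *      if (!snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci))) {
 *              err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
 *                                        snd_dma_pci_data(pci),
 *                                        64 * 1024, &dmab);
 *              if (err < 0)
 *                      return err;
 *      }
 *
 *      // at remove: hand the buffer back instead of freeing it
 *      if (snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)) < 0)
 *              snd_dma_free_pages(&dmab);      // reservation failed, free it
 */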

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        mutex_lock(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };

        mutex_lock(&list_mutex);
        seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each_entry(mem, &mem_list_head, list) {
                devno++;
                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
                           devno, mem->id, types[mem->buffer.dev.type]);
                seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
                           (unsigned long)mem->buffer.addr,
                           (int)mem->buffer.bytes);
        }
        mutex_unlock(&list_mutex);
        return 0;
}

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
                                  size_t count, loff_t * ppos)
{
        char buf[128];
        char *token, *p;

        if (count > sizeof(buf) - 1)
                return -EINVAL;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[count] = '\0';

        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
                return count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
                long mask;
                int i, alloced;
                struct pci_dev *pci;

                if ((token = gettoken(&p)) == NULL ||
                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (size = memparse(token, &endp)) < 64*1024 ||
                    size > 16*1024*1024 /* too big */ ||
                    (token = gettoken(&p)) == NULL ||
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
                        return count;
                }
                vendor &= 0xffff;
                device &= 0xffff;

                alloced = 0;
                pci = NULL;
                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
                        if (mask > 0 && mask < 0xffffffff) {
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
                                        pci_dev_put(pci);
                                        return count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
                                        return count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
                        alloced++;
                }
                if (! alloced) {
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                /* FIXME: We can allocate only in ZONE_DMA
                                 * without a device pointer!
                                 */
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        break;
                                }
                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
                        }
                }
        } else if (strcmp(token, "erase") == 0)
                /* FIXME: need for releasing each buffer chunk? */
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
        return count;
}
#endif /* CONFIG_PCI */
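
/*
 * Example of driving the proc interface above from user space (the device
 * IDs and size here are assumptions chosen for illustration).  The "add"
 * command reserves <buffers> coherent buffers of <size> bytes for every PCI
 * device matching <vendor> <device>, optionally restricting the DMA mask,
 * and "erase" drops all reserved buffers:
 *
 *      echo "add 0x8086 0x2668 0xffffffff 512k 1" > /proc/driver/snd-page-alloc
 *      echo "erase" > /proc/driver/snd-page-alloc
 *
 * Lines starting with '#' are ignored.  Sizes accept memparse() suffixes
 * such as "k" and "M" and must fall between 64k and 16M; at most 4 buffers
 * may be requested per command.
 */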

static const struct file_operations snd_mem_proc_fops = {
        .owner = THIS_MODULE,
        .open = snd_mem_proc_open,
        .read = seq_read,
#ifdef CONFIG_PCI
        .write = snd_mem_proc_write,
#endif
        .llseek = seq_lseek,
        .release = single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
                                   &snd_mem_proc_fops);
#endif
        return 0;
}

static void __exit snd_mem_exit(void)
{
        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);