/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>


MODULE_AUTHOR("Takashi Iwai <[email protected]>, Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
        int pg;
        void *res;

        if (WARN_ON(!size))
                return NULL;
        if (WARN_ON(!gfp_flags))
                return NULL;
        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
                inc_snd_pages(pg);
        return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        free_pages((unsigned long) ptr, pg);
}
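
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * pairing snd_malloc_pages() with snd_free_pages() in a hypothetical
 * driver.  The requested size is rounded up internally to a whole
 * power-of-two number of pages, so the same size must be passed back
 * when freeing.
 *
 *      void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      snd_free_pages(buf, 8192);
 */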

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        gfp_t gfp_flags;

        if (WARN_ON(!dma))
                return NULL;
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL)
                inc_snd_pages(pg);

        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                dmab->area = snd_malloc_pages(size,
                                        (__force gfp_t)(unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
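
/*
 * A minimal usage sketch (illustrative only, hypothetical device):
 * allocating a 64 kB coherent DMA buffer for a device and releasing it
 * again.  On success dmab.area holds the CPU address, dmab.addr the DMA
 * address and dmab.bytes the size actually allocated.
 *
 *      struct snd_dma_buffer dmab;
 *
 *      if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *                              64 * 1024, &dmab) < 0)
 *              return -ENOMEM;
 *      ...
 *      snd_dma_free_pages(&dmab);
 */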

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                size_t aligned_size;
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                aligned_size = PAGE_SIZE << get_order(size);
                if (size != aligned_size)
                        size = aligned_size;
                else
                        size >>= 1;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
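
/*
 * An illustrative sketch of the fallback behaviour (hypothetical
 * numbers): a request that cannot be satisfied is first rounded up to a
 * whole power-of-two number of pages, then halved on each further retry
 * (e.g. 512 kB -> 256 kB -> 128 kB) until an allocation succeeds or the
 * size drops to a single page.  Check dmab->bytes afterwards to see how
 * much was really obtained:
 *
 *      if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *                                       512 * 1024, &dmab) == 0)
 *              dev_dbg(dev, "got %zu bytes\n", dmab.bytes);
 */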


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the allocated buffer via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks through the reserved-buffer list and re-uses the buffer if a
 * matching one is found.  When found, the buffer is removed from the
 * reserved list.
 *
 * Returns the size of buffer if the buffer is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return 0;

        mutex_lock(&list_mutex);
        list_for_each_entry(mem, &mem_list_head, list) {
                if (mem->id == id &&
                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
                        struct device *dev = dmab->dev.dev;
                        list_del(&mem->list);
                        *dmab = mem->buffer;
                        if (dmab->dev.dev == NULL)
                                dmab->dev.dev = dev;
                        kfree(mem);
                        mutex_unlock(&list_mutex);
                        return dmab->bytes;
                }
        }
        mutex_unlock(&list_mutex);
        return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Reserves the given buffer as a reserved buffer.
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return -EINVAL;
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        mutex_lock(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        mutex_unlock(&list_mutex);
        return 0;
}
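
/*
 * An illustrative sketch of the reserve/re-use pattern (hypothetical
 * driver code): a buffer is handed back to the reserved list instead of
 * being freed, and a later allocation with the same id picks it up again
 * without going through the page allocator.
 *
 *      unsigned int id = snd_dma_pci_buf_id(pci);      // hypothetical pci dev
 *
 *      if (!snd_dma_get_reserved_buf(&dmab, id))       // nothing reserved yet
 *              snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
 *                                  snd_dma_pci_data(pci), size, &dmab);
 *      ...
 *      snd_dma_reserve_buf(&dmab, id);                 // keep for next time
 */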

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        mutex_lock(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE       "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };

        mutex_lock(&list_mutex);
        seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each_entry(mem, &mem_list_head, list) {
                devno++;
                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
                           devno, mem->id, types[mem->buffer.dev.type]);
                seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
                           (unsigned long)mem->buffer.addr,
                           (int)mem->buffer.bytes);
        }
        mutex_unlock(&list_mutex);
        return 0;
}
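
/*
 * Example of what /proc/driver/snd-page-alloc may look like with one
 * reserved buffer (values are made up for illustration, following the
 * seq_printf() formats above):
 *
 *      pages : 65536 bytes (16 pages per 4kB)
 *      buffer 1 : ID 10de0371 : type DEV
 *        addr = 0x1f400000, size = 65536 bytes
 */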

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
                                  size_t count, loff_t * ppos)
{
        char buf[128];
        char *token, *p;

        if (count > sizeof(buf) - 1)
                return -EINVAL;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[count] = '\0';

        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
                return count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
                long mask;
                int i, alloced;
                struct pci_dev *pci;

                if ((token = gettoken(&p)) == NULL ||
                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (size = memparse(token, &endp)) < 64*1024 ||
                    size > 16*1024*1024 /* too big */ ||
                    (token = gettoken(&p)) == NULL ||
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
                        return count;
                }
                vendor &= 0xffff;
                device &= 0xffff;

                alloced = 0;
                pci = NULL;
                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
                        if (mask > 0 && mask < 0xffffffff) {
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
                                        pci_dev_put(pci);
                                        return count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
                                        return count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
                        alloced++;
                }
                if (! alloced) {
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                /* FIXME: We can allocate only in ZONE_DMA
                                 * without a device pointer!
                                 */
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        break;
                                }
                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
                        }
                }
        } else if (strcmp(token, "erase") == 0)
                /* FIXME: need for releasing each buffer chunk? */
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
        return count;
}
#endif /* CONFIG_PCI */
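
/*
 * Example of driving the proc interface from user space (illustrative
 * values; the vendor/device IDs are hypothetical).  The "add" command
 * takes the PCI vendor ID, device ID, DMA mask, buffer size and number
 * of buffers; "erase" drops all reserved buffers:
 *
 *      echo "add 0x8086 0x2668 0xffffffff 64k 2" > /proc/driver/snd-page-alloc
 *      echo "erase" > /proc/driver/snd-page-alloc
 */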

static const struct file_operations snd_mem_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = snd_mem_proc_open,
        .read    = seq_read,
#ifdef CONFIG_PCI
        .write   = snd_mem_proc_write,
#endif
        .llseek  = seq_lseek,
        .release = single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
                                   &snd_mem_proc_fops);
#endif
        return 0;
}

static void __exit snd_mem_exit(void)
{
        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);