/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>


MODULE_AUTHOR("Takashi Iwai <[email protected]>, Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;
	unsigned int id;
	struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
	snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
	snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;
	void *res;

	if (WARN_ON(!size))
		return NULL;
	if (WARN_ON(!gfp_flags))
		return NULL;
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
	return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	free_pages((unsigned long) ptr, pg);
}
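
/*
 * Example (illustrative only, not compiled here): a minimal sketch of how a
 * caller might use snd_malloc_pages()/snd_free_pages() for a small,
 * physically contiguous scratch buffer.  The 8 kB size and GFP_KERNEL flags
 * are assumptions made for the sake of the example.
 *
 *	void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use the buffer ...
 *	snd_free_pages(buf, 8192);
 */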

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	void *res;
	gfp_t gfp_flags;

	if (WARN_ON(!dma))
		return NULL;
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
	if (res != NULL)
		inc_snd_pages(pg);

	return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_HAS_DMA
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
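
/*
 * Example (illustrative only): a minimal sketch of allocating and releasing a
 * coherent DMA buffer through this API from a driver.  The device pointer
 * "dev" and the 64 kB size are assumptions made for the sake of the example.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	... dmab.area is the CPU address, dmab.addr the DMA address ...
 *	snd_dma_free_pages(&dmab);
 */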

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again: the size is first aligned up to a whole
 * power-of-two number of pages, then halved on each further failure, until
 * the allocation succeeds or a request of PAGE_SIZE or smaller still fails.
 * The size actually allocated is stored in dmab->bytes.
 *
 * Returns zero if a buffer is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		size_t aligned_size;
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		aligned_size = PAGE_SIZE << get_order(size);
		if (size != aligned_size)
			size = aligned_size;
		else
			size >>= 1;
	}
	if (! dmab->area)
		return -ENOMEM;
	return 0;
}
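
/*
 * Example (illustrative only): how the fallback sizes progress, assuming
 * 4 kB pages.  A 600 kB request that cannot be satisfied is first aligned up
 * to 1024 kB (the order the page allocator would use anyway), then retried at
 * 512 kB, 256 kB, ... until an allocation succeeds or a 4 kB request fails.
 * The device pointer "dev" is an assumption for the sake of the example.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					 600 * 1024, &dmab) == 0)
 *		pr_info("got %zu bytes\n", dmab.bytes);
 */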

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_HAS_DMA
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks through the reserved-buffer list for a buffer with the given id and,
 * if a matching buffer is found, reuses it and removes it from the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	if (WARN_ON(!dmab))
		return 0;

	mutex_lock(&list_mutex);
	list_for_each_entry(mem, &mem_list_head, list) {
		if (mem->id == id &&
		    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
		     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
			struct device *dev = dmab->dev.dev;
			list_del(&mem->list);
			*dmab = mem->buffer;
			if (dmab->dev.dev == NULL)
				dmab->dev.dev = dev;
			kfree(mem);
			mutex_unlock(&list_mutex);
			return dmab->bytes;
		}
	}
	mutex_unlock(&list_mutex);
	return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list under the given id.
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	if (WARN_ON(!dmab))
		return -EINVAL;
	mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (! mem)
		return -ENOMEM;
	mutex_lock(&list_mutex);
	mem->buffer = *dmab;
	mem->id = id;
	list_add_tail(&mem->list, &mem_list_head);
	mutex_unlock(&list_mutex);
	return 0;
}
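
/*
 * Example (illustrative only): a typical preservation pattern, where a driver
 * keeps its DMA buffer across module reload by reserving it on removal and
 * picking it up again on probe.  The id value "my_buffer_id", the device
 * pointer "dev" and "size" are assumptions made for the sake of the example.
 *
 *	On removal, hand the buffer over to the preservation list:
 *
 *		snd_dma_reserve_buf(&dmab, my_buffer_id);
 *
 *	On probe, try to reuse it before allocating a new one; the dev fields
 *	must be filled in first, since matching compares dmab->dev:
 *
 *		dmab.dev.type = SNDRV_DMA_TYPE_DEV;
 *		dmab.dev.dev = dev;
 *		if (!snd_dma_get_reserved_buf(&dmab, my_buffer_id))
 *			snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &dmab);
 */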

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	mutex_lock(&list_mutex);
	while (! list_empty(&mem_list_head)) {
		p = mem_list_head.next;
		mem = list_entry(p, struct snd_mem_list, list);
		list_del(p);
		snd_dma_free_pages(&mem->buffer);
		kfree(mem);
	}
	mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
	struct snd_mem_list *mem;
	int devno;
	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };

	mutex_lock(&list_mutex);
	seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
	devno = 0;
	list_for_each_entry(mem, &mem_list_head, list) {
		devno++;
		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
			   devno, mem->id, types[mem->buffer.dev.type]);
		seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
			   (unsigned long)mem->buffer.addr,
			   (int)mem->buffer.bytes);
	}
	mutex_unlock(&list_mutex);
	return 0;
}

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

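/*
 * Accepted commands on writes to /proc/driver/snd-page-alloc (derived from
 * the parser below; the vendor/device numbers are only an illustration):
 *
 *	add <vendor-id> <device-id> <dma-mask> <size> <buffers>
 *		e.g. "add 0x8086 0x2668 0xffffffff 512k 2" pre-allocates two
 *		512 kB buffers for each matching PCI device; the size must be
 *		between 64 kB and 16 MB, and at most 4 buffers are allowed
 *	erase
 *		frees all reserved buffers
 *	# ...
 *		lines starting with '#' are ignored
 */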
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
				  size_t count, loff_t * ppos)
{
	char buf[128];
	char *token, *p;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';

	p = buf;
	token = gettoken(&p);
	if (! token || *token == '#')
		return count;
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					pci_dev_put(pci);
					return count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					pci_dev_put(pci);
					return count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (! alloced) {
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return count;
}
#endif /* CONFIG_PCI */

static const struct file_operations snd_mem_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= snd_mem_proc_open,
	.read		= seq_read,
#ifdef CONFIG_PCI
	.write		= snd_mem_proc_write,
#endif
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
				   &snd_mem_proc_fops);
#endif
	return 0;
}

static void __exit snd_mem_exit(void)
{
	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
	free_all_reserved_pages();
	if (snd_allocated_pages > 0)
		printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);