/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <asm/semaphore.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif

MODULE_AUTHOR("Takashi Iwai <[email protected]>, Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


#ifndef SNDRV_CARDS
#define SNDRV_CARDS	8
#endif

/* the scatter-gather buffer helpers are implemented in sgbuf.c */

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/* lock and list head for the buffer preservation list */

static DECLARE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* an entry of the buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;
	unsigned int id;
	struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
	if (!(expr)) {\
		printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
		args;\
	}\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif

/*
 * Hacks
 */

#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * Since dma_alloc_coherent() always tries GFP_DMA when the requested
 * PCI memory region is below 32 bits, it happens quite often that even
 * an order-2 page allocation fails.
 *
 * So, in the following, we allocate at first without dma_mask, so that
 * the allocation is done without GFP_DMA.  If the resulting area doesn't
 * match the requested region, we reallocate with the original dma_mask
 * again.
 *
 * Really, we want to move this type of thing into dma_alloc_coherent()
 * so dma_mask doesn't have to be messed with.
 */

static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle,
					 unsigned int __nocast flags)
{
	void *ret;
	u64 dma_mask, coherent_dma_mask;

	if (dev == NULL || !dev->dma_mask)
		return dma_alloc_coherent(dev, size, dma_handle, flags);
	dma_mask = *dev->dma_mask;
	coherent_dma_mask = dev->coherent_dma_mask;
	*dev->dma_mask = 0xffffffff;		/* do without masking */
	dev->coherent_dma_mask = 0xffffffff;	/* do without masking */
	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = dma_mask;			/* restore */
	dev->coherent_dma_mask = coherent_dma_mask;	/* restore */
	if (ret) {
		/* obtained address is out of range? */
		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
			/* reallocate with the proper mask */
			dma_free_coherent(dev, size, ret, *dma_handle);
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	} else {
		/* hope to succeed now with the proper mask... */
		if (dma_mask != 0xffffffffUL) {
			/* allocate with GFP_ATOMIC to avoid the long stall */
			flags &= ~GFP_KERNEL;
			flags |= GFP_ATOMIC;
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	}
	return ret;
}

/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

#if ! defined(__arm__)
#define NEED_RESERVE_PAGES
#endif

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
	snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
	snd_allocated_pages -= 1 << order;
}

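/*
 * Added remark: mark_pages()/unmark_pages() below set and clear
 * PG_reserved on every page of a buffer.  Reserved pages are left alone
 * by the VM, which was historically required for memory that gets
 * mmapped into user space; presumably this is also why
 * NEED_RESERVE_PAGES is defined for the non-ARM architectures above.
 */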
static void mark_pages(struct page *page, int order)
{
	struct page *last_page = page + (1 << order);
	while (page < last_page)
		SetPageReserved(page++);
}

static void unmark_pages(struct page *page, int order)
{
	struct page *last_page = page + (1 << order);
	while (page < last_page)
		ClearPageReserved(page++);
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages with the given size.
 *
 * Returns the pointer to the buffer, or NULL if not enough memory
 * is available.
 */
void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
		mark_pages(virt_to_page(res), pg);
		inc_snd_pages(pg);
	}
	return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	unmark_pages(virt_to_page(ptr), pg);
	free_pages((unsigned long) ptr, pg);
}
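
/*
 * Usage sketch (an added illustration, not part of the original file):
 * allocation and free must be paired with the same size, since the page
 * order is recomputed from it on both sides.
 *
 *	void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *	if (buf) {
 *		... use the two pages ...
 *		snd_free_pages(buf, 8192);
 *	}
 */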

/*
 *
 *  Bus-specific memory allocators
 *
 */

/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	void *res;
	unsigned int gfp_flags;

	snd_assert(size > 0, return NULL);
	snd_assert(dma != NULL, return NULL);
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
	if (res != NULL) {
#ifdef NEED_RESERVE_PAGES
		mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
#endif
		inc_snd_pages(pg);
	}

	return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
#ifdef NEED_RESERVE_PAGES
	unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
#endif
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
				   dma_addr_t *dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(dma_addr != NULL, return NULL);
	pg = get_order(size);
	res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
	if (res != NULL)
		inc_snd_pages(pg);
	return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
				void *ptr, dma_addr_t dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* for continuous pages, the device pointer carries the GFP flags */
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
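
/*
 * Usage sketch (an added illustration, not part of the original file);
 * "pci" stands for a struct pci_dev the driver already holds:
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *				32 * 1024, &dmab) == 0) {
 *		... program the hardware with dmab.addr,
 *		    access the memory through dmab.area ...
 *		snd_dma_free_pages(&dmab);
 *	}
 */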

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		size >>= 1;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
	}
	if (! dmab->area)
		return -ENOMEM;
	return 0;
}
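
/*
 * Added remark: a 512kB request that cannot be satisfied may, for
 * example, come back as a 256kB or 128kB buffer, so callers must read
 * dmab->bytes rather than assume the requested size.  The loop gives up
 * once the halved size would drop to a single page.
 */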

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Searches the reserved-buffer list and reuses a buffer if one with the
 * given id is found.  The found buffer is removed from the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	snd_assert(dmab, return 0);

	down(&list_mutex);
	list_for_each(p, &mem_list_head) {
		mem = list_entry(p, struct snd_mem_list, list);
		if (mem->id == id &&
		    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
		     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
			struct device *dev = dmab->dev.dev;
			list_del(p);
			*dmab = mem->buffer;
			if (dmab->dev.dev == NULL)
				dmab->dev.dev = dev;
			kfree(mem);
			up(&list_mutex);
			return dmab->bytes;
		}
	}
	up(&list_mutex);
	return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * picked up again later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	snd_assert(dmab, return -EINVAL);
	mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (! mem)
		return -ENOMEM;
	down(&list_mutex);
	mem->buffer = *dmab;
	mem->id = id;
	list_add_tail(&mem->list, &mem_list_head);
	up(&list_mutex);
	return 0;
}
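
/*
 * Usage sketch (an added illustration, not part of the original file):
 * since the preservation list lives in this module, a driver can keep a
 * DMA buffer across its own module reloads by reserving it on shutdown
 * and reclaiming it on the next probe:
 *
 *	// on shutdown
 *	snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 *
 *	// on the next probe
 *	if (! snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)))
 *		... fall back to a fresh snd_dma_alloc_pages() call ...
 */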

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	down(&list_mutex);
	while (! list_empty(&mem_list_head)) {
		p = mem_list_head.next;
		mem = list_entry(p, struct snd_mem_list, list);
		list_del(p);
		snd_dma_free_pages(&mem->buffer);
		kfree(mem);
	}
	up(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = 0;
	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
	struct list_head *p;
	struct snd_mem_list *mem;
	int devno;
	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

	down(&list_mutex);
	len += snprintf(page + len, count - len,
			"pages  : %li bytes (%li pages per %likB)\n",
			pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
	devno = 0;
	list_for_each(p, &mem_list_head) {
		mem = list_entry(p, struct snd_mem_list, list);
		devno++;
		len += snprintf(page + len, count - len,
				"buffer %d : ID %08x : type %s\n",
				devno, mem->id, types[mem->buffer.dev.type]);
		len += snprintf(page + len, count - len,
				"  addr = 0x%lx, size = %d bytes\n",
				(unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
	}
	up(&list_mutex);
	return len;
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static int snd_mem_proc_write(struct file *file, const char __user *buffer,
			      unsigned long count, void *data)
{
	char buf[128];
	char *token, *p;

	if (count > ARRAY_SIZE(buf) - 1)
		count = ARRAY_SIZE(buf) - 1;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';	/* terminate right after the copied data */

	p = buf;
	token = gettoken(&p);
	if (! token || *token == '#')
		return (int)count;
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return (int)count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		while ((pci = pci_find_device(vendor, device, pci)) != NULL) {
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					return (int)count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					return (int)count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (! alloced) {
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return (int)count;
}
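
/*
 * Usage sketch (an added illustration derived from the parser above, not
 * part of the original file): the write handler accepts
 * "add <vendor> <device> <mask> <size> <buffers>" or "erase", e.g.
 *
 *	echo "add 0x1102 0x0002 0 512k 2" > /proc/driver/snd-page-alloc
 *	echo "erase" > /proc/driver/snd-page-alloc
 *
 * The vendor/device pair here is only an example of a PCI ID; a mask of
 * 0 leaves the device's DMA mask untouched; <size> takes the k/M
 * suffixes of memparse() and must lie between 64kB and 16MB; at most 4
 * buffers may be requested.
 */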
#endif /* CONFIG_PCI */
#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
	if (snd_mem_proc) {
		snd_mem_proc->read_proc = snd_mem_proc_read;
#ifdef CONFIG_PCI
		snd_mem_proc->write_proc = snd_mem_proc_write;
#endif
	}
#endif
	return 0;
}

static void __exit snd_mem_exit(void)
{
	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
	free_all_reserved_pages();
	if (snd_allocated_pages > 0)
		printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);