/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <asm/semaphore.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <[email protected]>, Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");

/*
 * prototypes of the scatter-gather buffer allocator (implemented
 * separately in the same module)
 */

void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
                             size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 * globals for buffer preservation
 */

static DECLARE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
        if (!(expr)) {\
                printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
                args;\
        }\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif

/*
 * Hacks
 */

#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * Since dma_alloc_coherent() always tries GFP_DMA when the requested
 * PCI memory region is below 32 bits, it happens quite often that even
 * order-2 page allocations fail.
 *
 * So in the following, we first allocate without the dma_mask, so that
 * the allocation is done without GFP_DMA.  If the resulting area doesn't
 * match the requested region, we reallocate with the original dma_mask
 * again.
 *
 * Really, we want to move this kind of logic into dma_alloc_coherent()
 * itself, so that the dma_mask doesn't have to be messed with here.
 */

static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle,
                                         gfp_t flags)
{
        void *ret;
        u64 dma_mask, coherent_dma_mask;

        if (dev == NULL || !dev->dma_mask)
                return dma_alloc_coherent(dev, size, dma_handle, flags);
        dma_mask = *dev->dma_mask;
        coherent_dma_mask = dev->coherent_dma_mask;
        *dev->dma_mask = 0xffffffff; /* do without masking */
        dev->coherent_dma_mask = 0xffffffff; /* do without masking */
        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
        *dev->dma_mask = dma_mask; /* restore */
        dev->coherent_dma_mask = coherent_dma_mask; /* restore */
        if (ret) {
                /* obtained address is out of range? */
                if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
                        /* reallocate with the proper mask */
                        dma_free_coherent(dev, size, ret, *dma_handle);
                        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
                }
        } else {
                /* try to succeed now with the proper mask... */
                if (dma_mask != 0xffffffffUL) {
                        /* allocate with GFP_ATOMIC to avoid a long stall */
                        flags &= ~GFP_KERNEL;
                        flags |= GFP_ATOMIC;
                        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
                }
        }
        return ret;
}

/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

#if ! defined(__arm__)
#define NEED_RESERVE_PAGES
#endif

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}
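
/*
 * The pages of each allocated block are flagged PageReserved below.
 * (Assumption, not stated in the original source: in this kernel
 * generation the flag keeps the VM away from the pages and lets the
 * buffers be mmapped to user space safely.)
 */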

static void mark_pages(struct page *page, int order)
{
        struct page *last_page = page + (1 << order);
        while (page < last_page)
                SetPageReserved(page++);
}

static void unmark_pages(struct page *page, int order)
{
        struct page *last_page = page + (1 << order);
        while (page < last_page)
                ClearPageReserved(page++);
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages of the given size.
 *
 * Returns the pointer to the buffer, or NULL if not enough memory is
 * available.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(gfp_flags != 0, return NULL);
        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
                mark_pages(virt_to_page(res), pg);
                inc_snd_pages(pg);
        }
        return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        unmark_pages(virt_to_page(ptr), pg);
        free_pages((unsigned long) ptr, pg);
}
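
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *      void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *      if (buf) {
 *              ... fill and use the buffer ...
 *              snd_free_pages(buf, 8192);
 *      }
 */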

/*
 *
 *  Bus-specific memory allocators
 *
 */

/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        gfp_t gfp_flags;

        snd_assert(size > 0, return NULL);
        snd_assert(dma != NULL, return NULL);
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL) {
#ifdef NEED_RESERVE_PAGES
                mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
#endif
                inc_snd_pages(pg);
        }

        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
#ifdef NEED_RESERVE_PAGES
        unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
#endif
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
                                   dma_addr_t *dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(dma_addr != NULL, return NULL);
        pg = get_order(size);
        res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
        if (res != NULL)
                inc_snd_pages(pg);
        return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
                                void *ptr, dma_addr_t dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                /* for continuous pages, the "device" argument actually
                 * carries the GFP flags, cast to a pointer by the caller
                 */
                dmab->area = snd_malloc_pages(size, (unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
                break;
#endif
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
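
/*
 * Illustrative usage sketch (hypothetical PCI driver, not part of this
 * file): allocate a coherent DMA buffer, hand dmab.addr to the hardware,
 * access it from the CPU via dmab.area, then release it:
 *
 *      struct snd_dma_buffer dmab;
 *      if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *                              4096, &dmab) < 0)
 *              return -ENOMEM;
 *      ... program dmab.addr, read/write dmab.area ...
 *      snd_dma_free_pages(&dmab);
 */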

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                size >>= 1;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
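
/*
 * Illustrative sketch (hypothetical caller; setup_ring() is made up):
 * ask for 256kB but accept whatever the fallback loop obtains; the size
 * actually allocated is reported in dmab.bytes:
 *
 *      if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
 *                                       snd_dma_pci_data(pci),
 *                                       256 * 1024, &dmab) == 0)
 *              setup_ring(dmab.area, dmab.bytes);
 */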

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
#endif
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks through the reserved-buffer list for a buffer with a matching id
 * and reuses it if found.  A matching buffer is removed from the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        snd_assert(dmab, return 0);

        down(&list_mutex);
        list_for_each(p, &mem_list_head) {
                mem = list_entry(p, struct snd_mem_list, list);
                if (mem->id == id &&
                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
                        struct device *dev = dmab->dev.dev;
                        list_del(p);
                        *dmab = mem->buffer;
                        if (dmab->dev.dev == NULL)
                                dmab->dev.dev = dev;
                        kfree(mem);
                        up(&list_mutex);
                        return dmab->bytes;
                }
        }
        up(&list_mutex);
        return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * reused later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        snd_assert(dmab, return -EINVAL);
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        down(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        up(&list_mutex);
        return 0;
}
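
/*
 * Illustrative sketch of the preservation pattern (hypothetical driver,
 * not part of this file): instead of freeing a large buffer at module
 * unload, reserve it under a device-unique id and pick it up again on
 * the next load:
 *
 *      snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));       at unload
 *
 *      if (! snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)))
 *              ... fall back to a fresh allocation ...            at reload
 */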

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        down(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        up(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE       "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(char *page, char **start, off_t off,
                             int count, int *eof, void *data)
{
        int len = 0;
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct list_head *p;
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

        down(&list_mutex);
        len += snprintf(page + len, count - len,
                        "pages  : %li bytes (%li pages per %likB)\n",
                        pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each(p, &mem_list_head) {
                mem = list_entry(p, struct snd_mem_list, list);
                devno++;
                len += snprintf(page + len, count - len,
                                "buffer %d : ID %08x : type %s\n",
                                devno, mem->id, types[mem->buffer.dev.type]);
                len += snprintf(page + len, count - len,
                                "  addr = 0x%lx, size = %d bytes\n",
                                (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
        }
        up(&list_mutex);
        return len;
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static int snd_mem_proc_write(struct file *file, const char __user *buffer,
                              unsigned long count, void *data)
{
        char buf[128];
        char *token, *p;

        if (count > ARRAY_SIZE(buf) - 1)
                count = ARRAY_SIZE(buf) - 1;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[count] = '\0'; /* terminate right after the copied bytes */

        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
                return (int)count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
                long mask;
                int i, alloced;
                struct pci_dev *pci;

                if ((token = gettoken(&p)) == NULL ||
                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (size = memparse(token, &endp)) < 64*1024 ||
                    size > 16*1024*1024 /* too big */ ||
                    (token = gettoken(&p)) == NULL ||
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
                        return (int)count;
                }
                vendor &= 0xffff;
                device &= 0xffff;

                alloced = 0;
                pci = NULL;
                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
                        if (mask > 0 && mask < 0xffffffff) {
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
                                        /* drop the reference taken by pci_get_device() */
                                        pci_dev_put(pci);
                                        return (int)count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
                                        return (int)count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
                        alloced++;
                }
                if (! alloced) {
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                /* FIXME: We can allocate only in ZONE_DMA
                                 * without a device pointer!
                                 */
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        break;
                                }
                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
                        }
                }
        } else if (strcmp(token, "erase") == 0)
                /* FIXME: should individual buffers be releasable, too? */
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
        return (int)count;
}
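
/*
 * Illustrative proc usage (the vendor/device ids below are made up):
 * pre-allocate two 512kB DEV buffers for every PCI device 0x1234:0x5678,
 * restricting DMA to the low 28 bits, then release all reservations:
 *
 *      echo "add 0x1234 0x5678 0x0fffffff 512k 2" > /proc/driver/snd-page-alloc
 *      echo erase > /proc/driver/snd-page-alloc
 */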
#endif /* CONFIG_PCI */
#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
        if (snd_mem_proc) {
                snd_mem_proc->read_proc = snd_mem_proc_read;
#ifdef CONFIG_PCI
                snd_mem_proc->write_proc = snd_mem_proc_write;
#endif
        }
#endif
        return 0;
}

static void __exit snd_mem_exit(void)
{
#ifdef CONFIG_PROC_FS
        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
#endif
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);