/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker ([email protected])
 *  Re-written by Christopher Hoover <[email protected]>
 *  Made generic by Deepak Saxena <[email protected]>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
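
/*
 * With STATS left undefined (the default above), DO_STATS() expands to
 * an empty statement, so e.g. DO_STATS(device_info->map_op_count++)
 * costs nothing in the fast path.
 */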

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};
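
/*
 * Lifecycle note: one safe_buffer describes one live bounce mapping.
 * 'ptr' is the driver's original (unsafe) buffer; 'safe' and
 * 'safe_dma_addr' describe the DMA-able copy it was bounced into.
 * Buffers sit on the owning device's safe_buffers list from
 * alloc_safe_buffer() until free_safe_buffer().
 */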

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};
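
/*
 * Per-device state lives in dev->archdata.dmabounce, set up by
 * dmabounce_register_dev() below.  The rwlock protects only the
 * safe_buffers list; the dma_pool code does its own locking.
 */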

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
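
/*
 * When built with STATS defined, each registered device gets a
 * read-only "dmabounce_stats" sysfs attribute (under the device's
 * sysfs directory; the exact path depends on the bus).  The six
 * columns are: small-pool allocs, large-pool allocs, oversize
 * (coherent) allocs, total allocs, map operations and bounce count.
 */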

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%zu)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
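
/*
 * Allocation strategy above: requests are satisfied from the
 * fixed-size "small" pool first, then the "large" pool, and anything
 * bigger falls back to dma_alloc_coherent().  GFP_ATOMIC is used
 * throughout because mappings may be created in atomic context.
 */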

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
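
/*
 * Note that the ARM dma_mapping_error() of this era treats ~0 as the
 * failure cookie, which is why the mapping paths below return ~0 when
 * a buffer cannot be mapped or bounced.
 */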

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}
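
	/*
	 * Example of the check above: a 24-bit DMA mask of 0x00ffffff
	 * gives (mask + 1) & ~mask == 0x01000000, i.e. a 16MB limit on
	 * any single mapping; for a full 32-bit mask the addition wraps
	 * to 0 and the size check is skipped.  needs_bounce becomes
	 * non-zero whenever the first or last byte of the buffer falls
	 * outside the mask.
	 */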

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * The buffer is being mapped in place, so hand
		 * ownership to the device with the usual cache
		 * maintenance.  (The bounce path above needs no sync
		 * since the safe buffer was allocated via the
		 * coherent allocators.)
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %zu\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(__dma_map_single);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_single);

dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			"is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);
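
/*
 * Bouncing relies on page_address(), so only lowmem pages can be
 * handled: a highmem page has no permanent kernel mapping to memcpy()
 * from, hence the error return above.
 */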

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
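
/*
 * Both sync hooks take an offset and size so that only the touched
 * part of a bounced buffer is copied.  A non-zero return means the
 * address was never bounced, and the caller (the ARM dma_sync_single_*
 * glue) should fall through to its normal cache maintenance.
 */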

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
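
/*
 * Registration is done by platform code for buses with narrow DMA
 * windows (the SA1111 and IXP4xx support are users of this interface).
 * A sketch of a hypothetical probe hook, with illustrative pool sizes:
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * This creates a pool of 512 byte and a pool of 4096 byte bounce
 * buffers; mappings larger than both fall back to dma_alloc_coherent().
 */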

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <[email protected]>, Deepak Saxena <[email protected]>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");