/*
 *  Copyright (C) 2011 Texas Instruments Incorporated
 *  Author: Mark Salter <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

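/*
 * Perform the cache maintenance needed for a streaming DMA transfer.
 * Streaming DMA here is not hardware cache-coherent, so the affected
 * L2 cache lines are invalidated before the CPU reads device-written
 * data (DMA_FROM_DEVICE), written back before the device reads
 * CPU-written data (DMA_TO_DEVICE), or both for DMA_BIDIRECTIONAL.
 */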
static void c6x_dma_sync(dma_addr_t handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long paddr = handle;

	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}

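/*
 * Map a single, physically contiguous buffer for streaming DMA.  The
 * kernel virtual address is converted to the physical address used as
 * the DMA handle, and the cache is synchronized for the transfer
 * direction before the handle is returned.
 */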
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t addr = virt_to_phys(ptr);

	c6x_dma_sync(addr, size, dir);

	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}
EXPORT_SYMBOL(dma_map_single);

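/*
 * Tear down a mapping created by dma_map_single().  The cache is
 * synchronized once more for the transfer direction before the buffer
 * is handed back to the CPU.
 */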
void dma_unmap_single(struct device *dev, dma_addr_t handle,
		      size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(handle, size, dir);

	debug_dma_unmap_page(dev, handle, size, dir, true);
}
EXPORT_SYMBOL(dma_unmap_single);

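/*
 * Map each entry of a scatterlist for streaming DMA by mapping its
 * backing memory individually and storing the resulting DMA address in
 * the entry.  Returns the number of entries mapped.
 */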
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
	       int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
						 dir);

	debug_dma_map_sg(dev, sglist, nents, nents, dir);

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

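/*
 * Unmap a scatterlist previously mapped with dma_map_sg(), syncing the
 * cache for each entry in turn.
 */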
void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);

	debug_dma_unmap_sg(dev, sglist, nents, dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

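/*
 * Give ownership of a streaming mapping back to the CPU, performing the
 * cache maintenance needed for the CPU to see the buffer's current
 * contents.
 */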
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(handle, size, dir);

	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

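/*
 * Hand ownership of a streaming mapping back to the device, performing
 * the cache maintenance needed before the device accesses the buffer.
 */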
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(handle, size, dir);

	debug_dma_sync_single_for_device(dev, handle, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

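/* Sync every entry of a mapped scatterlist for CPU access. */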
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
			 int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					sg->length, dir);

	debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

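/* Sync every entry of a mapped scatterlist for device access. */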
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		dma_sync_single_for_device(dev, sg_dma_address(sg),
					   sg->length, dir);

	debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

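/*
 * Initialize DMA-API debugging with a preallocated pool of entries.
 * Runs at fs_initcall time, before device_initcall-level drivers start
 * creating mappings.
 */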
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);